From 66cec45960ce1d9c794e9399de15c138acb18aed Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 18:03:42 +0200 Subject: Adding upstream version 7.3.0+dfsg. Signed-off-by: Daniel Baumann --- .../.github/workflows/ansible-test.yml | 42 + .../dellemc/enterprise_sonic/.gitignore | 387 + .../dellemc/enterprise_sonic/.yamllint | 14 + .../dellemc/enterprise_sonic/FILES.json | 4702 +++++ .../dellemc/enterprise_sonic/LICENSE | 674 + .../dellemc/enterprise_sonic/MANIFEST.json | 43 + .../dellemc/enterprise_sonic/README.md | 253 + .../enterprise_sonic/changelogs/.plugin-cache.yaml | 170 + .../enterprise_sonic/changelogs/CHANGELOG.rst | 119 + .../2.0.0/100-prefix_lists-resource-module.yaml | 2 + .../2.0.0/101-bgp-prefix_lists.yaml | 3 + .../2.0.0/102-playbook-example-updates.yaml | 3 + ...andard-interface-naming-and-other-bugfixes.yaml | 3 + .../2.0.0/105-vxlan-regression-test-fix.yaml | 2 + .../changelogs/archive_fragments/2.0.0/2.0.0.yaml | 2 + .../2.0.0/53-oc-yang-compliance.yaml | 2 + .../2.0.0/58-vxlan-primary-ip.yaml | 2 + .../2.0.0/60-bgp-regression-test_fixes.yaml | 4 + .../2.0.0/62-bgp-vxlan-primary-ip.yaml | 2 + .../2.0.0/63-bgp_af-route_advertise_list.yaml | 2 + ...eighbors-auth_pwd-and-neighbor_description.yaml | 2 + .../2.0.0/72-bgp_neighbors-add-attributes.yaml | 5 + .../2.0.0/76-CLI-command-prompt-answer-fix.yaml | 2 + .../2.0.0/78-aaa-regression-fix.yaml | 2 + .../2.0.0/81-add-bgp-peer-group-attributes.yaml | 5 + .../archive_fragments/2.0.0/82-static_routes.yaml | 2 + .../2.0.0/85-regression-test-fixes.yaml | 2 + ...88-port_breakout-and-execution-environment.yaml | 5 + .../2.0.0/90-aaa-and-users-bugfix.yaml | 6 + .../2.0.0/98-vlans-description.yaml | 2 + .../2.0.0/99-ntp-resource-module.yaml | 2 + .../enterprise_sonic/changelogs/changelog.yaml | 197 + .../enterprise_sonic/changelogs/config.yaml | 33 + .../meta/execution-environment.yml | 3 + .../dellemc/enterprise_sonic/meta/runtime.yml | 53 + .../playbooks/bgp_l3_fabric/clos-fabric.yaml | 43 
+ .../playbooks/bgp_l3_fabric/group_vars/all.yaml | 49 + .../playbooks/bgp_l3_fabric/group_vars/leaf.yaml | 103 + .../playbooks/bgp_l3_fabric/group_vars/spine.yaml | 87 + .../playbooks/bgp_l3_fabric/inventory.yaml | 20 + .../bgp_l3_fabric/templates/clos_fabric_cfg.j2 | 25 + .../playbooks/common_examples/hosts | 12 + .../common_examples/interface_naming.yaml | 32 + .../playbooks/common_examples/patch.txt | 8 + .../playbooks/common_examples/sonic_aaa.yaml | 79 + .../playbooks/common_examples/sonic_api.yaml | 37 + .../playbooks/common_examples/sonic_bgp.yaml | 321 + .../common_examples/sonic_bgp_communities.yaml | 66 + .../playbooks/common_examples/sonic_command.yaml | 58 + .../playbooks/common_examples/sonic_config.yaml | 42 + .../playbooks/common_examples/sonic_facts.yaml | 22 + .../common_examples/sonic_interfaces_config.yaml | 77 + .../playbooks/common_examples/sonic_system.yaml | 18 + .../common_examples/sonic_vxlans_config.yaml | 38 + .../playbooks/common_examples/src.txt | 4 + .../enterprise_sonic/plugins/action/__init__.py | 0 .../enterprise_sonic/plugins/action/sonic.py | 51 + .../enterprise_sonic/plugins/cliconf/__init__.py | 0 .../enterprise_sonic/plugins/cliconf/sonic.py | 118 + .../enterprise_sonic/plugins/httpapi/__init__.py | 0 .../enterprise_sonic/plugins/httpapi/sonic.py | 113 + .../plugins/module_utils/__init__.py | 0 .../plugins/module_utils/network/__init__.py | 0 .../plugins/module_utils/network/sonic/__init__.py | 0 .../module_utils/network/sonic/argspec/__init__.py | 0 .../network/sonic/argspec/aaa/__init__.py | 0 .../module_utils/network/sonic/argspec/aaa/aaa.py | 66 + .../network/sonic/argspec/bgp/__init__.py | 0 .../module_utils/network/sonic/argspec/bgp/bgp.py | 97 + .../network/sonic/argspec/bgp_af/__init__.py | 0 .../network/sonic/argspec/bgp_af/bgp_af.py | 117 + .../network/sonic/argspec/bgp_as_paths/__init__.py | 0 .../sonic/argspec/bgp_as_paths/bgp_as_paths.py | 48 + .../sonic/argspec/bgp_communities/__init__.py | 0 
.../argspec/bgp_communities/bgp_communities.py | 59 + .../sonic/argspec/bgp_ext_communities/__init__.py | 0 .../bgp_ext_communities/bgp_ext_communities.py | 75 + .../sonic/argspec/bgp_neighbors/__init__.py | 0 .../sonic/argspec/bgp_neighbors/bgp_neighbors.py | 249 + .../sonic/argspec/bgp_neighbors_af/__init__.py | 0 .../argspec/bgp_neighbors_af/bgp_neighbors_af.py | 114 + .../network/sonic/argspec/facts/__init__.py | 0 .../network/sonic/argspec/facts/facts.py | 53 + .../network/sonic/argspec/interfaces/__init__.py | 0 .../network/sonic/argspec/interfaces/interfaces.py | 56 + .../sonic/argspec/l2_interfaces/l2_interfaces.py | 71 + .../sonic/argspec/l3_interfaces/__init__.py | 0 .../sonic/argspec/l3_interfaces/l3_interfaces.py | 81 + .../sonic/argspec/lag_interfaces/__init__.py | 0 .../sonic/argspec/lag_interfaces/lag_interfaces.py | 67 + .../network/sonic/argspec/mclag/mclag.py | 82 + .../module_utils/network/sonic/argspec/ntp/ntp.py | 89 + .../sonic/argspec/port_breakout/port_breakout.py | 57 + .../network/sonic/argspec/prefix_lists/__init__.py | 0 .../sonic/argspec/prefix_lists/prefix_lists.py | 71 + .../sonic/argspec/radius_server/radius_server.py | 83 + .../sonic/argspec/static_routes/__init__.py | 0 .../sonic/argspec/static_routes/static_routes.py | 79 + .../network/sonic/argspec/system/__init__.py | 0 .../network/sonic/argspec/system/system.py | 64 + .../sonic/argspec/tacacs_server/tacacs_server.py | 80 + .../network/sonic/argspec/users/users.py | 62 + .../network/sonic/argspec/vlans/__init__.py | 0 .../network/sonic/argspec/vlans/vlans.py | 54 + .../network/sonic/argspec/vrfs/vrfs.py | 66 + .../network/sonic/argspec/vxlans/__init__.py | 0 .../network/sonic/argspec/vxlans/vxlans.py | 73 + .../module_utils/network/sonic/config/__init__.py | 0 .../network/sonic/config/aaa/__init__.py | 0 .../module_utils/network/sonic/config/aaa/aaa.py | 236 + .../module_utils/network/sonic/config/bgp/bgp.py | 598 + .../network/sonic/config/bgp_af/bgp_af.py | 848 + 
.../sonic/config/bgp_as_paths/bgp_as_paths.py | 304 + .../sonic/config/bgp_communities/__init__.py | 0 .../config/bgp_communities/bgp_communities.py | 368 + .../sonic/config/bgp_ext_communities/__init__.py | 0 .../bgp_ext_communities/bgp_ext_communities.py | 371 + .../sonic/config/bgp_neighbors/bgp_neighbors.py | 1100 ++ .../config/bgp_neighbors_af/bgp_neighbors_af.py | 584 + .../network/sonic/config/interfaces/interfaces.py | 354 + .../sonic/config/l2_interfaces/l2_interfaces.py | 414 + .../sonic/config/l3_interfaces/l3_interfaces.py | 515 + .../sonic/config/lag_interfaces/lag_interfaces.py | 421 + .../network/sonic/config/mclag/mclag.py | 323 + .../module_utils/network/sonic/config/ntp/ntp.py | 548 + .../sonic/config/port_breakout/port_breakout.py | 260 + .../network/sonic/config/prefix_lists/__init__.py | 0 .../sonic/config/prefix_lists/prefix_lists.py | 458 + .../sonic/config/radius_server/radius_server.py | 362 + .../network/sonic/config/static_routes/__init__.py | 0 .../sonic/config/static_routes/static_routes.py | 344 + .../network/sonic/config/system/__init__.py | 0 .../network/sonic/config/system/system.py | 294 + .../sonic/config/tacacs_server/tacacs_server.py | 318 + .../network/sonic/config/users/users.py | 299 + .../network/sonic/config/vlans/vlans.py | 265 + .../module_utils/network/sonic/config/vrfs/vrfs.py | 303 + .../network/sonic/config/vxlans/vxlans.py | 606 + .../module_utils/network/sonic/facts/__init__.py | 0 .../network/sonic/facts/aaa/__init__.py | 0 .../module_utils/network/sonic/facts/aaa/aaa.py | 111 + .../network/sonic/facts/bgp/__init__.py | 0 .../module_utils/network/sonic/facts/bgp/bgp.py | 156 + .../network/sonic/facts/bgp_af/__init__.py | 0 .../network/sonic/facts/bgp_af/bgp_af.py | 258 + .../network/sonic/facts/bgp_as_paths/__init__.py | 0 .../sonic/facts/bgp_as_paths/bgp_as_paths.py | 129 + .../sonic/facts/bgp_communities/__init__.py | 0 .../sonic/facts/bgp_communities/bgp_communities.py | 145 + 
.../sonic/facts/bgp_ext_communities/__init__.py | 0 .../bgp_ext_communities/bgp_ext_communities.py | 158 + .../network/sonic/facts/bgp_neighbors/__init__.py | 0 .../sonic/facts/bgp_neighbors/bgp_neighbors.py | 229 + .../sonic/facts/bgp_neighbors_af/__init__.py | 0 .../facts/bgp_neighbors_af/bgp_neighbors_af.py | 222 + .../module_utils/network/sonic/facts/facts.py | 101 + .../network/sonic/facts/interfaces/__init__.py | 0 .../network/sonic/facts/interfaces/interfaces.py | 147 + .../sonic/facts/l2_interfaces/l2_interfaces.py | 160 + .../network/sonic/facts/l3_interfaces/__init__.py | 0 .../sonic/facts/l3_interfaces/l3_interfaces.py | 185 + .../network/sonic/facts/lag_interfaces/__init__.py | 0 .../sonic/facts/lag_interfaces/lag_interfaces.py | 135 + .../network/sonic/facts/mclag/mclag.py | 139 + .../network/sonic/facts/ntp/__init__.py | 0 .../module_utils/network/sonic/facts/ntp/ntp.py | 153 + .../sonic/facts/port_breakout/port_breakout.py | 125 + .../network/sonic/facts/prefix_lists/__init__.py | 0 .../sonic/facts/prefix_lists/prefix_lists.py | 158 + .../sonic/facts/radius_server/radius_server.py | 168 + .../network/sonic/facts/static_routes/__init__.py | 0 .../sonic/facts/static_routes/static_routes.py | 173 + .../network/sonic/facts/system/__init__.py | 0 .../network/sonic/facts/system/system.py | 143 + .../sonic/facts/tacacs_server/tacacs_server.py | 150 + .../network/sonic/facts/users/users.py | 122 + .../network/sonic/facts/vlans/__init__.py | 0 .../network/sonic/facts/vlans/vlans.py | 126 + .../module_utils/network/sonic/facts/vrfs/vrfs.py | 120 + .../network/sonic/facts/vxlans/__init__.py | 0 .../network/sonic/facts/vxlans/vxlans.py | 207 + .../plugins/module_utils/network/sonic/sonic.py | 155 + .../module_utils/network/sonic/utils/__init__.py | 0 .../module_utils/network/sonic/utils/bgp_utils.py | 611 + .../network/sonic/utils/interfaces_util.py | 55 + .../module_utils/network/sonic/utils/utils.py | 511 + .../enterprise_sonic/plugins/modules/__init__.py | 0 
.../enterprise_sonic/plugins/modules/sonic_aaa.py | 215 + .../enterprise_sonic/plugins/modules/sonic_api.py | 158 + .../enterprise_sonic/plugins/modules/sonic_bgp.py | 390 + .../plugins/modules/sonic_bgp_af.py | 414 + .../plugins/modules/sonic_bgp_as_paths.py | 224 + .../plugins/modules/sonic_bgp_communities.py | 301 + .../plugins/modules/sonic_bgp_ext_communities.py | 288 + .../plugins/modules/sonic_bgp_neighbors.py | 1112 ++ .../plugins/modules/sonic_bgp_neighbors_af.py | 451 + .../plugins/modules/sonic_command.py | 235 + .../plugins/modules/sonic_config.py | 329 + .../plugins/modules/sonic_facts.py | 136 + .../plugins/modules/sonic_interfaces.py | 230 + .../plugins/modules/sonic_l2_interfaces.py | 296 + .../plugins/modules/sonic_l3_interfaces.py | 375 + .../plugins/modules/sonic_lag_interfaces.py | 238 + .../plugins/modules/sonic_mclag.py | 516 + .../enterprise_sonic/plugins/modules/sonic_ntp.py | 360 + .../plugins/modules/sonic_port_breakout.py | 228 + .../plugins/modules/sonic_prefix_lists.py | 423 + .../plugins/modules/sonic_radius_server.py | 328 + .../plugins/modules/sonic_static_routes.py | 267 + .../plugins/modules/sonic_system.py | 214 + .../plugins/modules/sonic_tacacs_server.py | 297 + .../plugins/modules/sonic_users.py | 210 + .../plugins/modules/sonic_vlans.py | 241 + .../enterprise_sonic/plugins/modules/sonic_vrfs.py | 204 + .../plugins/modules/sonic_vxlans.py | 245 + .../enterprise_sonic/plugins/terminal/__init__.py | 0 .../enterprise_sonic/plugins/terminal/sonic.py | 73 + .../dellemc/enterprise_sonic/rebuild.sh | 22 + .../dellemc/enterprise_sonic/requirements.txt | 2 + .../enterprise_sonic/tests/regression/hosts | 13 + .../tests/regression/image-upgrade.yaml | 31 + .../regression/roles/common/defaults/main.yml | 66 + .../tests/regression/roles/common/meta/main.yaml | 3 + .../roles/common/tasks/action.facts.report.yaml | 10 + .../tasks/cli.contains.test.facts.report.yaml | 11 + .../roles/common/tasks/cli.test.facts.report.yaml | 11 + 
.../roles/common/tasks/cli_tasks_template.yaml | 14 + .../common/tasks/idempotent.facts.report.yaml | 12 + .../tests/regression/roles/common/tasks/main.yml | 1 + .../common/tasks/single.run.facts.report.yaml | 10 + .../roles/common/templates/task_template.j2 | 14 + .../roles/common/templates/task_template1.j2 | 14 + .../regression/roles/sonic_aaa/defaults/main.yml | 54 + .../regression/roles/sonic_aaa/meta/main.yaml | 5 + .../regression/roles/sonic_aaa/tasks/main.yml | 17 + .../roles/sonic_aaa/tasks/preparation_tests.yaml | 5 + .../roles/sonic_aaa/tasks/tasks_template.yaml | 21 + .../roles/sonic_aaa/tasks/tasks_template_del.yaml | 21 + .../regression/roles/sonic_api/defaults/main.yml | 6 + .../regression/roles/sonic_api/meta/main.yaml | 5 + .../regression/roles/sonic_api/tasks/invalid.yaml | 28 + .../regression/roles/sonic_api/tasks/main.yaml | 10 + .../regression/roles/sonic_api/tasks/patch.txt | 8 + .../roles/sonic_api/tasks/preparation_tests.yaml | 6 + .../roles/sonic_api/tasks/test_delete.yaml | 12 + .../regression/roles/sonic_api/tasks/test_get.yaml | 11 + .../roles/sonic_api/tasks/test_patch.yaml | 13 + .../roles/sonic_api/tasks/test_post.yaml | 27 + .../regression/roles/sonic_api/tasks/test_put.yaml | 13 + .../regression/roles/sonic_bgp/defaults/main.yml | 250 + .../regression/roles/sonic_bgp/meta/main.yaml | 5 + .../roles/sonic_bgp/tasks/cleanup_tests.yaml | 6 + .../regression/roles/sonic_bgp/tasks/main.yml | 28 + .../roles/sonic_bgp/tasks/preparation_tests.yaml | 11 + .../roles/sonic_bgp/tasks/tasks_template.yaml | 21 + .../roles/sonic_bgp/templates/cli_test_case_01.cfg | 18 + .../roles/sonic_bgp_af/defaults/main.yml | 324 + .../regression/roles/sonic_bgp_af/meta/main.yaml | 5 + .../roles/sonic_bgp_af/tasks/cleanup_tests.yaml | 10 + .../regression/roles/sonic_bgp_af/tasks/main.yml | 15 + .../sonic_bgp_af/tasks/preparation_tests.yaml | 20 + .../roles/sonic_bgp_af/tasks/tasks_template.yaml | 21 + .../roles/sonic_bgp_as_paths/defaults/main.yml | 78 + 
.../roles/sonic_bgp_as_paths/meta/main.yaml | 5 + .../roles/sonic_bgp_as_paths/tasks/main.yml | 13 + .../tasks/preparation_tests.yaml | 5 + .../sonic_bgp_as_paths/tasks/tasks_template.yaml | 21 + .../roles/sonic_bgp_communities/defaults/main.yml | 101 + .../roles/sonic_bgp_communities/meta/main.yaml | 5 + .../roles/sonic_bgp_communities/tasks/main.yml | 13 + .../tasks/preparation_tests.yaml | 5 + .../tasks/tasks_template.yaml | 23 + .../sonic_bgp_ext_communities/defaults/main.yml | 321 + .../roles/sonic_bgp_ext_communities/meta/main.yaml | 5 + .../roles/sonic_bgp_ext_communities/tasks/main.yml | 13 + .../tasks/preparation_tests.yaml | 5 + .../tasks/tasks_template.yaml | 23 + .../sonic_bgp_neighbors/defaults/main copy.yml | 316 + .../roles/sonic_bgp_neighbors/defaults/main.yml | 880 + .../roles/sonic_bgp_neighbors/meta/main.yaml | 5 + .../sonic_bgp_neighbors/tasks/action_template.yaml | 10 + .../sonic_bgp_neighbors/tasks/cleanup_tests.yaml | 10 + .../roles/sonic_bgp_neighbors/tasks/main.yml | 19 + .../tasks/preparation_tests.yaml | 38 + .../sonic_bgp_neighbors/tasks/tasks_template.yaml | 21 + .../roles/sonic_bgp_neighbors_af/defaults/main.yml | 468 + .../roles/sonic_bgp_neighbors_af/meta/main.yaml | 5 + .../tasks/cleanup_tests.yaml | 15 + .../roles/sonic_bgp_neighbors_af/tasks/main.yml | 21 + .../tasks/preparation_tests.yaml | 43 + .../tasks/tasks_template.yaml | 21 + .../roles/sonic_command/defaults/main.yaml | 1 + .../regression/roles/sonic_command/meta/main.yaml | 5 + .../roles/sonic_command/tasks/bad_operator.yaml | 33 + .../roles/sonic_command/tasks/cli_command.yaml | 26 + .../roles/sonic_command/tasks/contains.yaml | 29 + .../roles/sonic_command/tasks/invalid.yaml | 25 + .../regression/roles/sonic_command/tasks/main.yaml | 12 + .../roles/sonic_command/tasks/output.yaml | 24 + .../roles/sonic_command/tasks/prompt.yaml | 15 + .../roles/sonic_command/tasks/test_local.yaml | 16 + .../roles/sonic_command/tasks/timeout.yaml | 30 + 
.../roles/sonic_config/defaults/main.yml | 83 + .../regression/roles/sonic_config/meta/main.yaml | 5 + .../roles/sonic_config/tasks/backup.yaml | 16 + .../regression/roles/sonic_config/tasks/main.yml | 24 + .../roles/sonic_config/tasks/match_template.yaml | 64 + .../sonic_config/tasks/preparation_tests.yaml | 11 + .../roles/sonic_config/tasks/prompt.yaml | 12 + .../sonic_config/tasks/replace_tasks_template.yaml | 38 + .../roles/sonic_config/tasks/tasks_template.yaml | 21 + .../roles/sonic_config/templates/snmp.j2 | 6 + .../roles/sonic_config/templates/src.txt | 3 + .../roles/sonic_interfaces/defaults/main.yml | 153 + .../roles/sonic_interfaces/meta/main.yaml | 5 + .../roles/sonic_interfaces/tasks/main.yml | 13 + .../sonic_interfaces/tasks/preparation_tests.yaml | 44 + .../sonic_interfaces/tasks/tasks_template.yaml | 21 + .../roles/sonic_l2_interfaces/defaults/main.yml | 101 + .../roles/sonic_l2_interfaces/meta/main.yaml | 5 + .../roles/sonic_l2_interfaces/tasks/main.yml | 12 + .../tasks/preparation_tests.yaml | 35 + .../sonic_l2_interfaces/tasks/tasks_template.yaml | 21 + .../roles/sonic_l3_interfaces/defaults/main.yml | 244 + .../roles/sonic_l3_interfaces/meta/main.yaml | 5 + .../roles/sonic_l3_interfaces/tasks/main.yml | 13 + .../tasks/preparation_tests.yaml | 28 + .../sonic_l3_interfaces/tasks/tasks_template.yaml | 21 + .../roles/sonic_lag_interfaces/defaults/main.yml | 89 + .../roles/sonic_lag_interfaces/meta/main.yaml | 5 + .../roles/sonic_lag_interfaces/tasks/main.yml | 13 + .../tasks/preparation_tests.yaml | 22 + .../sonic_lag_interfaces/tasks/tasks_template.yaml | 22 + .../regression/roles/sonic_mclag/defaults/main.yml | 107 + .../regression/roles/sonic_mclag/meta/main.yaml | 5 + .../regression/roles/sonic_mclag/tasks/main.yml | 22 + .../roles/sonic_mclag/tasks/preparation_tests.yaml | 21 + .../roles/sonic_mclag/tasks/tasks_template.yaml | 21 + .../sonic_mclag/tasks/tasks_template_del.yaml | 21 + .../regression/roles/sonic_ntp/defaults/main.yml | 209 + 
.../regression/roles/sonic_ntp/meta/main.yaml | 5 + .../roles/sonic_ntp/tasks/cleanup_tests.yaml | 15 + .../regression/roles/sonic_ntp/tasks/main.yml | 16 + .../roles/sonic_ntp/tasks/preparation_tests.yaml | 21 + .../roles/sonic_ntp/tasks/tasks_template.yaml | 21 + .../roles/sonic_port_breakout/defaults/main.yml | 57 + .../roles/sonic_port_breakout/meta/main.yaml | 5 + .../sonic_port_breakout/tasks/cleanup_tests.yaml | 6 + .../roles/sonic_port_breakout/tasks/main.yml | 31 + .../tasks/preparation_tests.yaml | 5 + .../sonic_port_breakout/tasks/tasks_template.yaml | 21 + .../templates/cli_test_case_01.cfg | 2 + .../roles/sonic_prefix_lists/defaults/main.yml | 122 + .../roles/sonic_prefix_lists/meta/main.yaml | 5 + .../sonic_prefix_lists/tasks/cleanup_tests.yaml | 5 + .../roles/sonic_prefix_lists/tasks/main.yml | 15 + .../tasks/preparation_tests.yaml | 7 + .../sonic_prefix_lists/tasks/tasks_template.yaml | 21 + .../roles/sonic_radius_server/defaults/main.yml | 114 + .../roles/sonic_radius_server/meta/main.yaml | 5 + .../sonic_radius_server/tasks/cleanup_tests.yaml | 6 + .../roles/sonic_radius_server/tasks/main.yml | 16 + .../tasks/preparation_tests.yaml | 5 + .../sonic_radius_server/tasks/tasks_template.yaml | 21 + .../tasks/tasks_template_del.yaml | 21 + .../templates/cli_test_case_01.cfg | 0 .../roles/sonic_static_routes/defaults/main.yml | 172 + .../roles/sonic_static_routes/meta/main.yaml | 5 + .../roles/sonic_static_routes/tasks/main.yml | 11 + .../tasks/preparation_tests.yaml | 11 + .../sonic_static_routes/tasks/tasks_template.yaml | 21 + .../tasks/tasks_template_del.yaml | 21 + .../roles/sonic_system/defaults/main.yml | 47 + .../regression/roles/sonic_system/meta/main.yaml | 5 + .../roles/sonic_system/tasks/cleanup_tests.yaml | 13 + .../regression/roles/sonic_system/tasks/main.yml | 20 + .../sonic_system/tasks/preparation_tests.yaml | 5 + .../roles/sonic_system/tasks/tasks_template.yaml | 21 + .../sonic_system/tasks/tasks_template_del.yaml | 21 + 
.../roles/sonic_tacacs_server/defaults/main.yml | 91 + .../roles/sonic_tacacs_server/meta/main.yaml | 5 + .../sonic_tacacs_server/tasks/cleanup_tests.yaml | 6 + .../roles/sonic_tacacs_server/tasks/main.yml | 16 + .../tasks/preparation_tests.yaml | 5 + .../sonic_tacacs_server/tasks/tasks_template.yaml | 21 + .../tasks/tasks_template_del.yaml | 21 + .../templates/cli_test_case_01.cfg | 0 .../regression/roles/sonic_users/defaults/main.yml | 96 + .../regression/roles/sonic_users/meta/main.yaml | 5 + .../roles/sonic_users/tasks/cli_tests.yaml | 14 + .../regression/roles/sonic_users/tasks/main.yml | 39 + .../roles/sonic_users/tasks/preparation_tests.yaml | 5 + .../sonic_users/tasks/single_run_template.yaml | 19 + .../roles/sonic_users/tasks/tasks_template.yaml | 21 + .../sonic_users/templates/cli_test_case_01.cfg | 2 + .../regression/roles/sonic_vlans/defaults/main.yml | 42 + .../regression/roles/sonic_vlans/meta/main.yaml | 5 + .../regression/roles/sonic_vlans/tasks/main.yml | 12 + .../roles/sonic_vlans/tasks/preparation_tests.yaml | 23 + .../roles/sonic_vlans/tasks/tasks_template.yaml | 21 + .../regression/roles/sonic_vrfs/defaults/main.yml | 139 + .../regression/roles/sonic_vrfs/meta/main.yaml | 5 + .../roles/sonic_vrfs/tasks/cleanup_tests.yaml | 6 + .../regression/roles/sonic_vrfs/tasks/main.yml | 34 + .../roles/sonic_vrfs/tasks/preparation_tests.yaml | 34 + .../roles/sonic_vrfs/tasks/tasks_template.yaml | 21 + .../sonic_vrfs/templates/cli_test_case_01.cfg | 10 + .../regression/roles/sonic_vxlan/defaults/main.yml | 121 + .../regression/roles/sonic_vxlan/meta/main.yaml | 5 + .../roles/sonic_vxlan/tasks/cleanup_tests.yaml | 6 + .../regression/roles/sonic_vxlan/tasks/main.yml | 34 + .../roles/sonic_vxlan/tasks/preparation_tests.yaml | 29 + .../roles/sonic_vxlan/tasks/tasks_template.yaml | 21 + .../sonic_vxlan/templates/cli_test_case_01.cfg | 8 + .../roles/test_reports/defaults/main.yml | 0 .../regression/roles/test_reports/meta/main.yaml | 3 + 
.../regression/roles/test_reports/tasks/main.yml | 12 + .../templates/regression_html_report.j2 | 328 + .../enterprise_sonic/tests/regression/test.yaml | 39 + .../enterprise_sonic/tests/sanity/ignore-2.10.txt | 1 + .../enterprise_sonic/tests/sanity/ignore-2.11.txt | 1 + .../enterprise_sonic/tests/sanity/ignore-2.12.txt | 1 + .../enterprise_sonic/tests/sanity/ignore-2.13.txt | 1 + .../enterprise_sonic/tests/sanity/ignore-2.14.txt | 1 + .../enterprise_sonic/tests/sanity/ignore-2.9.txt | 1 + .../tests/unit/utils/run_test_cases.sh | 2 + .../utils/test_01_dict_diff_with_key_name.yaml | 43 + .../utils/test_02_dict_diff_with_key_other.yaml | 46 + .../unit/utils/test_03_dict_diff_without_key.yaml | 38 + .../utils/test_04_dict_diff_with_similar_dict.yaml | 28 + .../unit/utils/test_05_dict_diff_left_only.yaml | 29 + .../test_06_dict_diff_left_only_with_none.yaml | 18 + .../utils/test_07_dict_diff_skeleton_only.yaml | 30 + .../utils/test_08_list_diff_with_key_name.yaml | 34 + .../utils/test_09_list_diff_with_multi_keys.yaml | 50 + .../utils/test_10_list_diff_with_key_other.yaml | 37 + .../utils/test_11_list_diff_with_similar_list.yaml | 31 + .../utils/test_12_list_diff_with_left_only.yaml | 40 + ...test_13_list_diff_with_left_only_with_none.yaml | 48 + .../utils/test_14_list_diff_skeleton_only.yaml | 61 + .../unit/utils/test_15_list_of_list_diff.yaml | 163 + .../utils/test_16_complex_list_with_dict_diff.yaml | 252 + .../tests/unit/utils/test_diff_util.py | 87 + .../dellemc/openmanage/.github/CODEOWNERS | 28 + .../.github/ISSUE_TEMPLATE/ask_a_question.md | 11 + .../.github/ISSUE_TEMPLATE/bug_report.yml | 124 + .../openmanage/.github/ISSUE_TEMPLATE/config.yml | 5 + .../.github/ISSUE_TEMPLATE/feature_request.md | 23 + .../openmanage/.github/PULL_REQUEST_TEMPLATE.md | 43 + .../openmanage/.github/workflows/ansible-test.yml | 130 + ansible_collections/dellemc/openmanage/.gitignore | 9 + .../dellemc/openmanage/CHANGELOG.rst | 857 + ansible_collections/dellemc/openmanage/FILES.json | 
3113 +++ ansible_collections/dellemc/openmanage/LICENSE | 674 + .../dellemc/openmanage/MANIFEST.json | 45 + ansible_collections/dellemc/openmanage/README.md | 56 + .../openmanage/changelogs/.plugin-cache.yaml | 340 + .../dellemc/openmanage/changelogs/changelog.yaml | 993 + .../dellemc/openmanage/changelogs/config.yaml | 31 + .../openmanage/docs/ADDITIONAL_INFORMATION.md | 20 + .../dellemc/openmanage/docs/BRANCHING.md | 10 + .../dellemc/openmanage/docs/CODE_OF_CONDUCT.md | 133 + .../dellemc/openmanage/docs/COMMITTER_GUIDE.md | 41 + .../dellemc/openmanage/docs/CONTRIBUTING.md | 197 + .../dellemc/openmanage/docs/DEBUG.md | 58 + .../dellemc/openmanage/docs/DOCUMENTATION.md | 40 + .../openmanage/docs/EXECUTION_ENVIRONMENT.md | 347 + .../dellemc/openmanage/docs/ISSUE_TRIAGE.md | 195 + .../dellemc/openmanage/docs/MAINTAINERS.md | 18 + .../dellemc/openmanage/docs/README.md | 93 + .../dellemc/openmanage/docs/SECURITY.md | 20 + .../dellemc/openmanage/docs/SUPPORT.md | 15 + .../modules/dellemc_configure_idrac_eventing.rst | 218 + .../modules/dellemc_configure_idrac_services.rst | 215 + .../modules/dellemc_get_firmware_inventory.rst | 107 + .../docs/modules/dellemc_get_system_inventory.rst | 107 + .../docs/modules/dellemc_idrac_lc_attributes.rst | 153 + .../docs/modules/dellemc_idrac_storage_volume.rst | 281 + .../docs/modules/dellemc_system_lockdown_mode.rst | 153 + .../openmanage/docs/modules/idrac_attributes.rst | 271 + .../dellemc/openmanage/docs/modules/idrac_bios.rst | 341 + .../dellemc/openmanage/docs/modules/idrac_boot.rst | 282 + .../openmanage/docs/modules/idrac_certificates.rst | 259 + .../openmanage/docs/modules/idrac_firmware.rst | 224 + .../docs/modules/idrac_firmware_info.rst | 121 + .../idrac_lifecycle_controller_job_status_info.rst | 127 + .../modules/idrac_lifecycle_controller_jobs.rst | 136 + .../modules/idrac_lifecycle_controller_logs.rst | 161 + .../idrac_lifecycle_controller_status_info.rst | 122 + .../openmanage/docs/modules/idrac_network.rst | 264 + 
.../docs/modules/idrac_os_deployment.rst | 141 + .../modules/idrac_redfish_storage_controller.rst | 425 + .../openmanage/docs/modules/idrac_reset.rst | 125 + .../docs/modules/idrac_server_config_profile.rst | 386 + .../openmanage/docs/modules/idrac_syslog.rst | 160 + .../openmanage/docs/modules/idrac_system_info.rst | 121 + .../openmanage/docs/modules/idrac_timezone_ntp.rst | 174 + .../dellemc/openmanage/docs/modules/idrac_user.rst | 233 + .../docs/modules/idrac_virtual_media.rst | 255 + .../docs/modules/ome_active_directory.rst | 273 + .../docs/modules/ome_application_alerts_smtp.rst | 171 + .../docs/modules/ome_application_alerts_syslog.rst | 167 + .../docs/modules/ome_application_certificate.rst | 173 + .../ome_application_console_preferences.rst | 314 + .../modules/ome_application_network_address.rst | 394 + .../docs/modules/ome_application_network_proxy.rst | 183 + .../modules/ome_application_network_settings.rst | 238 + .../docs/modules/ome_application_network_time.rst | 173 + .../modules/ome_application_network_webserver.rst | 150 + .../modules/ome_application_security_settings.rst | 229 + .../openmanage/docs/modules/ome_chassis_slots.rst | 229 + .../ome_configuration_compliance_baseline.rst | 297 + .../modules/ome_configuration_compliance_info.rst | 155 + .../openmanage/docs/modules/ome_device_group.rst | 327 + .../openmanage/docs/modules/ome_device_info.rst | 207 + .../ome_device_local_access_configuration.rst | 304 + .../docs/modules/ome_device_location.rst | 194 + .../docs/modules/ome_device_mgmt_network.rst | 429 + .../docs/modules/ome_device_network_services.rst | 229 + .../docs/modules/ome_device_power_settings.rst | 208 + .../docs/modules/ome_device_quick_deploy.rst | 293 + .../openmanage/docs/modules/ome_devices.rst | 219 + .../openmanage/docs/modules/ome_diagnostics.rst | 284 + .../openmanage/docs/modules/ome_discovery.rst | 665 + .../docs/modules/ome_domain_user_groups.rst | 191 + .../openmanage/docs/modules/ome_firmware.rst | 322 + 
.../docs/modules/ome_firmware_baseline.rst | 260 + .../ome_firmware_baseline_compliance_info.rst | 189 + .../docs/modules/ome_firmware_baseline_info.rst | 128 + .../docs/modules/ome_firmware_catalog.rst | 331 + .../dellemc/openmanage/docs/modules/ome_groups.rst | 220 + .../openmanage/docs/modules/ome_identity_pool.rst | 316 + .../openmanage/docs/modules/ome_job_info.rst | 157 + .../docs/modules/ome_network_port_breakout.rst | 143 + .../openmanage/docs/modules/ome_network_vlan.rst | 206 + .../docs/modules/ome_network_vlan_info.rst | 144 + .../openmanage/docs/modules/ome_powerstate.rst | 167 + .../openmanage/docs/modules/ome_profile.rst | 470 + .../modules/ome_server_interface_profile_info.rst | 145 + .../docs/modules/ome_server_interface_profiles.rst | 241 + .../openmanage/docs/modules/ome_smart_fabric.rst | 199 + .../docs/modules/ome_smart_fabric_uplink.rst | 291 + .../openmanage/docs/modules/ome_template.rst | 547 + .../docs/modules/ome_template_identity_pool.rst | 134 + .../openmanage/docs/modules/ome_template_info.rst | 146 + .../docs/modules/ome_template_network_vlan.rst | 238 + .../dellemc/openmanage/docs/modules/ome_user.rst | 199 + .../openmanage/docs/modules/ome_user_info.rst | 146 + .../docs/modules/redfish_event_subscription.rst | 172 + .../openmanage/docs/modules/redfish_firmware.rst | 139 + .../openmanage/docs/modules/redfish_powerstate.rst | 154 + .../docs/modules/redfish_storage_volume.rst | 277 + .../openmanage/meta/execution-environment.yml | 5 + .../dellemc/openmanage/meta/runtime.yml | 36 + .../idrac/dellemc_idrac_storage_volume.yml | 110 + .../dellemc_configure_idrac_eventing.yml | 62 + .../dellemc_configure_idrac_services.yml | 46 + .../deprecated/dellemc_get_firmware_inventory.yml | 16 + .../deprecated/dellemc_get_system_inventory.yml | 16 + .../deprecated/dellemc_idrac_lc_attributes.yml | 17 + .../deprecated/dellemc_system_lockdown_mode.yml | 17 + .../playbooks/idrac/deprecated/idrac_network.yml | 75 + 
.../idrac/deprecated/idrac_timezone_ntp.yml | 24 + .../playbooks/idrac/idrac_attributes.yml | 155 + .../openmanage/playbooks/idrac/idrac_bios.yml | 115 + .../openmanage/playbooks/idrac/idrac_boot.yml | 69 + .../idrac/idrac_boot_virtual_media_workflow.yml | 56 + .../playbooks/idrac/idrac_certificates.yml | 69 + .../openmanage/playbooks/idrac/idrac_firmware.yml | 69 + .../playbooks/idrac/idrac_firmware_info.yml | 16 + .../idrac_lifecycle_controller_job_status_info.yml | 17 + .../idrac/idrac_lifecycle_controller_jobs.yml | 28 + .../idrac/idrac_lifecycle_controller_logs.yml | 18 + .../idrac_lifecycle_controller_status_info.yml | 16 + .../playbooks/idrac/idrac_os_deployment.yml | 22 + .../idrac/idrac_redfish_storage_controller.yml | 216 + ...rac_redfish_storage_controller_job_tracking.yml | 138 + .../openmanage/playbooks/idrac/idrac_reset.yml | 19 + .../idrac/idrac_reset_result_tracking.yml | 39 + .../idrac/idrac_server_config_profile.yml | 220 + .../openmanage/playbooks/idrac/idrac_syslog.yml | 18 + .../playbooks/idrac/idrac_system_info.yml | 16 + .../openmanage/playbooks/idrac/idrac_user.yml | 71 + .../playbooks/idrac/idrac_virtual_media.yml | 107 + .../application/ome_application_alerts_smtp.yml | 37 + .../application/ome_application_alerts_syslog.yml | 40 + .../application/ome_application_certificate.yml | 53 + .../ome_application_console_preferences.yml | 97 + .../ome_application_network_address.yml | 115 + ...plication_network_address_with_job_tracking.yml | 65 + .../application/ome_application_network_proxy.yml | 44 + .../ome_application_network_settings.yml | 73 + .../application/ome_application_network_time.yml | 33 + .../ome_application_network_time_zone_info.yml | 31 + .../ome_application_network_webserver.yml | 40 + ...ion_network_webserver_port_changed_tracking.yml | 61 + .../ome_application_security_settings.yml | 57 + .../ome_configuration_compliance_baseline.yml | 119 + ..._configuration_compliance_baseline_workflow.yml | 52 + 
.../ome_configuration_compliance_info.yml | 35 + .../component_complaince_report_with_baseline.yml | 26 + .../component_complaince_report_with_devices.yml | 28 + .../firmware/baseline/ome_firmware_baseline.yml | 75 + .../ome_firmware_baseline_compliance_info.yml | 51 + ...e_firmware_baseline_compliance_info_filters.yml | 63 + .../baseline/ome_firmware_baseline_info.yml | 26 + .../ome/firmware/catalog/ome_firmware_catalog.yml | 121 + .../playbooks/ome/firmware/ome_firmware.yml | 142 + .../firmware/ome_firmware_with_job_tracking.yml | 111 + .../playbooks/ome/ome_active_directory.yml | 72 + .../openmanage/playbooks/ome/ome_chassis_slots.yml | 65 + .../openmanage/playbooks/ome/ome_device_group.yml | 167 + .../openmanage/playbooks/ome/ome_device_info.yml | 79 + .../ome/ome_device_local_access_configuration.yml | 68 + .../playbooks/ome/ome_device_location.yml | 52 + .../playbooks/ome/ome_device_mgmt_network.yml | 105 + .../playbooks/ome/ome_device_network_services.yml | 59 + .../playbooks/ome/ome_device_power_settings.yml | 54 + .../playbooks/ome/ome_device_quick_deploy.yml | 66 + .../openmanage/playbooks/ome/ome_devices.yml | 60 + .../openmanage/playbooks/ome/ome_diagnostics.yml | 72 + .../openmanage/playbooks/ome/ome_discovery.yml | 189 + .../playbooks/ome/ome_domain_user_groups.yml | 59 + .../playbooks/ome/ome_group_device_action.yml | 69 + .../openmanage/playbooks/ome/ome_groups.yml | 57 + .../openmanage/playbooks/ome/ome_identity_pool.yml | 134 + .../openmanage/playbooks/ome/ome_job_info.yml | 35 + .../playbooks/ome/ome_network_port_breakout.yml | 32 + .../ome/ome_network_port_breakout_job_traking.yml | 37 + .../openmanage/playbooks/ome/ome_network_vlan.yml | 62 + .../playbooks/ome/ome_network_vlan_info.yml | 32 + .../ome/ome_server_interface_profile_info.yml | 33 + .../ome/ome_server_interface_profile_workflow.yml | 125 + .../ome/ome_server_interface_profiles.yml | 57 + .../openmanage/playbooks/ome/ome_smart_fabric.yml | 47 + 
.../playbooks/ome/ome_smart_fabric_uplink.yml | 119 + .../playbooks/ome/ome_template_identity_pool.yml | 31 + .../playbooks/ome/powerstate/ome_powerstate.yml | 51 + .../ome_powerstate_with_job_tracking.yml | 36 + .../playbooks/ome/profile/ome_profile.yml | 212 + .../profile/ome_profile_assign_job_tracking.yml | 47 + .../profile/ome_profile_migrate_job_tracking.yml | 48 + .../profile/ome_profile_unassign_job_tracking.yml | 47 + .../playbooks/ome/template/ome_template.yml | 338 + .../ome_template_create_modify_lcd_display.yml | 129 + .../playbooks/ome/template/ome_template_info.yml | 33 + .../ome/template/ome_template_info_with_filter.yml | 27 + .../ome_template_lcd_display_string_deploy.yml | 46 + .../ome/template/ome_template_network_vlan.yml | 66 + .../template/ome_template_with_job_tracking.yml | 48 + .../openmanage/playbooks/ome/user/ome_user.yml | 70 + .../playbooks/ome/user/ome_user_info.yml | 33 + .../redfish/firmware/redfish_firmware.yml | 32 + .../redfish_firmware_from_http_jobtracking.yml | 92 + .../redfish_firmware_from_local_jobtracking.yml | 92 + .../redfish/redfish_event_subscription.yml | 46 + .../playbooks/redfish/redfish_powerstate.yml | 26 + .../redfish/storage/redfish_storage_volume.yml | 85 + .../redfish_storage_volume_create_job_tracking.yml | 93 + .../redfish_storage_volume_delete_job_tracking.yml | 87 + ...fish_storage_volume_initialize_job_tracking.yml | 88 + .../redfish_storage_volume_modify_job_tracking.yml | 89 + .../dellemc/openmanage/plugins/README.md | 100 + .../openmanage/plugins/doc_fragments/__init__.py | 0 .../plugins/doc_fragments/idrac_auth_options.py | 55 + .../plugins/doc_fragments/network_share_options.py | 36 + .../plugins/doc_fragments/ome_auth_options.py | 54 + .../plugins/doc_fragments/omem_auth_options.py | 54 + .../plugins/doc_fragments/oment_auth_options.py | 54 + .../plugins/doc_fragments/redfish_auth_options.py | 50 + .../openmanage/plugins/module_utils/__init__.py | 0 .../plugins/module_utils/dellemc_idrac.py | 104 + 
.../plugins/module_utils/idrac_redfish.py | 377 + .../dellemc/openmanage/plugins/module_utils/ome.py | 399 + .../openmanage/plugins/module_utils/redfish.py | 219 + .../openmanage/plugins/module_utils/utils.py | 350 + .../dellemc/openmanage/plugins/modules/__init__.py | 0 .../modules/dellemc_configure_idrac_eventing.py | 342 + .../modules/dellemc_configure_idrac_services.py | 394 + .../modules/dellemc_get_firmware_inventory.py | 148 + .../modules/dellemc_get_system_inventory.py | 141 + .../plugins/modules/dellemc_idrac_lc_attributes.py | 224 + .../modules/dellemc_idrac_storage_volume.py | 505 + .../modules/dellemc_system_lockdown_mode.py | 216 + .../openmanage/plugins/modules/idrac_attributes.py | 524 + .../openmanage/plugins/modules/idrac_bios.py | 820 + .../openmanage/plugins/modules/idrac_boot.py | 563 + .../plugins/modules/idrac_certificates.py | 521 + .../openmanage/plugins/modules/idrac_firmware.py | 651 + .../plugins/modules/idrac_firmware_info.py | 144 + .../idrac_lifecycle_controller_job_status_info.py | 131 + .../modules/idrac_lifecycle_controller_jobs.py | 134 + .../modules/idrac_lifecycle_controller_logs.py | 223 + .../idrac_lifecycle_controller_status_info.py | 117 + .../openmanage/plugins/modules/idrac_network.py | 444 + .../plugins/modules/idrac_os_deployment.py | 165 + .../modules/idrac_redfish_storage_controller.py | 773 + .../openmanage/plugins/modules/idrac_reset.py | 132 + .../plugins/modules/idrac_server_config_profile.py | 666 + .../openmanage/plugins/modules/idrac_syslog.py | 202 + .../plugins/modules/idrac_system_info.py | 120 + .../plugins/modules/idrac_timezone_ntp.py | 259 + .../openmanage/plugins/modules/idrac_user.py | 429 + .../plugins/modules/idrac_virtual_media.py | 468 + .../plugins/modules/ome_active_directory.py | 457 + .../plugins/modules/ome_application_alerts_smtp.py | 265 + .../modules/ome_application_alerts_syslog.py | 260 + .../plugins/modules/ome_application_certificate.py | 212 + 
.../modules/ome_application_console_preferences.py | 669 + .../modules/ome_application_network_address.py | 751 + .../modules/ome_application_network_proxy.py | 254 + .../modules/ome_application_network_settings.py | 384 + .../modules/ome_application_network_time.py | 264 + .../modules/ome_application_network_webserver.py | 196 + .../modules/ome_application_security_settings.py | 360 + .../plugins/modules/ome_chassis_slots.py | 611 + .../ome_configuration_compliance_baseline.py | 842 + .../modules/ome_configuration_compliance_info.py | 244 + .../openmanage/plugins/modules/ome_device_group.py | 526 + .../openmanage/plugins/modules/ome_device_info.py | 433 + .../ome_device_local_access_configuration.py | 481 + .../plugins/modules/ome_device_location.py | 302 + .../plugins/modules/ome_device_mgmt_network.py | 778 + .../plugins/modules/ome_device_network_services.py | 398 + .../plugins/modules/ome_device_power_settings.py | 341 + .../plugins/modules/ome_device_quick_deploy.py | 674 + .../openmanage/plugins/modules/ome_devices.py | 445 + .../openmanage/plugins/modules/ome_diagnostics.py | 518 + .../openmanage/plugins/modules/ome_discovery.py | 1067 + .../plugins/modules/ome_domain_user_groups.py | 344 + .../openmanage/plugins/modules/ome_firmware.py | 653 + .../plugins/modules/ome_firmware_baseline.py | 550 + .../ome_firmware_baseline_compliance_info.py | 420 + .../plugins/modules/ome_firmware_baseline_info.py | 155 + .../plugins/modules/ome_firmware_catalog.py | 644 + .../openmanage/plugins/modules/ome_groups.py | 452 + .../plugins/modules/ome_identity_pool.py | 603 + .../openmanage/plugins/modules/ome_job_info.py | 210 + .../plugins/modules/ome_network_port_breakout.py | 283 + .../openmanage/plugins/modules/ome_network_vlan.py | 349 + .../plugins/modules/ome_network_vlan_info.py | 263 + .../openmanage/plugins/modules/ome_powerstate.py | 277 + .../openmanage/plugins/modules/ome_profile.py | 863 + .../modules/ome_server_interface_profile_info.py | 262 + 
.../modules/ome_server_interface_profiles.py | 425 + .../openmanage/plugins/modules/ome_smart_fabric.py | 735 + .../plugins/modules/ome_smart_fabric_uplink.py | 544 + .../openmanage/plugins/modules/ome_template.py | 993 + .../plugins/modules/ome_template_identity_pool.py | 193 + .../plugins/modules/ome_template_info.py | 168 + .../plugins/modules/ome_template_network_vlan.py | 448 + .../dellemc/openmanage/plugins/modules/ome_user.py | 264 + .../openmanage/plugins/modules/ome_user_info.py | 169 + .../plugins/modules/redfish_event_subscription.py | 335 + .../openmanage/plugins/modules/redfish_firmware.py | 219 + .../plugins/modules/redfish_powerstate.py | 263 + .../plugins/modules/redfish_storage_volume.py | 633 + .../dellemc/openmanage/requirements.txt | 2 + .../dellemc/openmanage/requirements.yml | 2 + .../dellemc/openmanage/tests/.gitignore | 4 + .../dellemc/openmanage/tests/README.md | 54 + .../dellemc/openmanage/tests/__init__.py | 0 .../dellemc/openmanage/tests/requirements.txt | 9 + .../openmanage/tests/sanity/ignore-2.10.txt | 3 + .../openmanage/tests/sanity/ignore-2.11.txt | 3 + .../openmanage/tests/sanity/ignore-2.12.txt | 3 + .../dellemc/openmanage/tests/sanity/ignore-2.9.txt | 7 + .../dellemc/openmanage/tests/unit/__init__.py | 0 .../openmanage/tests/unit/plugins/__init__.py | 0 .../tests/unit/plugins/module_utils/__init__.py | 0 .../tests/unit/plugins/module_utils/test_ome.py | 284 + .../tests/unit/plugins/modules/__init__.py | 0 .../tests/unit/plugins/modules/common.py | 81 + .../tests/unit/plugins/modules/conftest.py | 89 + .../test_dellemc_configure_idrac_eventing.py | 237 + .../test_dellemc_configure_idrac_services.py | 254 + .../modules/test_dellemc_get_firmware_inventory.py | 108 + .../modules/test_dellemc_get_system_inventory.py | 75 + .../modules/test_dellemc_idrac_lc_attributes.py | 185 + .../modules/test_dellemc_idrac_storage_volume.py | 437 + .../modules/test_dellemc_system_lockdown_mode.py | 126 + 
.../unit/plugins/modules/test_idrac_attributes.py | 307 + .../tests/unit/plugins/modules/test_idrac_bios.py | 587 + .../tests/unit/plugins/modules/test_idrac_boot.py | 256 + .../plugins/modules/test_idrac_certificates.py | 298 + .../unit/plugins/modules/test_idrac_firmware.py | 625 + .../plugins/modules/test_idrac_firmware_info.py | 77 + ...t_idrac_lifecycle_controller_job_status_info.py | 78 + .../test_idrac_lifecycle_controller_jobs.py | 91 + .../test_idrac_lifecycle_controller_logs.py | 108 + .../test_idrac_lifecycle_controller_status_info.py | 82 + .../unit/plugins/modules/test_idrac_network.py | 286 + .../plugins/modules/test_idrac_os_deployment.py | 166 + .../test_idrac_redfish_storage_controller.py | 316 + .../tests/unit/plugins/modules/test_idrac_reset.py | 95 + .../modules/test_idrac_server_config_profile.py | 356 + .../unit/plugins/modules/test_idrac_syslog.py | 197 + .../unit/plugins/modules/test_idrac_system_info.py | 78 + .../plugins/modules/test_idrac_timezone_ntp.py | 228 + .../tests/unit/plugins/modules/test_idrac_user.py | 350 + .../plugins/modules/test_idrac_virtual_media.py | 251 + .../plugins/modules/test_ome_active_directory.py | 250 + .../modules/test_ome_application_alerts_smtp.py | 457 + .../modules/test_ome_application_alerts_syslog.py | 248 + .../modules/test_ome_application_certificate.py | 122 + .../test_ome_application_console_preferences.py | 2240 +++ .../test_ome_application_network_address.py | 425 + .../modules/test_ome_application_network_proxy.py | 297 + .../test_ome_application_network_settings.py | 381 + .../modules/test_ome_application_network_time.py | 584 + .../test_ome_application_network_webserver.py | 143 + .../test_ome_application_security_settings.py | 400 + .../unit/plugins/modules/test_ome_chassis_slots.py | 297 + .../test_ome_configuration_compliance_baseline.py | 1195 ++ .../test_ome_configuration_compliance_info.py | 87 + .../unit/plugins/modules/test_ome_device_group.py | 602 + 
.../unit/plugins/modules/test_ome_device_info.py | 281 + .../test_ome_device_local_access_configuration.py | 135 + .../plugins/modules/test_ome_device_location.py | 130 + .../modules/test_ome_device_mgmt_network.py | 408 + .../modules/test_ome_device_network_services.py | 185 + .../modules/test_ome_device_power_settings.py | 122 + .../modules/test_ome_device_quick_deploy.py | 173 + .../tests/unit/plugins/modules/test_ome_devices.py | 467 + .../unit/plugins/modules/test_ome_diagnostics.py | 300 + .../unit/plugins/modules/test_ome_discovery.py | 460 + .../plugins/modules/test_ome_domain_user_groups.py | 198 + .../unit/plugins/modules/test_ome_firmware.py | 554 + .../plugins/modules/test_ome_firmware_baseline.py | 554 + .../test_ome_firmware_baseline_compliance_info.py | 537 + .../modules/test_ome_firmware_baseline_info.py | 136 + .../plugins/modules/test_ome_firmware_catalog.py | 864 + .../tests/unit/plugins/modules/test_ome_groups.py | 274 + .../unit/plugins/modules/test_ome_identity_pool.py | 1346 ++ .../unit/plugins/modules/test_ome_job_info.py | 106 + .../modules/test_ome_network_port_breakout.py | 243 + .../unit/plugins/modules/test_ome_network_vlan.py | 210 + .../plugins/modules/test_ome_network_vlan_info.py | 195 + .../unit/plugins/modules/test_ome_powerstate.py | 436 + .../tests/unit/plugins/modules/test_ome_profile.py | 547 + .../test_ome_server_interface_profile_info.py | 95 + .../modules/test_ome_server_interface_profiles.py | 699 + .../unit/plugins/modules/test_ome_smart_fabric.py | 1892 ++ .../modules/test_ome_smart_fabric_uplink.py | 386 + .../unit/plugins/modules/test_ome_template.py | 602 + .../modules/test_ome_template_identity_pool.py | 160 + .../unit/plugins/modules/test_ome_template_info.py | 98 + .../modules/test_ome_template_network_vlan.py | 349 + .../tests/unit/plugins/modules/test_ome_user.py | 191 + .../unit/plugins/modules/test_ome_user_info.py | 99 + .../modules/test_redfish_event_subscription.py | 452 + 
.../unit/plugins/modules/test_redfish_firmware.py | 272 + .../plugins/modules/test_redfish_powerstate.py | 475 + .../plugins/modules/test_redfish_storage_volume.py | 610 + .../os10/.github/workflows/ansible-test.yml | 33 + ansible_collections/dellemc/os10/.gitignore | 387 + ansible_collections/dellemc/os10/FILES.json | 8951 +++++++++ ansible_collections/dellemc/os10/LICENSE | 674 + ansible_collections/dellemc/os10/MANIFEST.json | 38 + ansible_collections/dellemc/os10/README.md | 93 + .../dellemc/os10/changelogs/CHANGELOG.rst | 116 + .../dellemc/os10/changelogs/changelog.yaml | 124 + .../dellemc/os10/changelogs/config.yaml | 30 + .../os10/docs/dellemc.os10.os10_command_module.rst | 446 + .../os10/docs/dellemc.os10.os10_config_module.rst | 606 + .../os10/docs/dellemc.os10.os10_facts_module.rst | 511 + ansible_collections/dellemc/os10/docs/os10_aaa.md | 136 + ansible_collections/dellemc/os10/docs/os10_acl.md | 130 + ansible_collections/dellemc/os10/docs/os10_bfd.md | 89 + ansible_collections/dellemc/os10/docs/os10_bgp.md | 729 + .../dellemc/os10/docs/os10_copy_config.md | 131 + ansible_collections/dellemc/os10/docs/os10_dns.md | 125 + ansible_collections/dellemc/os10/docs/os10_ecmp.md | 78 + .../dellemc/os10/docs/os10_fabric_summary.md | 119 + .../dellemc/os10/docs/os10_flow_monitor.md | 152 + .../dellemc/os10/docs/os10_image_upgrade.md | 73 + .../dellemc/os10/docs/os10_interface.md | 178 + ansible_collections/dellemc/os10/docs/os10_lag.md | 103 + ansible_collections/dellemc/os10/docs/os10_lldp.md | 149 + .../dellemc/os10/docs/os10_logging.md | 97 + .../dellemc/os10/docs/os10_network_validation.md | 304 + ansible_collections/dellemc/os10/docs/os10_ntp.md | 124 + .../dellemc/os10/docs/os10_prefix_list.md | 104 + ansible_collections/dellemc/os10/docs/os10_qos.md | 90 + .../dellemc/os10/docs/os10_raguard.md | 126 + .../dellemc/os10/docs/os10_route_map.md | 190 + ansible_collections/dellemc/os10/docs/os10_snmp.md | 269 + .../dellemc/os10/docs/os10_system.md | 126 + 
.../dellemc/os10/docs/os10_template.md | 75 + .../dellemc/os10/docs/os10_uplink.md | 109 + .../dellemc/os10/docs/os10_users.md | 89 + ansible_collections/dellemc/os10/docs/os10_vlan.md | 123 + ansible_collections/dellemc/os10/docs/os10_vlt.md | 108 + ansible_collections/dellemc/os10/docs/os10_vrf.md | 143 + ansible_collections/dellemc/os10/docs/os10_vrrp.md | 139 + .../dellemc/os10/docs/os10_vxlan.md | 259 + ansible_collections/dellemc/os10/docs/os10_xstp.md | 196 + ansible_collections/dellemc/os10/docs/roles.rst | 193 + ansible_collections/dellemc/os10/meta/runtime.yml | 8 + .../os10/playbooks/clos_fabric_ebgp/README.md | 37 + .../playbooks/clos_fabric_ebgp/datacenter.yaml | 11 + .../os10/playbooks/clos_fabric_ebgp/group_vars/all | 9 + .../clos_fabric_ebgp/group_vars/spine.yaml | 85 + .../clos_fabric_ebgp/host_vars/leaf1.yaml | 77 + .../clos_fabric_ebgp/host_vars/leaf2.yaml | 81 + .../clos_fabric_ebgp/host_vars/leaf3.yaml | 81 + .../clos_fabric_ebgp/host_vars/leaf4.yaml | 77 + .../clos_fabric_ebgp/host_vars/spine1.yaml | 61 + .../clos_fabric_ebgp/host_vars/spine2.yaml | 60 + .../os10/playbooks/clos_fabric_ebgp/inventory.yaml | 20 + .../dellemc/os10/playbooks/vxlan_evpn/README.md | 63 + .../os10/playbooks/vxlan_evpn/datacenter.yaml | 16 + .../playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml | 210 + .../playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml | 194 + .../playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml | 206 + .../playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml | 200 + .../playbooks/vxlan_evpn/host_vars/spine1.yaml | 95 + .../playbooks/vxlan_evpn/host_vars/spine2.yaml | 95 + .../os10/playbooks/vxlan_evpn/inventory.yaml | 20 + .../playbooks/vxlan_evpn/vxlan_evpn_topology.png | Bin 0 -> 101145 bytes .../dellemc/os10/plugins/action/os10.py | 94 + .../dellemc/os10/plugins/action/textfsm_parser.py | 81 + .../dellemc/os10/plugins/cliconf/os10.py | 88 + .../dellemc/os10/plugins/doc_fragments/os10.py | 75 + .../os10/plugins/module_utils/network/__init__.py | 0 
.../module_utils/network/base_network_show.py | 42 + .../os10/plugins/module_utils/network/os10.py | 146 + .../dellemc/os10/plugins/modules/__init__.py | 0 .../os10/plugins/modules/base_xml_to_dict.py | 124 + .../dellemc/os10/plugins/modules/bgp_validate.py | 303 + .../dellemc/os10/plugins/modules/mtu_validate.py | 220 + .../dellemc/os10/plugins/modules/os10_command.py | 229 + .../dellemc/os10/plugins/modules/os10_config.py | 346 + .../dellemc/os10/plugins/modules/os10_facts.py | 505 + .../plugins/modules/show_system_network_summary.py | 145 + .../dellemc/os10/plugins/modules/vlt_validate.py | 236 + .../os10/plugins/modules/wiring_validate.py | 246 + .../dellemc/os10/plugins/terminal/os10.py | 81 + .../dellemc/os10/roles/os10_aaa/LICENSE | 674 + .../dellemc/os10/roles/os10_aaa/README.md | 136 + .../dellemc/os10/roles/os10_aaa/defaults/main.yml | 16 + .../dellemc/os10/roles/os10_aaa/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_aaa/meta/main.yml | 17 + .../dellemc/os10/roles/os10_aaa/tasks/main.yml | 17 + .../os10/roles/os10_aaa/templates/os10_aaa.j2 | 148 + .../os10/roles/os10_aaa/tests/inventory.yaml | 22 + .../os10/roles/os10_aaa/tests/main.os10.yaml | 35 + .../dellemc/os10/roles/os10_aaa/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_aaa/vars/main.yml | 1 + .../dellemc/os10/roles/os10_acl/LICENSE | 674 + .../dellemc/os10/roles/os10_acl/README.md | 130 + .../dellemc/os10/roles/os10_acl/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_acl/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_acl/meta/main.yml | 17 + .../dellemc/os10/roles/os10_acl/tasks/main.yml | 16 + .../os10/roles/os10_acl/templates/os10_acl.j2 | 212 + .../os10/roles/os10_acl/tests/inventory.yaml | 22 + .../os10/roles/os10_acl/tests/main.os10.yaml | 33 + .../dellemc/os10/roles/os10_acl/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_acl/vars/main.yml | 2 + .../dellemc/os10/roles/os10_bfd/LICENSE | 674 + .../dellemc/os10/roles/os10_bfd/README.md | 89 + 
.../dellemc/os10/roles/os10_bfd/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_bfd/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_bfd/meta/main.yml | 18 + .../dellemc/os10/roles/os10_bfd/tasks/main.yml | 16 + .../os10/roles/os10_bfd/templates/os10_bfd.j2 | 34 + .../os10/roles/os10_bfd/tests/inventory.yaml | 22 + .../dellemc/os10/roles/os10_bfd/tests/main.yaml | 11 + .../dellemc/os10/roles/os10_bfd/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_bfd/vars/main.yml | 2 + .../dellemc/os10/roles/os10_bgp/LICENSE | 674 + .../dellemc/os10/roles/os10_bgp/README.md | 729 + .../dellemc/os10/roles/os10_bgp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_bgp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_bgp/meta/main.yml | 18 + .../dellemc/os10/roles/os10_bgp/tasks/main.yml | 16 + .../os10/roles/os10_bgp/templates/os10_bgp.j2 | 1244 ++ .../os10/roles/os10_bgp/tests/inventory.yaml | 22 + .../os10/roles/os10_bgp/tests/main.os10.yaml | 384 + .../dellemc/os10/roles/os10_bgp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_bgp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_copy_config/LICENSE | 674 + .../dellemc/os10/roles/os10_copy_config/README.md | 131 + .../os10/roles/os10_copy_config/defaults/main.yml | 2 + .../os10/roles/os10_copy_config/handlers/main.yml | 2 + .../os10/roles/os10_copy_config/meta/main.yml | 19 + .../os10/roles/os10_copy_config/tasks/main.yml | 7 + .../os10/roles/os10_copy_config/templates/leaf1.j2 | 3 + .../os10/roles/os10_copy_config/tests/inventory | 2 + .../os10/roles/os10_copy_config/tests/main.yml | 1 + .../os10/roles/os10_copy_config/tests/test.yml | 5 + .../os10/roles/os10_copy_config/vars/main.yml | 2 + .../dellemc/os10/roles/os10_dns/LICENSE | 674 + .../dellemc/os10/roles/os10_dns/README.md | 125 + .../dellemc/os10/roles/os10_dns/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_dns/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_dns/meta/main.yml | 17 + .../dellemc/os10/roles/os10_dns/tasks/main.yml | 17 + 
.../os10/roles/os10_dns/templates/os10_dns.j2 | 101 + .../os10/roles/os10_dns/tests/inventory.yaml | 22 + .../dellemc/os10/roles/os10_dns/tests/main.yaml | 43 + .../dellemc/os10/roles/os10_dns/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_dns/vars/main.yml | 2 + .../dellemc/os10/roles/os10_ecmp/LICENSE | 674 + .../dellemc/os10/roles/os10_ecmp/README.md | 78 + .../dellemc/os10/roles/os10_ecmp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_ecmp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_ecmp/meta/main.yml | 17 + .../dellemc/os10/roles/os10_ecmp/tasks/main.yml | 17 + .../os10/roles/os10_ecmp/templates/os10_ecmp.j2 | 25 + .../os10/roles/os10_ecmp/tests/inventory.yaml | 22 + .../os10/roles/os10_ecmp/tests/main.os10.yaml | 7 + .../dellemc/os10/roles/os10_ecmp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_ecmp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_fabric_summary/LICENSE | 674 + .../os10/roles/os10_fabric_summary/README.md | 119 + .../os10/roles/os10_fabric_summary/meta/main.yml | 18 + .../os10/roles/os10_fabric_summary/tasks/main.yml | 20 + .../tests/host_vars/site1-spine1 | 12 + .../tests/host_vars/site1-spine2 | 12 + .../tests/host_vars/site2-spine1 | 12 + .../tests/host_vars/site2-spine2 | 12 + .../roles/os10_fabric_summary/tests/inventory.yaml | 14 + .../os10/roles/os10_fabric_summary/tests/main.yaml | 11 + .../os10/roles/os10_fabric_summary/tests/test.yml | 7 + .../dellemc/os10/roles/os10_flow_monitor/LICENSE | 674 + .../dellemc/os10/roles/os10_flow_monitor/README.md | 152 + .../os10/roles/os10_flow_monitor/defaults/main.yml | 2 + .../os10/roles/os10_flow_monitor/handlers/main.yml | 2 + .../os10/roles/os10_flow_monitor/meta/main.yml | 18 + .../os10/roles/os10_flow_monitor/tasks/main.yml | 16 + .../templates/os10_flow_monitor.j2 | 86 + .../roles/os10_flow_monitor/tests/inventory.yaml | 22 + .../os10/roles/os10_flow_monitor/tests/main.yaml | 33 + .../os10/roles/os10_flow_monitor/tests/test.yaml | 5 + 
.../os10/roles/os10_flow_monitor/vars/main.yml | 2 + .../dellemc/os10/roles/os10_image_upgrade/LICENSE | 674 + .../os10/roles/os10_image_upgrade/README.md | 73 + .../roles/os10_image_upgrade/defaults/main.yml | 2 + .../roles/os10_image_upgrade/handlers/main.yml | 2 + .../os10/roles/os10_image_upgrade/meta/main.yml | 17 + .../os10/roles/os10_image_upgrade/tasks/main.yml | 37 + .../os10/roles/os10_image_upgrade/tests/inventory | 2 + .../os10/roles/os10_image_upgrade/tests/main.yml | 9 + .../os10/roles/os10_image_upgrade/tests/test.yml | 5 + .../os10/roles/os10_image_upgrade/vars/main.yml | 2 + .../dellemc/os10/roles/os10_interface/LICENSE | 674 + .../dellemc/os10/roles/os10_interface/README.md | 178 + .../os10/roles/os10_interface/defaults/main.yml | 2 + .../os10/roles/os10_interface/handlers/main.yml | 2 + .../os10/roles/os10_interface/meta/main.yml | 18 + .../os10/roles/os10_interface/tasks/main.yml | 16 + .../os10_interface/templates/os10_interface.j2 | 258 + .../os10/roles/os10_interface/tests/inventory.yaml | 22 + .../os10/roles/os10_interface/tests/main.os10.yaml | 72 + .../os10/roles/os10_interface/tests/test.yaml | 5 + .../os10/roles/os10_interface/vars/main.yml | 2 + .../dellemc/os10/roles/os10_lag/LICENSE | 674 + .../dellemc/os10/roles/os10_lag/README.md | 103 + .../dellemc/os10/roles/os10_lag/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_lag/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_lag/meta/main.yml | 18 + .../dellemc/os10/roles/os10_lag/tasks/main.yml | 16 + .../os10/roles/os10_lag/templates/os10_lag.j2 | 89 + .../os10/roles/os10_lag/tests/inventory.yaml | 22 + .../os10/roles/os10_lag/tests/main.os10.yaml | 15 + .../dellemc/os10/roles/os10_lag/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_lag/vars/main.yml | 2 + .../dellemc/os10/roles/os10_lldp/LICENSE | 674 + .../dellemc/os10/roles/os10_lldp/README.md | 149 + .../dellemc/os10/roles/os10_lldp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_lldp/handlers/main.yml | 2 + 
.../dellemc/os10/roles/os10_lldp/meta/main.yml | 19 + .../dellemc/os10/roles/os10_lldp/tasks/main.yml | 16 + .../os10/roles/os10_lldp/templates/os10_lldp.j2 | 195 + .../os10/roles/os10_lldp/tests/inventory.yaml | 22 + .../os10/roles/os10_lldp/tests/main.os10.yaml | 48 + .../dellemc/os10/roles/os10_lldp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_lldp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_logging/LICENSE | 674 + .../dellemc/os10/roles/os10_logging/README.md | 97 + .../os10/roles/os10_logging/defaults/main.yml | 2 + .../os10/roles/os10_logging/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_logging/meta/main.yml | 18 + .../dellemc/os10/roles/os10_logging/tasks/main.yml | 16 + .../roles/os10_logging/templates/os10_logging.j2 | 67 + .../os10/roles/os10_logging/tests/inventory.yaml | 22 + .../os10/roles/os10_logging/tests/main.os10.yaml | 15 + .../os10/roles/os10_logging/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_logging/vars/main.yml | 2 + .../os10/roles/os10_network_validation/LICENSE | 674 + .../os10/roles/os10_network_validation/README.md | 304 + .../roles/os10_network_validation/meta/main.yml | 21 + .../tasks/bgp_validation.yaml | 33 + .../roles/os10_network_validation/tasks/main.yaml | 9 + .../tasks/mtu_validation.yaml | 32 + .../tasks/vlt_validation.yaml | 44 + .../tasks/wiring_validation.yaml | 24 + .../os10_network_validation/tests/group_vars/all | 30 + .../tests/host_vars/site1-spine1 | 11 + .../tests/host_vars/site1-spine2 | 11 + .../tests/host_vars/site2-spine1 | 11 + .../tests/host_vars/site2-spine2 | 11 + .../os10_network_validation/tests/inventory.yaml | 14 + .../roles/os10_network_validation/tests/main.yaml | 11 + .../roles/os10_network_validation/tests/test.yaml | 56 + .../dellemc/os10/roles/os10_ntp/LICENSE | 674 + .../dellemc/os10/roles/os10_ntp/README.md | 124 + .../dellemc/os10/roles/os10_ntp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_ntp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_ntp/meta/main.yml | 18 + 
.../dellemc/os10/roles/os10_ntp/tasks/main.yml | 41 + .../os10/roles/os10_ntp/templates/os10_ntp.j2 | 125 + .../os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 | 18 + .../dellemc/os10/roles/os10_ntp/tests/inventory | 2 + .../os10/roles/os10_ntp/tests/main.os10.yaml | 25 + .../dellemc/os10/roles/os10_ntp/tests/test.yml | 5 + .../dellemc/os10/roles/os10_ntp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_prefix_list/LICENSE | 674 + .../dellemc/os10/roles/os10_prefix_list/README.md | 104 + .../os10/roles/os10_prefix_list/defaults/main.yml | 2 + .../os10/roles/os10_prefix_list/handlers/main.yml | 2 + .../os10_prefix_list/meta/.galaxy_install_info | 1 + .../os10/roles/os10_prefix_list/meta/main.yml | 16 + .../os10/roles/os10_prefix_list/tasks/main.yml | 16 + .../os10_prefix_list/templates/os10_prefix_list.j2 | 95 + .../roles/os10_prefix_list/tests/inventory.yaml | 22 + .../roles/os10_prefix_list/tests/main.os10.yaml | 20 + .../os10/roles/os10_prefix_list/tests/test.yaml | 5 + .../os10/roles/os10_prefix_list/vars/main.yml | 2 + .../dellemc/os10/roles/os10_qos/LICENSE | 674 + .../dellemc/os10/roles/os10_qos/README.md | 90 + .../dellemc/os10/roles/os10_qos/defaults/main.yml | 5 + .../dellemc/os10/roles/os10_qos/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_qos/meta/main.yml | 17 + .../dellemc/os10/roles/os10_qos/tasks/main.yml | 16 + .../os10/roles/os10_qos/templates/os10_qos.j2 | 48 + .../dellemc/os10/roles/os10_qos/tests/inventory | 2 + .../os10/roles/os10_qos/tests/main.os10.yaml | 11 + .../dellemc/os10/roles/os10_qos/tests/test.yml | 5 + .../dellemc/os10/roles/os10_qos/vars/main.yml | 2 + .../dellemc/os10/roles/os10_raguard/LICENSE | 674 + .../dellemc/os10/roles/os10_raguard/README.md | 126 + .../os10/roles/os10_raguard/defaults/main.yml | 2 + .../os10/roles/os10_raguard/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_raguard/meta/main.yml | 18 + .../dellemc/os10/roles/os10_raguard/tasks/main.yml | 17 + .../roles/os10_raguard/templates/os10_raguard.j2 | 174 + 
.../os10/roles/os10_raguard/tests/inventory.yaml | 2 + .../os10/roles/os10_raguard/tests/main.os10.yaml | 56 + .../os10/roles/os10_raguard/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_raguard/vars/main.yml | 2 + .../dellemc/os10/roles/os10_route_map/LICENSE | 674 + .../dellemc/os10/roles/os10_route_map/README.md | 190 + .../os10/roles/os10_route_map/defaults/main.yml | 2 + .../os10/roles/os10_route_map/handlers/main.yml | 2 + .../os10/roles/os10_route_map/meta/main.yml | 17 + .../os10/roles/os10_route_map/tasks/main.yml | 16 + .../os10_route_map/templates/os10_route_map.j2 | 348 + .../os10/roles/os10_route_map/tests/inventory | 2 + .../os10/roles/os10_route_map/tests/main.yaml | 55 + .../os10/roles/os10_route_map/tests/test.yml | 5 + .../os10/roles/os10_route_map/vars/main.yml | 2 + .../dellemc/os10/roles/os10_snmp/LICENSE | 674 + .../dellemc/os10/roles/os10_snmp/README.md | 269 + .../dellemc/os10/roles/os10_snmp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_snmp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_snmp/meta/main.yml | 18 + .../dellemc/os10/roles/os10_snmp/tasks/main.yml | 16 + .../os10/roles/os10_snmp/templates/os10_snmp.j2 | 441 + .../os10/roles/os10_snmp/tests/inventory.yaml | 22 + .../os10/roles/os10_snmp/tests/main.os10.yaml | 29 + .../dellemc/os10/roles/os10_snmp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_snmp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_system/LICENSE | 674 + .../dellemc/os10/roles/os10_system/README.md | 126 + .../os10/roles/os10_system/defaults/main.yml | 2 + .../os10/roles/os10_system/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_system/meta/main.yml | 18 + .../dellemc/os10/roles/os10_system/tasks/main.yml | 16 + .../roles/os10_system/templates/os10_system.j2 | 130 + .../os10/roles/os10_system/tests/inventory.yaml | 22 + .../os10/roles/os10_system/tests/main.os10.yaml | 31 + .../dellemc/os10/roles/os10_system/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_system/vars/main.yml | 2 + 
.../dellemc/os10/roles/os10_template/LICENSE | 674 + .../dellemc/os10/roles/os10_template/README.md | 75 + .../dellemc/os10/roles/os10_template/meta/main.yml | 18 + .../os10/roles/os10_template/tasks/main.yml | 24 + .../os10_template/tasks/show_ip_bgp_summary.yaml | 21 + .../tasks/show_ip_interface_brief.yaml | 21 + .../roles/os10_template/tasks/show_ip_vrf.yaml | 21 + .../os10_template/tasks/show_lldp_neighbors.yaml | 21 + .../tasks/show_port-channel_summary.yaml | 21 + .../show_spanning_tree_compatibility_mode.yaml | 21 + .../os10/roles/os10_template/tasks/show_vlan.yaml | 21 + .../tasks/show_vlt_err_disabled_ports.yaml | 20 + .../os10/roles/os10_template/tasks/textfsm.yaml | 3 + .../templates/os10_show_ip_bgp_summary.template | 16 + .../os10_show_ip_interface_brief.template | 9 + .../templates/os10_show_ip_vrf.template | 7 + .../templates/os10_show_lldp_neighbors.template | 7 + .../os10_show_port-channel_summary.template | 9 + ..._show_spanning_tree_compatibility_mode.template | 6 + .../templates/os10_show_vlan.template | 12 + .../os10_show_vlt_err_disabled_ports.template | 5 + .../os10/roles/os10_template/tests/group_vars/all | 3 + .../os10/roles/os10_template/tests/inventory.yaml | 22 + .../os10/roles/os10_template/tests/main.yaml | 5 + .../os10/roles/os10_template/tests/test.yaml | 9 + .../dellemc/os10/roles/os10_uplink/LICENSE | 674 + .../dellemc/os10/roles/os10_uplink/README.md | 109 + .../os10/roles/os10_uplink/defaults/main.yml | 2 + .../os10/roles/os10_uplink/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_uplink/meta/main.yml | 18 + .../dellemc/os10/roles/os10_uplink/tasks/main.yml | 16 + .../roles/os10_uplink/templates/os10_uplink.j2 | 102 + .../os10/roles/os10_uplink/tests/inventory.yaml | 22 + .../dellemc/os10/roles/os10_uplink/tests/main.yaml | 16 + .../dellemc/os10/roles/os10_uplink/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_uplink/vars/main.yml | 2 + .../dellemc/os10/roles/os10_users/LICENSE | 674 + 
.../dellemc/os10/roles/os10_users/README.md | 89 + .../os10/roles/os10_users/defaults/main.yml | 2 + .../os10/roles/os10_users/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_users/meta/main.yml | 18 + .../dellemc/os10/roles/os10_users/tasks/main.yml | 16 + .../os10/roles/os10_users/templates/os10_users.j2 | 27 + .../os10/roles/os10_users/tests/inventory.yaml | 22 + .../os10/roles/os10_users/tests/main.os10.yaml | 13 + .../dellemc/os10/roles/os10_users/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_users/vars/main.yml | 2 + .../dellemc/os10/roles/os10_vlan/LICENSE | 674 + .../dellemc/os10/roles/os10_vlan/README.md | 123 + .../dellemc/os10/roles/os10_vlan/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_vlan/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_vlan/meta/main.yml | 18 + .../dellemc/os10/roles/os10_vlan/tasks/main.yml | 16 + .../os10/roles/os10_vlan/templates/os10_vlan.j2 | 129 + .../os10/roles/os10_vlan/tests/inventory.yaml | 22 + .../os10/roles/os10_vlan/tests/main.os10.yaml | 31 + .../dellemc/os10/roles/os10_vlan/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_vlan/vars/main.yml | 2 + .../dellemc/os10/roles/os10_vlt/LICENSE | 674 + .../dellemc/os10/roles/os10_vlt/README.md | 108 + .../dellemc/os10/roles/os10_vlt/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_vlt/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_vlt/meta/main.yml | 18 + .../dellemc/os10/roles/os10_vlt/tasks/main.yml | 16 + .../os10/roles/os10_vlt/templates/os10_vlt.j2 | 108 + .../os10/roles/os10_vlt/tests/inventory.yaml | 22 + .../os10/roles/os10_vlt/tests/main.os10.yaml | 17 + .../dellemc/os10/roles/os10_vlt/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_vlt/vars/main.yml | 2 + .../dellemc/os10/roles/os10_vrf/LICENSE | 674 + .../dellemc/os10/roles/os10_vrf/README.md | 143 + .../dellemc/os10/roles/os10_vrf/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_vrf/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_vrf/meta/main.yml | 18 + 
.../dellemc/os10/roles/os10_vrf/tasks/main.yml | 16 + .../os10/roles/os10_vrf/templates/os10_vrf.j2 | 122 + .../os10/roles/os10_vrf/tests/inventory.yaml | 22 + .../os10/roles/os10_vrf/tests/main.os10.yaml | 33 + .../dellemc/os10/roles/os10_vrf/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_vrf/vars/main.yml | 2 + .../dellemc/os10/roles/os10_vrrp/LICENSE | 674 + .../dellemc/os10/roles/os10_vrrp/README.md | 139 + .../dellemc/os10/roles/os10_vrrp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_vrrp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_vrrp/meta/main.yml | 19 + .../dellemc/os10/roles/os10_vrrp/tasks/main.yml | 16 + .../os10/roles/os10_vrrp/templates/os10_vrrp.j2 | 154 + .../os10/roles/os10_vrrp/tests/inventory.yaml | 22 + .../os10/roles/os10_vrrp/tests/main.os10.yaml | 45 + .../dellemc/os10/roles/os10_vrrp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_vrrp/vars/main.yml | 2 + .../dellemc/os10/roles/os10_vxlan/LICENSE | 674 + .../dellemc/os10/roles/os10_vxlan/README.md | 259 + .../os10/roles/os10_vxlan/defaults/main.yml | 2 + .../os10/roles/os10_vxlan/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_vxlan/meta/main.yml | 20 + .../dellemc/os10/roles/os10_vxlan/tasks/main.yml | 16 + .../os10/roles/os10_vxlan/templates/os10_vxlan.j2 | 434 + .../os10/roles/os10_vxlan/tests/inventory.yaml | 22 + .../dellemc/os10/roles/os10_vxlan/tests/main.yaml | 112 + .../dellemc/os10/roles/os10_vxlan/tests/test.yml | 5 + .../dellemc/os10/roles/os10_vxlan/vars/main.yml | 2 + .../dellemc/os10/roles/os10_xstp/LICENSE | 674 + .../dellemc/os10/roles/os10_xstp/README.md | 196 + .../dellemc/os10/roles/os10_xstp/defaults/main.yml | 2 + .../dellemc/os10/roles/os10_xstp/handlers/main.yml | 2 + .../dellemc/os10/roles/os10_xstp/meta/main.yml | 18 + .../dellemc/os10/roles/os10_xstp/tasks/main.yml | 16 + .../os10/roles/os10_xstp/templates/os10_xstp.j2 | 398 + .../os10/roles/os10_xstp/tests/inventory.yaml | 22 + .../dellemc/os10/roles/os10_xstp/tests/main.yaml | 74 + 
.../dellemc/os10/roles/os10_xstp/tests/test.yaml | 5 + .../dellemc/os10/roles/os10_xstp/vars/main.yml | 2 + .../os10/tests/integration/target-prefixes.network | 1 + .../targets/os10_aaa_role/defaults/main.yaml | 2 + .../targets/os10_aaa_role/tasks/main.yaml | 3 + .../os10_aaa_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_aaa_role/tasks/tests.yaml | 14 + .../templates/aaa_basic/steps/01_aaa_configure.j2 | 15 + .../templates/aaa_basic/steps/02_aaa_update.j2 | 24 + .../aaa_basic/steps/03_aaa_empty_string.j2 | 24 + .../templates/aaa_basic/steps/04_aaa_absent.j2 | 23 + .../templates/aaa_basic/steps/05_aaa_stateless.j2 | 20 + .../templates/aaa_basic/steps/teardown.j2 | 23 + .../targets/os10_aaa_role/tests/aaa_basic.yaml | 0 .../targets/os10_aaa_role/vars/main.yaml | 2 + .../targets/os10_acl_role/defaults/main.yaml | 3 + .../targets/os10_acl_role/tasks/main.yaml | 3 + .../os10_acl_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_acl_role/tasks/tests.yaml | 14 + .../templates/acl_basic/steps/01_acl_config.j2 | 23 + .../templates/acl_basic/steps/02_acl_stage.j2 | 17 + .../templates/acl_basic/steps/03_acl_update.j2 | 31 + .../acl_basic/steps/04_acl_suboptions_absent.j2 | 30 + .../acl_basic/steps/05_acl_suboptions_stateless.j2 | 34 + .../templates/acl_basic/steps/06_acl_absent.j2 | 7 + .../templates/acl_basic/steps/07_acl_stateless.j2 | 24 + .../templates/acl_basic/steps/teardown.j2 | 29 + .../targets/os10_acl_role/tests/acl_basic.yaml | 0 .../targets/os10_acl_role/vars/main.yaml | 5 + .../targets/os10_bgp_role/defaults/main.yaml | 3 + .../targets/os10_bgp_role/tasks/main.yaml | 3 + .../os10_bgp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 
20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_bgp_role/tasks/tests.yaml | 14 + .../bgp_vrf/steps/01_bgp_default_vrf_config.j2 | 191 + .../bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 | 185 + .../bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 | 153 + .../steps/04_bgp_non_default_vrf_unconfig.j2 | 147 + .../steps/05_bgp_default_non_default_vrf_config.j2 | 314 + .../06_bgp_default_non_default_vrf_unconfig.j2 | 313 + .../os10_bgp_role/templates/bgp_vrf/steps/setup.j2 | 13 + .../templates/bgp_vrf/steps/teardown.j2 | 13 + .../targets/os10_bgp_role/tests/bgp_vrf.yaml | 0 .../targets/os10_bgp_role/vars/main.yaml | 7 + .../targets/os10_command/defaults/main.yaml | 2 + .../targets/os10_command/tasks/cli.yaml | 16 + .../targets/os10_command/tasks/main.yaml | 2 + .../os10_command/tests/cli/bad_operator.yaml | 19 + .../targets/os10_command/tests/cli/contains.yaml | 19 + .../targets/os10_command/tests/cli/invalid.yaml | 26 + .../targets/os10_command/tests/cli/output.yaml | 27 + .../targets/os10_command/tests/cli/timeout.yaml | 18 + .../targets/os10_config/defaults/main.yaml | 2 + .../integration/targets/os10_config/tasks/cli.yaml | 15 + .../targets/os10_config/tasks/main.yaml | 2 + .../targets/os10_config/tests/cli/sublevel.yaml | 38 + .../os10_config/tests/cli/sublevel_block.yaml | 58 + .../os10_config/tests/cli/sublevel_exact.yaml | 62 + .../os10_config/tests/cli/sublevel_strict.yaml | 59 + .../targets/os10_config/tests/cli/toplevel.yaml | 33 + .../os10_config/tests/cli/toplevel_after.yaml | 40 + .../os10_config/tests/cli/toplevel_before.yaml | 40 + .../tests/cli/toplevel_nonidempotent.yaml | 35 + .../targets/os10_ecmp_role/defaults/main.yaml | 3 + .../targets/os10_ecmp_role/tasks/main.yaml | 3 + .../os10_ecmp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + 
.../targets/os10_ecmp_role/tasks/tests.yaml | 14 + .../ecmp_basic/steps/01_ecmp_configure.j2 | 4 + .../templates/ecmp_basic/steps/02_ecmp_update.j2 | 4 + .../templates/ecmp_basic/steps/03_ecmp_unconfig.j2 | 4 + .../templates/ecmp_basic/steps/teardown.j2 | 3 + .../targets/os10_ecmp_role/tests/ecmp_basic.yaml | 0 .../targets/os10_ecmp_role/vars/main.yaml | 5 + .../targets/os10_facts/defaults/main.yaml | 2 + .../integration/targets/os10_facts/tasks/cli.yaml | 16 + .../integration/targets/os10_facts/tasks/main.yaml | 2 + .../targets/os10_facts/tests/cli/facts.yaml | 45 + .../os10_flow_monitor_role/defaults/main.yaml | 2 + .../targets/os10_flow_monitor_role/tasks/main.yaml | 3 + .../tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../os10_flow_monitor_role/tasks/tests.yaml | 14 + .../steps/01_flow_monitor_configure.j2 | 14 + .../steps/02_flow_monitory_update.j2 | 16 + .../steps/03_flow_monitor_options_default.j2 | 16 + .../templates/flow_monitor_basic/steps/setup.j2 | 3 + .../templates/flow_monitor_basic/steps/teardown.j2 | 23 + .../tests/flow_monitor_basic.yaml | 0 .../targets/os10_flow_monitor_role/vars/main.yaml | 3 + .../targets/os10_interface_role/defaults/main.yaml | 2 + .../targets/os10_interface_role/tasks/main.yaml | 3 + .../tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_interface_role/tasks/tests.yaml | 14 + .../interface_basic/steps/01_interface_config.j2 | 6 + .../steps/02_interface_dynamic_ip.j2 | 14 + .../steps/03_interface_static_ip.j2 | 12 + .../steps/04_interface_flowcontrol.j2 | 14 + .../steps/05_interface_switchport.j2 | 17 + .../interface_basic/steps/06_interface_vlan.j2 | 17 + .../interface_basic/steps/07_interface_range.j2 | 4 + .../templates/interface_basic/steps/teardown.j2 | 
45 + .../os10_interface_role/tests/interface_basic.yaml | 0 .../targets/os10_interface_role/vars/main.yaml | 2 + .../targets/os10_lag_role/defaults/main.yaml | 2 + .../targets/os10_lag_role/tasks/main.yaml | 3 + .../os10_lag_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_lag_role/tasks/tests.yaml | 14 + .../templates/lag_basic/steps/01_lag_configure.j2 | 12 + .../templates/lag_basic/steps/02_lag_update.j2 | 12 + .../lag_basic/steps/03_lag_options_reset.j2 | 12 + .../templates/lag_basic/steps/04_lag_absent.j2 | 12 + .../templates/lag_basic/steps/teardown.j2 | 12 + .../targets/os10_lag_role/tests/lag_basic.yaml | 0 .../targets/os10_lag_role/vars/main.yaml | 2 + .../targets/os10_lldp_role/defaults/main.yaml | 2 + .../targets/os10_lldp_role/tasks/main.yaml | 3 + .../os10_lldp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_lldp_role/tasks/tests.yaml | 14 + .../lldp_basic/steps/01_lldp_configure.j2 | 44 + .../templates/lldp_basic/steps/02_lldp_update.j2 | 44 + .../lldp_basic/steps/03_lldp_options_absent.j2 | 44 + .../templates/lldp_basic/steps/teardown.j2 | 46 + .../targets/os10_lldp_role/tests/lldp_basic.yaml | 0 .../targets/os10_lldp_role/vars/main.yaml | 2 + .../targets/os10_logging_role/defaults/main.yaml | 3 + .../targets/os10_logging_role/tasks/main.yaml | 3 + .../tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_logging_role/tasks/tests.yaml | 14 + .../logging_basic/steps/01_logging_enable.j2 | 6 + .../logging_basic/steps/02_logging_update.j2 | 6 + .../logging_basic/steps/03_logging_disable.j2 | 8 + 
.../logging_basic/steps/04_logging_server_add.j2 | 12 + .../logging_basic/steps/05_logging_server_del.j2 | 12 + .../templates/logging_basic/steps/teardown.j2 | 11 + .../os10_logging_role/tests/logging_basic.yaml | 0 .../targets/os10_logging_role/vars/main.yaml | 5 + .../targets/os10_ntp_role/defaults/main.yaml | 2 + .../targets/os10_ntp_role/tasks/main.yaml | 3 + .../os10_ntp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_ntp_role/tasks/tests.yaml | 14 + .../templates/ntp_basic/steps/01_ntp_configure.j2 | 24 + .../templates/ntp_basic/steps/02_ntp_update.j2 | 20 + .../ntp_basic/steps/03_ntp_suboptions_absent.j2 | 14 + .../ntp_basic/steps/04_ntp_suboptions_stateless.j2 | 11 + .../ntp_basic/steps/05_ntp_empty_string.j2 | 16 + .../templates/ntp_basic/steps/setup.j2 | 4 + .../templates/ntp_basic/steps/teardown.j2 | 32 + .../targets/os10_ntp_role/tests/ntp_basic.yaml | 1 + .../targets/os10_ntp_role/vars/main.yaml | 3 + .../os10_prefix_list_role/defaults/main.yaml | 3 + .../targets/os10_prefix_list_role/tasks/main.yaml | 3 + .../tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_prefix_list_role/tasks/tests.yaml | 14 + .../steps/01_prefix_list_config.j2 | 17 + .../steps/02_prefix_list_update.j2 | 17 + .../steps/03_prefix_list_entry_absent.j2 | 17 + .../steps/04_prefix_list_absent.j2 | 16 + .../templates/prefix_list_basic/steps/teardown.j2 | 16 + .../tests/prefix_list_basic.yaml | 0 .../targets/os10_prefix_list_role/vars/main.yaml | 5 + .../targets/os10_qos_role/defaults/main.yaml | 3 + .../targets/os10_qos_role/tasks/main.yaml | 3 + .../os10_qos_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + 
.../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_qos_role/tasks/tests.yaml | 14 + .../templates/qos_basic/steps/01_qos_config.j2 | 10 + .../templates/qos_basic/steps/02_qos_update.j2 | 10 + .../templates/qos_basic/steps/03_qos_unconfig.j2 | 10 + .../templates/qos_basic/steps/teardown.j2 | 9 + .../targets/os10_qos_role/tests/qos_basic.yaml | 0 .../targets/os10_qos_role/vars/main.yaml | 5 + .../targets/os10_route_map_role/defaults/main.yaml | 3 + .../targets/os10_route_map_role/tasks/main.yaml | 3 + .../tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_route_map_role/tasks/tests.yaml | 14 + .../steps/01_route_map_configure.j2 | 48 + .../route_map_basic/steps/02_route_map_update.j2 | 48 + .../steps/03_route_map_options_unconfig.j2 | 48 + .../route_map_basic/steps/04_route_map_unconfig.j2 | 47 + .../templates/route_map_basic/steps/teardown.j2 | 47 + .../os10_route_map_role/tests/route_map_basic.yaml | 0 .../targets/os10_route_map_role/vars/main.yaml | 5 + .../targets/os10_snmp_role/defaults/main.yaml | 2 + .../targets/os10_snmp_role/tasks/main.yaml | 3 + .../os10_snmp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_snmp_role/tasks/tests.yaml | 14 + .../snmp_basic/steps/01_snmp_configure.j2 | 135 + .../templates/snmp_basic/steps/02_snmp_update.j2 | 144 + .../templates/snmp_basic/steps/03_snmp_unconfig.j2 | 147 + .../templates/snmp_basic/steps/setup.j2 | 4 + .../templates/snmp_basic/steps/teardown.j2 | 147 + .../targets/os10_snmp_role/tests/snmp_basic.yaml | 0 .../targets/os10_snmp_role/vars/main.yaml | 3 + .../targets/os10_system_role/defaults/main.yaml | 2 + .../targets/os10_system_role/tasks/main.yaml | 3 + .../os10_system_role/tasks/prepare_test_facts.yaml | 14 + 
.../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_system_role/tasks/tests.yaml | 14 + .../system_basic/steps/01_system_configure.j2 | 26 + .../system_basic/steps/02_system_update.j2 | 26 + .../system_basic/steps/03_system_unconfig.j2 | 26 + .../templates/system_basic/steps/teardown.j2 | 25 + .../os10_system_role/tests/system_basic.yaml | 0 .../targets/os10_system_role/vars/main.yaml | 2 + .../targets/os10_uplink_role/defaults/main.yaml | 2 + .../targets/os10_uplink_role/tasks/main.yaml | 3 + .../os10_uplink_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_uplink_role/tasks/tests.yaml | 14 + .../uplink_basic/steps/01_uplink_configure.j2 | 32 + .../uplink_basic/steps/02_uplink_update.j2 | 30 + .../uplink_basic/steps/03_uplink_absent.j2 | 30 + .../templates/uplink_basic/steps/setup.j2 | 5 + .../templates/uplink_basic/steps/teardown.j2 | 38 + .../os10_uplink_role/tests/uplink_basic.yaml | 0 .../targets/os10_uplink_role/vars/main.yaml | 3 + .../targets/os10_users_role/defaults/main.yaml | 2 + .../targets/os10_users_role/tasks/main.yaml | 3 + .../os10_users_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_users_role/tasks/tests.yaml | 14 + .../targets/os10_users_role/tasks_old/main.yaml | 15 + .../templates/users_basic/steps/01_users_add.j2 | 9 + .../templates/users_basic/steps/02_users_del.j2 | 5 + .../templates/users_basic/steps/teardown.j2 | 9 + .../targets/os10_users_role/tests/users_basic.yaml | 0 .../targets/os10_users_role/vars/main.yaml | 2 + .../targets/os10_vlan_role/defaults/main.yaml | 3 + .../targets/os10_vlan_role/tasks/main.yaml | 3 + 
.../os10_vlan_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_vlan_role/tasks/tests.yaml | 14 + .../vlan_basic/steps/01_vlan_configure.j2 | 17 + .../templates/vlan_basic/steps/02_vlan_update.j2 | 19 + .../vlan_basic/steps/03_vlan_member_port_range.j2 | 17 + .../templates/vlan_basic/steps/04_vlan_absent.j2 | 20 + .../templates/vlan_basic/steps/setup.j2 | 3 + .../templates/vlan_basic/steps/teardown.j2 | 25 + .../targets/os10_vlan_role/tests/vlan_basic.yaml | 0 .../targets/os10_vlan_role/vars/main.yaml | 6 + .../targets/os10_vlt_role/defaults/main.yaml | 3 + .../targets/os10_vlt_role/tasks/main.yaml | 3 + .../os10_vlt_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_vlt_role/tasks/tests.yaml | 14 + .../templates/vlt_basic/steps/01_vlt_configure.j2 | 15 + .../templates/vlt_basic/steps/02_vlt_update.j2 | 14 + .../templates/vlt_basic/steps/03_vlt_absent.j2 | 12 + .../templates/vlt_basic/steps/setup.j2 | 7 + .../templates/vlt_basic/steps/teardown.j2 | 23 + .../targets/os10_vlt_role/tests/vlt_basic.yaml | 0 .../targets/os10_vlt_role/vars/main.yaml | 7 + .../targets/os10_vrrp_role/defaults/main.yaml | 2 + .../targets/os10_vrrp_role/tasks/main.yaml | 3 + .../os10_vrrp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_vrrp_role/tasks/tests.yaml | 14 + .../vrrp_basic/steps/01_vrrp_configure.j2 | 43 + .../templates/vrrp_basic/steps/02_vrrp_update.j2 | 41 + .../vrrp_basic/steps/03_vrrp_options_absent.j2 | 41 + .../templates/vrrp_basic/steps/04_vrrp_absent.j2 | 41 + .../templates/vrrp_basic/steps/setup.j2 | 3 + 
.../templates/vrrp_basic/steps/teardown.j2 | 47 + .../targets/os10_vrrp_role/tests/vrrp_basic.yaml | 0 .../targets/os10_vrrp_role/vars/main.yaml | 3 + .../targets/os10_xstp_role/defaults/main.yaml | 2 + .../targets/os10_xstp_role/tasks/main.yaml | 3 + .../os10_xstp_role/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../tasks/testcase/run_test_case.yaml | 62 + .../tasks/testcase/run_test_step.yaml | 26 + .../targets/os10_xstp_role/tasks/tests.yaml | 14 + .../xstp_basic/steps/01_xstp_rstp_configure.j2 | 14 + .../xstp_basic/steps/02_xstp_rstp_defaults.j2 | 12 + .../xstp_basic/steps/03_xstp_pvst_configure.j2 | 16 + .../xstp_basic/steps/04_xstp_pvst_defaults.j2 | 16 + .../xstp_basic/steps/05_xstp_mstp_configure.j2 | 24 + .../xstp_basic/steps/06_xstp_mstp_defaults.j2 | 24 + .../steps/07_xstp_interface_configure.j2 | 25 + .../xstp_basic/steps/08_xstp_interface_defaults.j2 | 25 + .../templates/xstp_basic/steps/setup.j2 | 4 + .../templates/xstp_basic/steps/teardown.j2 | 60 + .../targets/os10_xstp_role/tests/xstp_basic.yaml | 0 .../targets/os10_xstp_role/vars/main.yaml | 3 + .../integration/targets/role_test/tasks/main.yaml | 3 + .../role_test/tasks/prepare_test_facts.yaml | 14 + .../tasks/testcase/include_os10_role.yaml | 20 + .../role_test/tasks/testcase/run_test_case.yaml | 62 + .../role_test/tasks/testcase/run_test_step.yaml | 26 + .../integration/targets/role_test/tasks/tests.yaml | 14 + .../dellemc/os10/tests/sanity/ignore-2.10.txt | 20 + .../dellemc/os10/tests/sanity/ignore-2.11.txt | 13 + .../dellemc/os10/tests/sanity/ignore-2.9.txt | 3 + .../dellemc/os10/tests/sanity/requirements.txt | 4 + .../tests/unit/modules/network/os10/__init__.py | 0 .../network/os10/fixtures/os10_config_config.cfg | 13 + .../network/os10/fixtures/os10_config_src.cfg | 12 + .../os10/fixtures/show_interface__display-xml | 19467 +++++++++++++++++++ .../os10/fixtures/show_lldp_neighbors__display-xml | 855 + .../show_processes_node-id_1__grep_Mem_colon_ | 1 
+ .../network/os10/fixtures/show_running-config | 252 + .../network/os10/fixtures/show_system__display-xml | 194 + .../modules/network/os10/fixtures/show_version | 9 + .../os10/fixtures/show_version__display-xml | 27 + .../tests/unit/modules/network/os10/os10_module.py | 90 + .../unit/modules/network/os10/test_os10_command.py | 110 + .../unit/modules/network/os10/test_os10_config.py | 150 + .../unit/modules/network/os10/test_os10_facts.py | 110 + ansible_collections/dellemc/os6/.ansible-lint | 2 + .../dellemc/os6/.github/workflows/ansible-test.yml | 33 + ansible_collections/dellemc/os6/.gitignore | 387 + ansible_collections/dellemc/os6/COPYING | 675 + ansible_collections/dellemc/os6/FILES.json | 2987 +++ ansible_collections/dellemc/os6/LICENSE | 674 + ansible_collections/dellemc/os6/MANIFEST.json | 37 + ansible_collections/dellemc/os6/README.md | 98 + .../dellemc/os6/changelogs/CHANGELOG.rst | 98 + .../dellemc/os6/changelogs/changelog.yaml | 112 + .../dellemc/os6/changelogs/config.yaml | 30 + ansible_collections/dellemc/os6/docs/os6_aaa.md | 1 + ansible_collections/dellemc/os6/docs/os6_acl.md | 1 + ansible_collections/dellemc/os6/docs/os6_bgp.md | 1 + .../dellemc/os6/docs/os6_interface.md | 1 + ansible_collections/dellemc/os6/docs/os6_lag.md | 1 + ansible_collections/dellemc/os6/docs/os6_lldp.md | 1 + .../dellemc/os6/docs/os6_logging.md | 1 + ansible_collections/dellemc/os6/docs/os6_ntp.md | 1 + ansible_collections/dellemc/os6/docs/os6_qos.md | 1 + ansible_collections/dellemc/os6/docs/os6_snmp.md | 1 + ansible_collections/dellemc/os6/docs/os6_system.md | 1 + ansible_collections/dellemc/os6/docs/os6_users.md | 1 + ansible_collections/dellemc/os6/docs/os6_vlan.md | 1 + ansible_collections/dellemc/os6/docs/os6_vrrp.md | 1 + ansible_collections/dellemc/os6/docs/os6_xstp.md | 1 + ansible_collections/dellemc/os6/docs/roles.rst | 94 + ansible_collections/dellemc/os6/meta/runtime.yml | 8 + .../dellemc/os6/playbooks/ibgp/README.md | 21 + 
.../dellemc/os6/playbooks/ibgp/group_vars/all | 4 + .../os6/playbooks/ibgp/host_vars/switch1.yaml | 47 + .../os6/playbooks/ibgp/host_vars/switch2.yaml | 47 + .../dellemc/os6/playbooks/ibgp/inventory.yaml | 6 + .../dellemc/os6/playbooks/ibgp/os6switch.yaml | 13 + .../dellemc/os6/plugins/action/__init__.py | 0 .../dellemc/os6/plugins/action/os6.py | 95 + .../dellemc/os6/plugins/cliconf/__init__.py | 0 .../dellemc/os6/plugins/cliconf/os6.py | 88 + .../dellemc/os6/plugins/doc_fragments/__init__.py | 0 .../dellemc/os6/plugins/doc_fragments/os6.py | 73 + .../os6/plugins/module_utils/network/__init__.py | 0 .../os6/plugins/module_utils/network/os6.py | 278 + .../dellemc/os6/plugins/modules/__init__.py | 0 .../dellemc/os6/plugins/modules/os6_command.py | 225 + .../dellemc/os6/plugins/modules/os6_config.py | 410 + .../dellemc/os6/plugins/modules/os6_facts.py | 478 + .../dellemc/os6/plugins/terminal/__init__.py | 0 .../dellemc/os6/plugins/terminal/os6.py | 95 + .../dellemc/os6/roles/os6_aaa/LICENSE | 674 + .../dellemc/os6/roles/os6_aaa/README.md | 210 + .../dellemc/os6/roles/os6_aaa/defaults/main.yml | 16 + .../dellemc/os6/roles/os6_aaa/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_aaa/meta/main.yml | 19 + .../dellemc/os6/roles/os6_aaa/tasks/main.yml | 17 + .../dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 | 437 + .../dellemc/os6/roles/os6_aaa/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_aaa/tests/main.os6.yaml | 111 + .../dellemc/os6/roles/os6_aaa/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_aaa/vars/main.yml | 2 + .../dellemc/os6/roles/os6_acl/LICENSE | 674 + .../dellemc/os6/roles/os6_acl/README.md | 118 + .../dellemc/os6/roles/os6_acl/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_acl/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_acl/meta/main.yml | 17 + .../dellemc/os6/roles/os6_acl/tasks/main.yml | 17 + .../dellemc/os6/roles/os6_acl/templates/os6_acl.j2 | 202 + .../dellemc/os6/roles/os6_acl/tests/inventory.yaml | 6 + 
.../dellemc/os6/roles/os6_acl/tests/main.os6.yaml | 43 + .../dellemc/os6/roles/os6_acl/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_acl/vars/main.yml | 2 + .../dellemc/os6/roles/os6_bgp/LICENSE | 674 + .../dellemc/os6/roles/os6_bgp/README.md | 153 + .../dellemc/os6/roles/os6_bgp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_bgp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_bgp/meta/main.yml | 18 + .../dellemc/os6/roles/os6_bgp/tasks/main.yml | 17 + .../dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 | 255 + .../dellemc/os6/roles/os6_bgp/tests/inventory.yaml | 7 + .../dellemc/os6/roles/os6_bgp/tests/main.os6.yaml | 47 + .../dellemc/os6/roles/os6_bgp/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_bgp/vars/main.yml | 2 + .../dellemc/os6/roles/os6_interface/LICENSE | 674 + .../dellemc/os6/roles/os6_interface/README.md | 110 + .../os6/roles/os6_interface/defaults/main.yml | 2 + .../os6/roles/os6_interface/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_interface/meta/main.yml | 18 + .../dellemc/os6/roles/os6_interface/tasks/main.yml | 17 + .../roles/os6_interface/templates/os6_interface.j2 | 94 + .../os6/roles/os6_interface/tests/inventory.yaml | 6 + .../os6/roles/os6_interface/tests/main.os6.yaml | 28 + .../os6/roles/os6_interface/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_interface/vars/main.yml | 2 + .../dellemc/os6/roles/os6_lag/LICENSE | 674 + .../dellemc/os6/roles/os6_lag/README.md | 96 + .../dellemc/os6/roles/os6_lag/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_lag/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_lag/meta/main.yml | 18 + .../dellemc/os6/roles/os6_lag/tasks/main.yml | 17 + .../dellemc/os6/roles/os6_lag/templates/os6_lag.j2 | 78 + .../dellemc/os6/roles/os6_lag/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_lag/tests/main.os6.yaml | 15 + .../dellemc/os6/roles/os6_lag/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_lag/vars/main.yml | 2 + .../dellemc/os6/roles/os6_lldp/LICENSE | 674 + 
.../dellemc/os6/roles/os6_lldp/README.md | 114 + .../dellemc/os6/roles/os6_lldp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_lldp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_lldp/meta/main.yml | 19 + .../dellemc/os6/roles/os6_lldp/tasks/main.yml | 17 + .../os6/roles/os6_lldp/templates/os6_lldp.j2 | 159 + .../os6/roles/os6_lldp/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_lldp/tests/main.os6.yaml | 26 + .../dellemc/os6/roles/os6_lldp/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_lldp/vars/main.yml | 2 + .../dellemc/os6/roles/os6_logging/LICENSE | 674 + .../dellemc/os6/roles/os6_logging/README.md | 89 + .../os6/roles/os6_logging/defaults/main.yml | 2 + .../os6/roles/os6_logging/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_logging/meta/main.yml | 18 + .../dellemc/os6/roles/os6_logging/tasks/main.yml | 17 + .../os6/roles/os6_logging/templates/os6_logging.j2 | 36 + .../os6/roles/os6_logging/tests/inventory.yaml | 6 + .../os6/roles/os6_logging/tests/main.os6.yaml | 10 + .../dellemc/os6/roles/os6_logging/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_logging/vars/main.yml | 2 + .../dellemc/os6/roles/os6_ntp/LICENSE | 674 + .../dellemc/os6/roles/os6_ntp/README.md | 82 + .../dellemc/os6/roles/os6_ntp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_ntp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_ntp/meta/main.yml | 18 + .../dellemc/os6/roles/os6_ntp/tasks/main.yml | 17 + .../dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 | 27 + .../dellemc/os6/roles/os6_ntp/tests/inventory | 2 + .../dellemc/os6/roles/os6_ntp/tests/main.os6.yaml | 9 + .../dellemc/os6/roles/os6_ntp/tests/test.yml | 5 + .../dellemc/os6/roles/os6_ntp/vars/main.yml | 2 + .../dellemc/os6/roles/os6_qos/LICENSE | 674 + .../dellemc/os6/roles/os6_qos/README.md | 102 + .../dellemc/os6/roles/os6_qos/defaults/main.yml | 5 + .../dellemc/os6/roles/os6_qos/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_qos/meta/main.yml | 17 + .../dellemc/os6/roles/os6_qos/tasks/main.yml | 17 + 
.../dellemc/os6/roles/os6_qos/templates/os6_qos.j2 | 97 + .../dellemc/os6/roles/os6_qos/tests/inventory | 2 + .../dellemc/os6/roles/os6_qos/tests/main.os6.yaml | 73 + .../dellemc/os6/roles/os6_qos/tests/test.yml | 5 + .../dellemc/os6/roles/os6_qos/vars/main.yml | 2 + .../dellemc/os6/roles/os6_snmp/LICENSE | 674 + .../dellemc/os6/roles/os6_snmp/README.md | 108 + .../dellemc/os6/roles/os6_snmp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_snmp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_snmp/meta/main.yml | 18 + .../dellemc/os6/roles/os6_snmp/tasks/main.yml | 17 + .../os6/roles/os6_snmp/templates/os6_snmp.j2 | 94 + .../os6/roles/os6_snmp/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_snmp/tests/main.os6.yaml | 23 + .../dellemc/os6/roles/os6_snmp/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_snmp/vars/main.yml | 2 + .../dellemc/os6/roles/os6_system/LICENSE | 674 + .../dellemc/os6/roles/os6_system/README.md | 83 + .../dellemc/os6/roles/os6_system/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_system/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_system/meta/main.yml | 18 + .../dellemc/os6/roles/os6_system/tasks/main.yml | 17 + .../os6/roles/os6_system/templates/os6_system.j2 | 34 + .../os6/roles/os6_system/tests/inventory.yaml | 6 + .../os6/roles/os6_system/tests/main.os6.yaml | 9 + .../dellemc/os6/roles/os6_system/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_system/vars/main.yml | 2 + .../dellemc/os6/roles/os6_users/LICENSE | 674 + .../dellemc/os6/roles/os6_users/README.md | 93 + .../dellemc/os6/roles/os6_users/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_users/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_users/meta/main.yml | 18 + .../dellemc/os6/roles/os6_users/tasks/main.yml | 17 + .../os6/roles/os6_users/templates/os6_users.j2 | 37 + .../os6/roles/os6_users/tests/inventory.yaml | 6 + .../os6/roles/os6_users/tests/main.os6.yaml | 10 + .../dellemc/os6/roles/os6_users/tests/test.yaml | 5 + 
.../dellemc/os6/roles/os6_users/vars/main.yml | 2 + .../dellemc/os6/roles/os6_vlan/LICENSE | 674 + .../dellemc/os6/roles/os6_vlan/README.md | 104 + .../dellemc/os6/roles/os6_vlan/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_vlan/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_vlan/meta/main.yml | 18 + .../dellemc/os6/roles/os6_vlan/tasks/main.yml | 17 + .../os6/roles/os6_vlan/templates/os6_vlan.j2 | 135 + .../os6/roles/os6_vlan/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_vlan/tests/main.os6.yaml | 21 + .../dellemc/os6/roles/os6_vlan/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_vlan/vars/main.yml | 2 + .../dellemc/os6/roles/os6_vrrp/LICENSE | 674 + .../dellemc/os6/roles/os6_vrrp/README.md | 92 + .../dellemc/os6/roles/os6_vrrp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_vrrp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_vrrp/meta/main.yml | 19 + .../dellemc/os6/roles/os6_vrrp/tasks/main.yml | 16 + .../os6/roles/os6_vrrp/templates/os6_vrrp.j2 | 72 + .../os6/roles/os6_vrrp/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml | 14 + .../dellemc/os6/roles/os6_vrrp/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_vrrp/vars/main.yml | 2 + .../dellemc/os6/roles/os6_xstp/LICENSE | 674 + .../dellemc/os6/roles/os6_xstp/README.md | 117 + .../dellemc/os6/roles/os6_xstp/defaults/main.yml | 2 + .../dellemc/os6/roles/os6_xstp/handlers/main.yml | 2 + .../dellemc/os6/roles/os6_xstp/meta/main.yml | 18 + .../dellemc/os6/roles/os6_xstp/tasks/main.yml | 17 + .../os6/roles/os6_xstp/templates/os6_xstp.j2 | 129 + .../os6/roles/os6_xstp/tests/inventory.yaml | 6 + .../dellemc/os6/roles/os6_xstp/tests/main.os6.yaml | 24 + .../dellemc/os6/roles/os6_xstp/tests/test.yaml | 5 + .../dellemc/os6/roles/os6_xstp/vars/main.yml | 2 + ansible_collections/dellemc/os6/tests/.gitignore | 1 + .../os6_command/os6_command/defaults/main.yaml | 2 + .../targets/os6_command/os6_command/tasks/cli.yaml | 13 + .../os6_command/os6_command/tasks/main.yaml | 2 + 
.../os6_command/tests/cli/bad_operator.yaml | 20 + .../os6_command/tests/cli/contains.yaml | 20 + .../os6_command/os6_command/tests/cli/invalid.yaml | 28 + .../os6_command/os6_command/tests/cli/output.yaml | 29 + .../os6_command/os6_command/tests/cli/timeout.yaml | 19 + .../os6_config/os6_config/defaults/main.yaml | 2 + .../targets/os6_config/os6_config/tasks/cli.yaml | 13 + .../targets/os6_config/os6_config/tasks/main.yaml | 2 + .../os6_config/os6_config/tests/cli/backup.yaml | 53 + .../os6_config/os6_config/tests/cli/basic.yaml | 38 + .../os6_config/os6_config/tests/cli/defaults.yaml | 43 + .../os6_config/os6_config/tests/cli/force.yaml | 41 + .../os6_config/os6_config/tests/cli/sublevel.yaml | 42 + .../os6_config/tests/cli/sublevel_block.yaml | 62 + .../os6_config/tests/cli/sublevel_exact.yaml | 66 + .../os6_config/tests/cli/sublevel_strict.yaml | 63 + .../os6_config/os6_config/tests/cli/toplevel.yaml | 37 + .../os6_config/tests/cli/toplevel_after.yaml | 44 + .../os6_config/tests/cli/toplevel_before.yaml | 44 + .../tests/cli/toplevel_nonidempotent.yaml | 39 + .../targets/os6_facts/os6_facts/defaults/main.yaml | 2 + .../targets/os6_facts/os6_facts/tasks/cli.yaml | 14 + .../targets/os6_facts/os6_facts/tasks/main.yaml | 2 + .../os6_facts/os6_facts/tests/cli/facts.yaml | 42 + .../dellemc/os6/tests/sanity/ignore-2.10.txt | 4 + .../dellemc/os6/tests/sanity/ignore-2.11.txt | 4 + .../dellemc/os6/tests/sanity/ignore-2.9.txt | 1 + .../dellemc/os6/tests/sanity/requirements.txt | 4 + .../os6/tests/unit/modules/network/os6/__init__.py | 0 .../network/os6/fixtures/os6_config_config.cfg | 16 + .../network/os6/fixtures/os6_config_src.cfg | 7 + .../modules/network/os6/fixtures/show_interfaces | 41 + .../network/os6/fixtures/show_interfaces_status | 48 + .../show_interfaces_transceiver_properties | 6 + .../unit/modules/network/os6/fixtures/show_ip_int | 15 + .../unit/modules/network/os6/fixtures/show_lldp | 11 + .../os6/fixtures/show_lldp_remote-device_all | 10 + 
.../modules/network/os6/fixtures/show_memory_cpu | 3 + .../network/os6/fixtures/show_running-config | 124 + .../fixtures/show_running-config__include_hostname | 3 + .../unit/modules/network/os6/fixtures/show_version | 17 + .../tests/unit/modules/network/os6/os6_module.py | 88 + .../unit/modules/network/os6/test_os6_command.py | 108 + .../unit/modules/network/os6/test_os6_config.py | 146 + .../unit/modules/network/os6/test_os6_facts.py | 105 + ansible_collections/dellemc/os9/.ansible-lint | 2 + .../dellemc/os9/.github/workflows/ansible-test.yml | 33 + ansible_collections/dellemc/os9/.gitignore | 387 + ansible_collections/dellemc/os9/COPYING | 675 + ansible_collections/dellemc/os9/FILES.json | 3953 ++++ ansible_collections/dellemc/os9/LICENSE | 674 + ansible_collections/dellemc/os9/MANIFEST.json | 37 + ansible_collections/dellemc/os9/README.md | 96 + .../dellemc/os9/changelogs/CHANGELOG.rst | 76 + .../dellemc/os9/changelogs/changelog.yaml | 107 + .../dellemc/os9/changelogs/config.yaml | 30 + ansible_collections/dellemc/os9/docs/os9_aaa.md | 1 + ansible_collections/dellemc/os9/docs/os9_acl.md | 1 + ansible_collections/dellemc/os9/docs/os9_bgp.md | 1 + .../dellemc/os9/docs/os9_copy_config.md | 1 + ansible_collections/dellemc/os9/docs/os9_dcb.md | 1 + ansible_collections/dellemc/os9/docs/os9_dns.md | 1 + ansible_collections/dellemc/os9/docs/os9_ecmp.md | 1 + .../dellemc/os9/docs/os9_interface.md | 1 + ansible_collections/dellemc/os9/docs/os9_lag.md | 1 + ansible_collections/dellemc/os9/docs/os9_lldp.md | 1 + .../dellemc/os9/docs/os9_logging.md | 1 + ansible_collections/dellemc/os9/docs/os9_ntp.md | 1 + .../dellemc/os9/docs/os9_prefix_list.md | 1 + ansible_collections/dellemc/os9/docs/os9_sflow.md | 1 + ansible_collections/dellemc/os9/docs/os9_snmp.md | 1 + ansible_collections/dellemc/os9/docs/os9_system.md | 1 + ansible_collections/dellemc/os9/docs/os9_users.md | 1 + ansible_collections/dellemc/os9/docs/os9_vlan.md | 1 + ansible_collections/dellemc/os9/docs/os9_vlt.md | 
1 + ansible_collections/dellemc/os9/docs/os9_vrf.md | 1 + ansible_collections/dellemc/os9/docs/os9_vrrp.md | 1 + ansible_collections/dellemc/os9/docs/os9_xstp.md | 1 + ansible_collections/dellemc/os9/docs/roles.rst | 136 + ansible_collections/dellemc/os9/meta/runtime.yml | 8 + .../os9/playbooks/clos_fabric_ebgp/README.md | 35 + .../os9/playbooks/clos_fabric_ebgp/datacenter.yaml | 11 + .../os9/playbooks/clos_fabric_ebgp/group_vars/all | 10 + .../clos_fabric_ebgp/group_vars/spine.yaml | 64 + .../clos_fabric_ebgp/host_vars/leaf1.yaml | 61 + .../clos_fabric_ebgp/host_vars/leaf2.yaml | 65 + .../clos_fabric_ebgp/host_vars/leaf3.yaml | 65 + .../clos_fabric_ebgp/host_vars/leaf4.yaml | 61 + .../clos_fabric_ebgp/host_vars/spine1.yaml | 61 + .../clos_fabric_ebgp/host_vars/spine2.yaml | 60 + .../os9/playbooks/clos_fabric_ebgp/inventory.yaml | 20 + .../dellemc/os9/plugins/action/__init__.py | 0 .../dellemc/os9/plugins/action/os9.py | 95 + .../dellemc/os9/plugins/cliconf/__init__.py | 0 .../dellemc/os9/plugins/cliconf/os9.py | 88 + .../dellemc/os9/plugins/doc_fragments/__init__.py | 0 .../dellemc/os9/plugins/doc_fragments/os9.py | 73 + .../dellemc/os9/plugins/module_utils/__init__.py | 0 .../os9/plugins/module_utils/network/__init__.py | 0 .../os9/plugins/module_utils/network/os9.py | 146 + .../dellemc/os9/plugins/modules/__init__.py | 0 .../dellemc/os9/plugins/modules/os9_command.py | 232 + .../dellemc/os9/plugins/modules/os9_config.py | 350 + .../dellemc/os9/plugins/modules/os9_facts.py | 578 + .../dellemc/os9/plugins/terminal/__init__.py | 0 .../dellemc/os9/plugins/terminal/os9.py | 83 + .../dellemc/os9/roles/os9_aaa/LICENSE | 674 + .../dellemc/os9/roles/os9_aaa/README.md | 331 + .../dellemc/os9/roles/os9_aaa/defaults/main.yml | 16 + .../dellemc/os9/roles/os9_aaa/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_aaa/meta/main.yml | 19 + .../dellemc/os9/roles/os9_aaa/tasks/main.yml | 17 + .../dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 | 680 + 
.../dellemc/os9/roles/os9_aaa/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_aaa/tests/main.os6.yaml | 133 + .../dellemc/os9/roles/os9_aaa/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_aaa/vars/main.yml | 2 + .../dellemc/os9/roles/os9_acl/LICENSE | 674 + .../dellemc/os9/roles/os9_acl/README.md | 134 + .../dellemc/os9/roles/os9_acl/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_acl/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_acl/meta/main.yml | 17 + .../dellemc/os9/roles/os9_acl/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_acl/templates/os9_acl.j2 | 277 + .../dellemc/os9/roles/os9_acl/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_acl/tests/main.os9.yaml | 88 + .../dellemc/os9/roles/os9_acl/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_acl/vars/main.yml | 2 + .../dellemc/os9/roles/os9_bgp/LICENSE | 674 + .../dellemc/os9/roles/os9_bgp/README.md | 224 + .../dellemc/os9/roles/os9_bgp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_bgp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_bgp/meta/main.yml | 18 + .../dellemc/os9/roles/os9_bgp/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 | 351 + .../dellemc/os9/roles/os9_bgp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_bgp/tests/main.os9.yaml | 97 + .../dellemc/os9/roles/os9_bgp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_bgp/vars/main.yml | 2 + .../dellemc/os9/roles/os9_copy_config/LICENSE | 674 + .../dellemc/os9/roles/os9_copy_config/README.md | 131 + .../os9/roles/os9_copy_config/defaults/main.yml | 2 + .../os9/roles/os9_copy_config/handlers/main.yml | 2 + .../os9/roles/os9_copy_config/meta/main.yml | 19 + .../os9/roles/os9_copy_config/tasks/main.yml | 7 + .../os9_copy_config/templates/os9_copy_config.j2 | 3 + .../os9/roles/os9_copy_config/tests/inventory.yaml | 20 + .../os9/roles/os9_copy_config/tests/main.os9.yaml | 1 + .../os9/roles/os9_copy_config/tests/test.yaml | 5 + .../os9/roles/os9_copy_config/vars/main.yml | 2 + 
.../dellemc/os9/roles/os9_dcb/LICENSE | 674 + .../dellemc/os9/roles/os9_dcb/README.md | 133 + .../dellemc/os9/roles/os9_dcb/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_dcb/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_dcb/meta/main.yml | 19 + .../dellemc/os9/roles/os9_dcb/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 | 216 + .../dellemc/os9/roles/os9_dcb/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_dcb/tests/main.os9.yaml | 38 + .../dellemc/os9/roles/os9_dcb/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_dcb/vars/main.yml | 2 + .../dellemc/os9/roles/os9_dns/LICENSE | 674 + .../dellemc/os9/roles/os9_dns/README.md | 94 + .../dellemc/os9/roles/os9_dns/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_dns/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_dns/meta/main.yml | 17 + .../dellemc/os9/roles/os9_dns/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_dns/templates/os9_dns.j2 | 111 + .../dellemc/os9/roles/os9_dns/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_dns/tests/main.os9.yaml | 40 + .../dellemc/os9/roles/os9_dns/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_dns/vars/main.yml | 2 + .../dellemc/os9/roles/os9_ecmp/LICENSE | 674 + .../dellemc/os9/roles/os9_ecmp/README.md | 89 + .../dellemc/os9/roles/os9_ecmp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_ecmp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_ecmp/meta/main.yml | 17 + .../dellemc/os9/roles/os9_ecmp/tasks/main.yml | 17 + .../os9/roles/os9_ecmp/templates/os9_ecmp.j2 | 62 + .../os9/roles/os9_ecmp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml | 14 + .../dellemc/os9/roles/os9_ecmp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_ecmp/vars/main.yml | 3 + .../dellemc/os9/roles/os9_interface/LICENSE | 674 + .../dellemc/os9/roles/os9_interface/README.md | 173 + .../os9/roles/os9_interface/defaults/main.yml | 2 + .../os9/roles/os9_interface/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_interface/meta/main.yml | 
18 + .../dellemc/os9/roles/os9_interface/tasks/main.yml | 16 + .../roles/os9_interface/templates/os9_interface.j2 | 237 + .../os9/roles/os9_interface/tests/inventory.yaml | 20 + .../os9/roles/os9_interface/tests/main.os9.yaml | 50 + .../os9/roles/os9_interface/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_interface/vars/main.yml | 2 + .../dellemc/os9/roles/os9_lag/LICENSE | 674 + .../dellemc/os9/roles/os9_lag/README.md | 110 + .../dellemc/os9/roles/os9_lag/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_lag/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_lag/meta/main.yml | 18 + .../dellemc/os9/roles/os9_lag/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_lag/templates/os9_lag.j2 | 114 + .../dellemc/os9/roles/os9_lag/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_lag/tests/main.os9.yaml | 21 + .../dellemc/os9/roles/os9_lag/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_lag/vars/main.yml | 2 + .../dellemc/os9/roles/os9_lldp/LICENSE | 674 + .../dellemc/os9/roles/os9_lldp/README.md | 245 + .../dellemc/os9/roles/os9_lldp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_lldp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_lldp/meta/main.yml | 19 + .../dellemc/os9/roles/os9_lldp/tasks/main.yml | 16 + .../os9/roles/os9_lldp/templates/os9_lldp.j2 | 514 + .../os9/roles/os9_lldp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_lldp/tests/main.os9.yaml | 94 + .../dellemc/os9/roles/os9_lldp/tests/test.yaml | 6 + .../dellemc/os9/roles/os9_lldp/vars/main.yml | 2 + .../dellemc/os9/roles/os9_logging/LICENSE | 674 + .../dellemc/os9/roles/os9_logging/README.md | 148 + .../os9/roles/os9_logging/defaults/main.yml | 2 + .../os9/roles/os9_logging/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_logging/meta/main.yml | 18 + .../dellemc/os9/roles/os9_logging/tasks/main.yml | 16 + .../os9/roles/os9_logging/templates/os9_logging.j2 | 198 + .../os9/roles/os9_logging/tests/inventory.yaml | 20 + .../os9/roles/os9_logging/tests/main.os9.yaml | 44 + 
.../dellemc/os9/roles/os9_logging/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_logging/vars/main.yml | 2 + .../dellemc/os9/roles/os9_ntp/LICENSE | 674 + .../dellemc/os9/roles/os9_ntp/README.md | 98 + .../dellemc/os9/roles/os9_ntp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_ntp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_ntp/meta/main.yml | 18 + .../dellemc/os9/roles/os9_ntp/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 | 41 + .../dellemc/os9/roles/os9_ntp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_ntp/tests/main.os9.yaml | 11 + .../dellemc/os9/roles/os9_ntp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_ntp/vars/main.yml | 2 + .../dellemc/os9/roles/os9_prefix_list/LICENSE | 674 + .../dellemc/os9/roles/os9_prefix_list/README.md | 110 + .../os9/roles/os9_prefix_list/defaults/main.yml | 2 + .../os9/roles/os9_prefix_list/handlers/main.yml | 2 + .../os9/roles/os9_prefix_list/meta/main.yml | 17 + .../os9/roles/os9_prefix_list/tasks/main.yml | 16 + .../os9_prefix_list/templates/os9_prefix_list.j2 | 81 + .../os9/roles/os9_prefix_list/tests/inventory.yaml | 20 + .../os9/roles/os9_prefix_list/tests/main.os9.yaml | 33 + .../os9/roles/os9_prefix_list/tests/test.yaml | 5 + .../os9/roles/os9_prefix_list/vars/main.yml | 2 + .../dellemc/os9/roles/os9_sflow/LICENSE | 674 + .../dellemc/os9/roles/os9_sflow/README.md | 120 + .../dellemc/os9/roles/os9_sflow/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_sflow/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_sflow/meta/main.yml | 18 + .../dellemc/os9/roles/os9_sflow/tasks/main.yml | 16 + .../os9/roles/os9_sflow/templates/os9_sflow.j2 | 143 + .../os9/roles/os9_sflow/tests/inventory.yaml | 20 + .../os9/roles/os9_sflow/tests/main.os9.yaml | 35 + .../dellemc/os9/roles/os9_sflow/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_sflow/vars/main.yml | 2 + .../dellemc/os9/roles/os9_snmp/LICENSE | 674 + .../dellemc/os9/roles/os9_snmp/README.md | 192 + 
.../dellemc/os9/roles/os9_snmp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_snmp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_snmp/meta/main.yml | 18 + .../dellemc/os9/roles/os9_snmp/tasks/main.yml | 16 + .../os9/roles/os9_snmp/templates/os9_snmp.j2 | 524 + .../os9/roles/os9_snmp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_snmp/tests/main.os9.yaml | 83 + .../dellemc/os9/roles/os9_snmp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_snmp/vars/main.yml | 2 + .../dellemc/os9/roles/os9_system/LICENSE | 674 + .../dellemc/os9/roles/os9_system/README.md | 223 + .../dellemc/os9/roles/os9_system/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_system/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_system/meta/main.yml | 18 + .../dellemc/os9/roles/os9_system/tasks/main.yml | 16 + .../os9/roles/os9_system/templates/os9_system.j2 | 422 + .../os9/roles/os9_system/tests/inventory.yaml | 20 + .../os9/roles/os9_system/tests/main.os9.yaml | 74 + .../dellemc/os9/roles/os9_system/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_system/vars/main.yml | 2 + .../dellemc/os9/roles/os9_users/LICENSE | 674 + .../dellemc/os9/roles/os9_users/README.md | 109 + .../dellemc/os9/roles/os9_users/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_users/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_users/meta/main.yml | 18 + .../dellemc/os9/roles/os9_users/tasks/main.yml | 16 + .../os9/roles/os9_users/templates/os9_users.j2 | 141 + .../os9/roles/os9_users/tests/inventory.yaml | 20 + .../os9/roles/os9_users/tests/main.os9.yaml | 28 + .../dellemc/os9/roles/os9_users/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_users/vars/main.yml | 2 + .../dellemc/os9/roles/os9_vlan/LICENSE | 674 + .../dellemc/os9/roles/os9_vlan/README.md | 105 + .../dellemc/os9/roles/os9_vlan/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_vlan/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_vlan/meta/main.yml | 18 + .../dellemc/os9/roles/os9_vlan/tasks/main.yml | 16 + 
.../os9/roles/os9_vlan/templates/os9_vlan.j2 | 79 + .../os9/roles/os9_vlan/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_vlan/tests/main.os9.yaml | 20 + .../dellemc/os9/roles/os9_vlan/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_vlan/vars/main.yml | 2 + .../dellemc/os9/roles/os9_vlt/LICENSE | 674 + .../dellemc/os9/roles/os9_vlt/README.md | 132 + .../dellemc/os9/roles/os9_vlt/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_vlt/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_vlt/meta/main.yml | 18 + .../dellemc/os9/roles/os9_vlt/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 | 217 + .../dellemc/os9/roles/os9_vlt/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_vlt/tests/main.os9.yaml | 39 + .../dellemc/os9/roles/os9_vlt/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_vlt/vars/main.yml | 2 + .../dellemc/os9/roles/os9_vrf/LICENSE | 674 + .../dellemc/os9/roles/os9_vrf/README.md | 125 + .../dellemc/os9/roles/os9_vrf/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_vrf/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_vrf/meta/main.yml | 18 + .../dellemc/os9/roles/os9_vrf/tasks/main.yml | 16 + .../dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 | 68 + .../dellemc/os9/roles/os9_vrf/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_vrf/tests/main.os9.yaml | 15 + .../dellemc/os9/roles/os9_vrf/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_vrf/vars/main.yml | 2 + .../dellemc/os9/roles/os9_vrrp/LICENSE | 674 + .../dellemc/os9/roles/os9_vrrp/README.md | 148 + .../dellemc/os9/roles/os9_vrrp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_vrrp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_vrrp/meta/main.yml | 19 + .../dellemc/os9/roles/os9_vrrp/tasks/main.yml | 16 + .../os9/roles/os9_vrrp/templates/os9_vrrp.j2 | 218 + .../os9/roles/os9_vrrp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml | 59 + .../dellemc/os9/roles/os9_vrrp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_vrrp/vars/main.yml | 
2 + .../dellemc/os9/roles/os9_xstp/LICENSE | 674 + .../dellemc/os9/roles/os9_xstp/README.md | 127 + .../dellemc/os9/roles/os9_xstp/defaults/main.yml | 2 + .../dellemc/os9/roles/os9_xstp/handlers/main.yml | 2 + .../dellemc/os9/roles/os9_xstp/meta/main.yml | 18 + .../dellemc/os9/roles/os9_xstp/tasks/main.yml | 16 + .../os9/roles/os9_xstp/templates/os9_xstp.j2 | 160 + .../os9/roles/os9_xstp/tests/inventory.yaml | 20 + .../dellemc/os9/roles/os9_xstp/tests/main.os9.yaml | 34 + .../dellemc/os9/roles/os9_xstp/tests/test.yaml | 5 + .../dellemc/os9/roles/os9_xstp/vars/main.yml | 2 + ansible_collections/dellemc/os9/tests/.gitignore | 1 + .../os9/tests/integration/targets/__init__.py | 0 .../os9_command/os9_command/defaults/main.yaml | 2 + .../targets/os9_command/os9_command/tasks/cli.yaml | 14 + .../os9_command/os9_command/tasks/main.yaml | 2 + .../os9_command/os9_command/tests/cli/bad_operator | 20 + .../os9_command/os9_command/tests/cli/contains | 20 + .../os9_command/os9_command/tests/cli/invalid | 28 + .../os9_command/os9_command/tests/cli/output | 29 + .../os9_command/tests/cli/show_commands.yaml | 74 + .../os9_command/os9_command/tests/cli/timeout | 19 + .../os9_config/os9_config/defaults/main.yaml | 2 + .../targets/os9_config/os9_config/tasks/cli.yaml | 13 + .../targets/os9_config/os9_config/tasks/main.yaml | 2 + .../os9_config/tests/cli/configcommands.yaml | 134 + .../os9_config/os9_config/tests/cli/toplevel.yaml | 37 + .../os9_config/tests/cli/vlan_config.txt | 9 + .../targets/os9_facts/os9_facts/defaults/main.yaml | 2 + .../targets/os9_facts/os9_facts/tasks/cli.yaml | 13 + .../targets/os9_facts/os9_facts/tasks/main.yaml | 2 + .../os9_facts/tests/cli/testcases_facts.yaml | 55 + .../dellemc/os9/tests/sanity/ignore-2.10.txt | 4 + .../dellemc/os9/tests/sanity/ignore-2.11.txt | 4 + .../dellemc/os9/tests/sanity/ignore-2.9.txt | 1 + .../dellemc/os9/tests/sanity/requirements.txt | 4 + .../os9/tests/unit/modules/network/os9/__init__.py | 0 
.../network/os9/fixtures/os9_config_config.cfg | 13 + .../network/os9/fixtures/os9_config_src.cfg | 12 + .../modules/network/os9/fixtures/show_file-systems | 10 + .../modules/network/os9/fixtures/show_interfaces | 1259 ++ .../modules/network/os9/fixtures/show_inventory | 19 + .../network/os9/fixtures/show_ipv6_interface | 26 + .../os9/fixtures/show_lldp_neighbors_detail | 35 + .../os9/fixtures/show_memory__except_Processor | 4 + .../network/os9/fixtures/show_running-config | 238 + .../fixtures/show_running-config__grep_hostname | 1 + .../unit/modules/network/os9/fixtures/show_version | 18 + .../tests/unit/modules/network/os9/os9_module.py | 88 + .../unit/modules/network/os9/test_os9_command.py | 108 + .../unit/modules/network/os9/test_os9_config.py | 148 + .../unit/modules/network/os9/test_os9_facts.py | 106 + .../dellemc/powerflex/CHANGELOG.rst | 95 + ansible_collections/dellemc/powerflex/FILES.json | 530 + ansible_collections/dellemc/powerflex/LICENSE | 674 + .../dellemc/powerflex/MANIFEST.json | 39 + .../dellemc/powerflex/MODULE-LICENSE | 201 + ansible_collections/dellemc/powerflex/README.md | 60 + .../powerflex/changelogs/.plugin-cache.yaml | 67 + .../dellemc/powerflex/changelogs/changelog.yaml | 82 + .../dellemc/powerflex/changelogs/config.yaml | 33 + .../dellemc/powerflex/docs/ADOPTERS.md | 11 + .../dellemc/powerflex/docs/BRANCHING.md | 32 + .../dellemc/powerflex/docs/CODE_OF_CONDUCT.md | 137 + .../dellemc/powerflex/docs/COMMITTER_GUIDE.md | 49 + .../dellemc/powerflex/docs/CONTRIBUTING.md | 173 + .../dellemc/powerflex/docs/INSTALLATION.md | 106 + .../dellemc/powerflex/docs/ISSUE_TRIAGE.md | 306 + .../dellemc/powerflex/docs/MAINTAINERS.md | 19 + .../dellemc/powerflex/docs/MAINTAINER_GUIDE.md | 38 + .../dellemc/powerflex/docs/Product Guide.md | 5437 ++++++ .../dellemc/powerflex/docs/Release Notes.md | 73 + .../dellemc/powerflex/docs/SECURITY.md | 22 + .../dellemc/powerflex/docs/SUPPORT.md | 12 + .../powerflex/meta/execution-environment.yml | 5 + 
.../dellemc/powerflex/meta/runtime.yml | 39 + .../powerflex/plugins/doc_fragments/powerflex.py | 61 + .../plugins/module_utils/storage/dell/__init__.py | 0 .../module_utils/storage/dell/logging_handler.py | 24 + .../plugins/module_utils/storage/dell/utils.py | 186 + .../dellemc/powerflex/plugins/modules/device.py | 1105 ++ .../dellemc/powerflex/plugins/modules/info.py | 1495 ++ .../powerflex/plugins/modules/mdm_cluster.py | 1333 ++ .../powerflex/plugins/modules/protection_domain.py | 1122 ++ .../modules/replication_consistency_group.py | 907 + .../dellemc/powerflex/plugins/modules/sdc.py | 365 + .../dellemc/powerflex/plugins/modules/sds.py | 1160 ++ .../dellemc/powerflex/plugins/modules/snapshot.py | 1285 ++ .../powerflex/plugins/modules/storagepool.py | 914 + .../dellemc/powerflex/plugins/modules/volume.py | 1599 ++ .../dellemc/powerflex/requirements.txt | 4 + .../dellemc/powerflex/requirements.yml | 3 + .../dellemc/powerflex/tests/requirements.txt | 7 + .../dellemc/powerflex/tests/sanity/ignore-2.12.txt | 10 + .../dellemc/powerflex/tests/sanity/ignore-2.13.txt | 10 + .../dellemc/powerflex/tests/sanity/ignore-2.14.txt | 10 + .../dellemc/powerflex/tests/unit/__init__.py | 0 .../powerflex/tests/unit/plugins/__init__.py | 0 .../plugins/module_utils/mock_api_exception.py | 14 + .../unit/plugins/module_utils/mock_info_api.py | 235 + .../plugins/module_utils/mock_mdm_cluster_api.py | 403 + .../module_utils/mock_protection_domain_api.py | 68 + .../mock_replication_consistency_group_api.py | 70 + .../unit/plugins/module_utils/mock_sdk_response.py | 15 + .../plugins/module_utils/mock_storagepool_api.py | 467 + .../unit/plugins/module_utils/mock_volume_api.py | 548 + .../tests/unit/plugins/modules/__init__.py | 0 .../tests/unit/plugins/modules/test_info.py | 130 + .../tests/unit/plugins/modules/test_mdm_cluster.py | 636 + .../unit/plugins/modules/test_protection_domain.py | 236 + .../modules/test_replication_consistency_group.py | 344 + 
.../tests/unit/plugins/modules/test_storagepool.py | 72 + .../tests/unit/plugins/modules/test_volume.py | 81 + ansible_collections/dellemc/unity/CHANGELOG.rst | 146 + ansible_collections/dellemc/unity/FILES.json | 600 + ansible_collections/dellemc/unity/LICENSE | 674 + ansible_collections/dellemc/unity/MANIFEST.json | 41 + ansible_collections/dellemc/unity/MODULE-LICENSE | 201 + ansible_collections/dellemc/unity/README.md | 71 + .../dellemc/unity/changelogs/.plugin-cache.yaml | 102 + .../dellemc/unity/changelogs/changelog.yaml | 152 + .../dellemc/unity/changelogs/config.yaml | 33 + ansible_collections/dellemc/unity/docs/ADOPTERS.md | 11 + .../dellemc/unity/docs/BRANCHING.md | 32 + .../dellemc/unity/docs/CODE_OF_CONDUCT.md | 137 + .../dellemc/unity/docs/COMMITTER_GUIDE.md | 49 + .../dellemc/unity/docs/CONTRIBUTING.md | 173 + .../dellemc/unity/docs/INSTALLATION.md | 100 + .../dellemc/unity/docs/ISSUE_TRIAGE.md | 308 + .../dellemc/unity/docs/MAINTAINERS.md | 19 + .../dellemc/unity/docs/MAINTAINER_GUIDE.md | 38 + .../dellemc/unity/docs/Product Guide.md | 8662 +++++++++ .../dellemc/unity/docs/Release Notes.md | 76 + ansible_collections/dellemc/unity/docs/SECURITY.md | 22 + ansible_collections/dellemc/unity/docs/SUPPORT.md | 12 + .../dellemc/unity/meta/execution-environment.yml | 5 + ansible_collections/dellemc/unity/meta/runtime.yml | 79 + .../dellemc/unity/plugins/doc_fragments/unity.py | 53 + .../plugins/module_utils/storage/dell/__init__.py | 0 .../module_utils/storage/dell/logging_handler.py | 25 + .../plugins/module_utils/storage/dell/utils.py | 254 + .../dellemc/unity/plugins/modules/cifsserver.py | 629 + .../unity/plugins/modules/consistencygroup.py | 1516 ++ .../dellemc/unity/plugins/modules/filesystem.py | 1889 ++ .../unity/plugins/modules/filesystem_snapshot.py | 772 + .../dellemc/unity/plugins/modules/host.py | 1025 + .../dellemc/unity/plugins/modules/info.py | 1784 ++ .../dellemc/unity/plugins/modules/interface.py | 521 + 
.../dellemc/unity/plugins/modules/nasserver.py | 1151 ++ .../dellemc/unity/plugins/modules/nfs.py | 1667 ++ .../dellemc/unity/plugins/modules/nfsserver.py | 494 + .../dellemc/unity/plugins/modules/smbshare.py | 877 + .../dellemc/unity/plugins/modules/snapshot.py | 751 + .../unity/plugins/modules/snapshotschedule.py | 1002 + .../dellemc/unity/plugins/modules/storagepool.py | 879 + .../dellemc/unity/plugins/modules/tree_quota.py | 708 + .../dellemc/unity/plugins/modules/user_quota.py | 1013 + .../dellemc/unity/plugins/modules/volume.py | 1256 ++ ansible_collections/dellemc/unity/requirements.txt | 3 + ansible_collections/dellemc/unity/requirements.yml | 3 + .../dellemc/unity/tests/requirements.txt | 7 + .../dellemc/unity/tests/sanity/ignore-2.12.txt | 23 + .../dellemc/unity/tests/sanity/ignore-2.13.txt | 19 + .../dellemc/unity/tests/sanity/ignore-2.14.txt | 19 + .../plugins/module_utils/mock_api_exception.py | 19 + .../plugins/module_utils/mock_cifsserver_api.py | 200 + .../module_utils/mock_consistencygroup_api.py | 122 + .../plugins/module_utils/mock_filesystem_api.py | 68 + .../unit/plugins/module_utils/mock_host_api.py | 154 + .../plugins/module_utils/mock_interface_api.py | 122 + .../plugins/module_utils/mock_nasserver_api.py | 64 + .../unit/plugins/module_utils/mock_nfs_api.py | 139 + .../plugins/module_utils/mock_nfsserver_api.py | 259 + .../unit/plugins/module_utils/mock_sdk_response.py | 32 + .../plugins/module_utils/mock_storagepool_api.py | 168 + .../tests/unit/plugins/modules/test_cifsserver.py | 169 + .../unit/plugins/modules/test_consistencygroup.py | 193 + .../tests/unit/plugins/modules/test_filesystem.py | 94 + .../unity/tests/unit/plugins/modules/test_host.py | 143 + .../tests/unit/plugins/modules/test_interface.py | 350 + .../tests/unit/plugins/modules/test_nasserver.py | 112 + .../unity/tests/unit/plugins/modules/test_nfs.py | 180 + .../tests/unit/plugins/modules/test_nfsserver.py | 226 + .../tests/unit/plugins/modules/test_storagepool.py | 132 + 
2394 files changed, 343872 insertions(+) create mode 100644 ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/.gitignore create mode 100644 ansible_collections/dellemc/enterprise_sonic/.yamllint create mode 100644 ansible_collections/dellemc/enterprise_sonic/FILES.json create mode 100644 ansible_collections/dellemc/enterprise_sonic/LICENSE create mode 100644 ansible_collections/dellemc/enterprise_sonic/MANIFEST.json create mode 100644 ansible_collections/dellemc/enterprise_sonic/README.md create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/100-prefix_lists-resource-module.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/101-bgp-prefix_lists.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/102-playbook-example-updates.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/103-standard-interface-naming-and-other-bugfixes.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/105-vxlan-regression-test-fix.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/2.0.0.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/53-oc-yang-compliance.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/58-vxlan-primary-ip.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/60-bgp-regression-test_fixes.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/62-bgp-vxlan-primary-ip.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/63-bgp_af-route_advertise_list.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/72-bgp_neighbors-add-attributes.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/76-CLI-command-prompt-answer-fix.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/78-aaa-regression-fix.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/81-add-bgp-peer-group-attributes.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/82-static_routes.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/85-regression-test-fixes.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/88-port_breakout-and-execution-environment.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/90-aaa-and-users-bugfix.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/98-vlans-description.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/99-ntp-resource-module.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/meta/execution-environment.yml create mode 
100644 ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/clos-fabric.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/all.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/leaf.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/spine.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/inventory.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/templates/clos_fabric_cfg.j2 create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/hosts create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/interface_naming.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/patch.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_aaa.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_api.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp_communities.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_command.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_config.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_facts.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_interfaces_config.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_system.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_vxlans_config.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/src.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/action/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/action/sonic.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/httpapi/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/httpapi/sonic.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/__init__.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors/bgp_neighbors.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/bgp_neighbors_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py 
create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py create mode 
100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/__init__.py create mode 
100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/prefix_lists/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/prefix_lists/prefix_lists.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_api.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py create mode 100644 
ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_command.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py create mode 
100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/terminal/__init__.py create mode 100644 ansible_collections/dellemc/enterprise_sonic/plugins/terminal/sonic.py create mode 100755 ansible_collections/dellemc/enterprise_sonic/rebuild.sh create mode 100644 ansible_collections/dellemc/enterprise_sonic/requirements.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/image-upgrade.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/action.facts.report.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.contains.test.facts.report.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.test.facts.report.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli_tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/idempotent.facts.report.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/single.run.facts.report.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template.j2 create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template1.j2 create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template_del.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/invalid.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/patch.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_delete.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_get.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_patch.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_post.yaml create mode 
100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_put.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/meta/main.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/action_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/defaults/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/bad_operator.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/cli_command.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/contains.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/invalid.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/output.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/prompt.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/test_local.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/timeout.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/backup.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/match_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/prompt.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/replace_tasks_template.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/snmp.j2 create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/src.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/main.yml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template_del.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/cleanup_tests.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/preparation_tests.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template_del.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template_del.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template_del.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/templates/cli_test_case_01.cfg create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/single_run_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml create mode 100644 
ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/cleanup_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/preparation_tests.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/tasks_template.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/templates/cli_test_case_01.cfg create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/defaults/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/meta/main.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/tasks/main.yml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2 create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.10.txt 
create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.9.txt create mode 100755 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/run_test_cases.sh create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_01_dict_diff_with_key_name.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_02_dict_diff_with_key_other.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_03_dict_diff_without_key.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_04_dict_diff_with_similar_dict.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_05_dict_diff_left_only.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_06_dict_diff_left_only_with_none.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_07_dict_diff_skeleton_only.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_08_list_diff_with_key_name.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_09_list_diff_with_multi_keys.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_10_list_diff_with_key_other.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_11_list_diff_with_similar_list.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_12_list_diff_with_left_only.yaml create 
mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_13_list_diff_with_left_only_with_none.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_14_list_diff_skeleton_only.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_15_list_of_list_diff.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_16_complex_list_with_dict_diff.yaml create mode 100644 ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_diff_util.py create mode 100644 ansible_collections/dellemc/openmanage/.github/CODEOWNERS create mode 100644 ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/ask_a_question.md create mode 100644 ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/config.yml create mode 100644 ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 ansible_collections/dellemc/openmanage/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml create mode 100644 ansible_collections/dellemc/openmanage/.gitignore create mode 100644 ansible_collections/dellemc/openmanage/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/openmanage/FILES.json create mode 100644 ansible_collections/dellemc/openmanage/LICENSE create mode 100644 ansible_collections/dellemc/openmanage/MANIFEST.json create mode 100644 ansible_collections/dellemc/openmanage/README.md create mode 100644 ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/dellemc/openmanage/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/openmanage/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/openmanage/docs/ADDITIONAL_INFORMATION.md create mode 100644 
ansible_collections/dellemc/openmanage/docs/BRANCHING.md create mode 100644 ansible_collections/dellemc/openmanage/docs/CODE_OF_CONDUCT.md create mode 100644 ansible_collections/dellemc/openmanage/docs/COMMITTER_GUIDE.md create mode 100644 ansible_collections/dellemc/openmanage/docs/CONTRIBUTING.md create mode 100644 ansible_collections/dellemc/openmanage/docs/DEBUG.md create mode 100644 ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md create mode 100644 ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md create mode 100644 ansible_collections/dellemc/openmanage/docs/ISSUE_TRIAGE.md create mode 100644 ansible_collections/dellemc/openmanage/docs/MAINTAINERS.md create mode 100644 ansible_collections/dellemc/openmanage/docs/README.md create mode 100644 ansible_collections/dellemc/openmanage/docs/SECURITY.md create mode 100644 ansible_collections/dellemc/openmanage/docs/SUPPORT.md create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst create mode 100644 
ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst create mode 100644 
ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst create mode 100644 
ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst create mode 100644 
ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst create mode 100644 ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst create mode 100644 ansible_collections/dellemc/openmanage/meta/execution-environment.yml create mode 100644 ansible_collections/dellemc/openmanage/meta/runtime.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml create mode 100644 
ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml create mode 100644 ansible_collections/dellemc/openmanage/plugins/README.md create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py create mode 100644 
ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py create mode 100644 
ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py create mode 100644 
ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py create mode 100644 
ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py create mode 100644 
ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py create mode 100644 ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py create mode 100644 ansible_collections/dellemc/openmanage/requirements.txt create mode 100644 ansible_collections/dellemc/openmanage/requirements.yml create mode 100644 ansible_collections/dellemc/openmanage/tests/.gitignore create mode 100644 ansible_collections/dellemc/openmanage/tests/README.md create mode 100644 ansible_collections/dellemc/openmanage/tests/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/tests/requirements.txt create mode 100644 ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/__init__.py create mode 100644 
ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py create mode 
100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py create mode 
100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py create mode 100644 
ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py create mode 100644 
ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py create mode 100644 ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py create mode 100644 ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml 
create mode 100644 ansible_collections/dellemc/os10/.gitignore create mode 100644 ansible_collections/dellemc/os10/FILES.json create mode 100644 ansible_collections/dellemc/os10/LICENSE create mode 100644 ansible_collections/dellemc/os10/MANIFEST.json create mode 100644 ansible_collections/dellemc/os10/README.md create mode 100644 ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/os10/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/os10/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst create mode 100644 ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst create mode 100644 ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst create mode 100644 ansible_collections/dellemc/os10/docs/os10_aaa.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_acl.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_bfd.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_bgp.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_copy_config.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_dns.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_ecmp.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_fabric_summary.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_flow_monitor.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_image_upgrade.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_interface.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_lag.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_lldp.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_logging.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_network_validation.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_ntp.md create 
mode 100644 ansible_collections/dellemc/os10/docs/os10_prefix_list.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_qos.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_raguard.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_route_map.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_snmp.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_system.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_template.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_uplink.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_users.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_vlan.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_vlt.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_vrf.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_vrrp.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_vxlan.md create mode 100644 ansible_collections/dellemc/os10/docs/os10_xstp.md create mode 100644 ansible_collections/dellemc/os10/docs/roles.rst create mode 100644 ansible_collections/dellemc/os10/meta/runtime.yml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml create mode 100644 
ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png create mode 100644 ansible_collections/dellemc/os10/plugins/action/os10.py create mode 100644 ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py create mode 100644 ansible_collections/dellemc/os10/plugins/cliconf/os10.py create mode 100644 ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py create mode 100644 ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py create mode 100644 ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py create mode 100644 ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py create mode 
100644 ansible_collections/dellemc/os10/plugins/modules/bgp_validate.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/mtu_validate.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/os10_command.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/os10_config.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/os10_facts.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py create mode 100644 ansible_collections/dellemc/os10/plugins/modules/wiring_validate.py create mode 100644 ansible_collections/dellemc/os10/plugins/terminal/os10.py create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml create mode 
100644 ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2 create mode 100644 
ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1 create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1 create mode 100644 
ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml 
create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1 create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1 create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/README.md create mode 100644 
ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_qos/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE create mode 100644 
ansible_collections/dellemc/os10/roles/os10_route_map/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/README.md create mode 100644 
ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2 create mode 100644 
ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml create mode 
100644 ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml create mode 100644 
ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/README.md create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2 create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml create mode 100644 ansible_collections/dellemc/os10/tests/integration/target-prefixes.network create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2 create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2 create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml create mode 
100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2 create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2 create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml create 
mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2 create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2 create mode 100644 
ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml create mode 100644 ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml create mode 100644 ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/dellemc/os10/tests/sanity/requirements.txt create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_ create mode 100644 
ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py create mode 100644 ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py create mode 100644 ansible_collections/dellemc/os6/.ansible-lint create mode 100644 ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml create mode 100644 ansible_collections/dellemc/os6/.gitignore create mode 100644 ansible_collections/dellemc/os6/COPYING create mode 100644 ansible_collections/dellemc/os6/FILES.json create mode 100644 ansible_collections/dellemc/os6/LICENSE create mode 100644 ansible_collections/dellemc/os6/MANIFEST.json create mode 100644 ansible_collections/dellemc/os6/README.md create mode 100644 ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/os6/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/os6/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/os6/docs/os6_aaa.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_acl.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_bgp.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_interface.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_lag.md create mode 100644 
ansible_collections/dellemc/os6/docs/os6_lldp.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_logging.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_ntp.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_qos.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_snmp.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_system.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_users.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_vlan.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_vrrp.md create mode 100644 ansible_collections/dellemc/os6/docs/os6_xstp.md create mode 100644 ansible_collections/dellemc/os6/docs/roles.rst create mode 100644 ansible_collections/dellemc/os6/meta/runtime.yml create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/README.md create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml create mode 100644 ansible_collections/dellemc/os6/plugins/action/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/action/os6.py create mode 100644 ansible_collections/dellemc/os6/plugins/cliconf/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/cliconf/os6.py create mode 100644 ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py create mode 100644 ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py create mode 100644 
ansible_collections/dellemc/os6/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/modules/os6_command.py create mode 100644 ansible_collections/dellemc/os6/plugins/modules/os6_config.py create mode 100644 ansible_collections/dellemc/os6/plugins/modules/os6_facts.py create mode 100644 ansible_collections/dellemc/os6/plugins/terminal/__init__.py create mode 100644 ansible_collections/dellemc/os6/plugins/terminal/os6.py create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml create 
mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml create mode 100644 
ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml create mode 100644 
ansible_collections/dellemc/os6/roles/os6_logging/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/README.md create mode 100644 
ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml create mode 100644 
ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml create mode 100644 
ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/README.md create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2 create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml create mode 100644 
ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml create mode 100644 ansible_collections/dellemc/os6/tests/.gitignore create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml create mode 
100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml create mode 100644 ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/dellemc/os6/tests/sanity/requirements.txt create mode 100644 
ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py create mode 100644 ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py create mode 100644 ansible_collections/dellemc/os9/.ansible-lint create mode 100644 ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml create mode 100644 
ansible_collections/dellemc/os9/.gitignore create mode 100644 ansible_collections/dellemc/os9/COPYING create mode 100644 ansible_collections/dellemc/os9/FILES.json create mode 100644 ansible_collections/dellemc/os9/LICENSE create mode 100644 ansible_collections/dellemc/os9/MANIFEST.json create mode 100644 ansible_collections/dellemc/os9/README.md create mode 100644 ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/os9/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/os9/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/os9/docs/os9_aaa.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_acl.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_bgp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_copy_config.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_dcb.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_dns.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_ecmp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_interface.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_lag.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_lldp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_logging.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_ntp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_prefix_list.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_sflow.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_snmp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_system.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_users.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_vlan.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_vlt.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_vrf.md create mode 100644 
ansible_collections/dellemc/os9/docs/os9_vrrp.md create mode 100644 ansible_collections/dellemc/os9/docs/os9_xstp.md create mode 100644 ansible_collections/dellemc/os9/docs/roles.rst create mode 100644 ansible_collections/dellemc/os9/meta/runtime.yml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml create mode 100644 ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/plugins/action/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/action/os9.py create mode 100644 ansible_collections/dellemc/os9/plugins/cliconf/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/cliconf/os9.py create mode 100644 ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py create mode 100644 ansible_collections/dellemc/os9/plugins/module_utils/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py create mode 100644 
ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py create mode 100644 ansible_collections/dellemc/os9/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/modules/os9_command.py create mode 100644 ansible_collections/dellemc/os9/plugins/modules/os9_config.py create mode 100644 ansible_collections/dellemc/os9/plugins/modules/os9_facts.py create mode 100644 ansible_collections/dellemc/os9/plugins/terminal/__init__.py create mode 100644 ansible_collections/dellemc/os9/plugins/terminal/os9.py create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2 
create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml create mode 100644 
ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml create mode 100644 
ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/LICENSE 
create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml create mode 100644 
ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml 
create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml create mode 100644 
ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml create 
mode 100644 ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml create mode 100644 
ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE create mode 100644 
ansible_collections/dellemc/os9/roles/os9_xstp/README.md create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2 create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml create mode 100644 ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml create mode 100644 ansible_collections/dellemc/os9/tests/.gitignore create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/__init__.py create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml create mode 100644 
ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml create mode 100644 ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml create mode 100644 ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/dellemc/os9/tests/sanity/requirements.txt create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg create mode 100644 
ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py create mode 100644 ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py create mode 100644 ansible_collections/dellemc/powerflex/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/powerflex/FILES.json create mode 100644 ansible_collections/dellemc/powerflex/LICENSE create mode 100644 ansible_collections/dellemc/powerflex/MANIFEST.json create mode 100644 ansible_collections/dellemc/powerflex/MODULE-LICENSE create mode 100644 ansible_collections/dellemc/powerflex/README.md create mode 100644 ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/dellemc/powerflex/changelogs/changelog.yaml create mode 100644 
ansible_collections/dellemc/powerflex/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/powerflex/docs/ADOPTERS.md create mode 100644 ansible_collections/dellemc/powerflex/docs/BRANCHING.md create mode 100644 ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md create mode 100644 ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md create mode 100644 ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md create mode 100644 ansible_collections/dellemc/powerflex/docs/INSTALLATION.md create mode 100644 ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md create mode 100644 ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md create mode 100644 ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md create mode 100644 ansible_collections/dellemc/powerflex/docs/Product Guide.md create mode 100644 ansible_collections/dellemc/powerflex/docs/Release Notes.md create mode 100644 ansible_collections/dellemc/powerflex/docs/SECURITY.md create mode 100644 ansible_collections/dellemc/powerflex/docs/SUPPORT.md create mode 100644 ansible_collections/dellemc/powerflex/meta/execution-environment.yml create mode 100644 ansible_collections/dellemc/powerflex/meta/runtime.yml create mode 100644 ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/device.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/info.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py create mode 
100644 ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/sdc.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/sds.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py create mode 100644 ansible_collections/dellemc/powerflex/plugins/modules/volume.py create mode 100644 ansible_collections/dellemc/powerflex/requirements.txt create mode 100644 ansible_collections/dellemc/powerflex/requirements.yml create mode 100644 ansible_collections/dellemc/powerflex/tests/requirements.txt create mode 100644 ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/__init__.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdk_response.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py create mode 100644 
ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py create mode 100644 ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py create mode 100644 ansible_collections/dellemc/unity/CHANGELOG.rst create mode 100644 ansible_collections/dellemc/unity/FILES.json create mode 100644 ansible_collections/dellemc/unity/LICENSE create mode 100644 ansible_collections/dellemc/unity/MANIFEST.json create mode 100644 ansible_collections/dellemc/unity/MODULE-LICENSE create mode 100644 ansible_collections/dellemc/unity/README.md create mode 100644 ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/dellemc/unity/changelogs/changelog.yaml create mode 100644 ansible_collections/dellemc/unity/changelogs/config.yaml create mode 100644 ansible_collections/dellemc/unity/docs/ADOPTERS.md create mode 100644 ansible_collections/dellemc/unity/docs/BRANCHING.md create mode 100644 ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md create mode 100644 ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md create mode 100644 ansible_collections/dellemc/unity/docs/CONTRIBUTING.md create mode 100644 ansible_collections/dellemc/unity/docs/INSTALLATION.md create mode 100644 ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md create mode 100644 
ansible_collections/dellemc/unity/docs/MAINTAINERS.md create mode 100644 ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md create mode 100644 ansible_collections/dellemc/unity/docs/Product Guide.md create mode 100644 ansible_collections/dellemc/unity/docs/Release Notes.md create mode 100644 ansible_collections/dellemc/unity/docs/SECURITY.md create mode 100644 ansible_collections/dellemc/unity/docs/SUPPORT.md create mode 100644 ansible_collections/dellemc/unity/meta/execution-environment.yml create mode 100644 ansible_collections/dellemc/unity/meta/runtime.yml create mode 100644 ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py create mode 100644 ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py create mode 100644 ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py create mode 100644 ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/cifsserver.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/filesystem.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/host.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/info.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/interface.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/nasserver.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/nfs.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/nfsserver.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/smbshare.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/snapshot.py create mode 100644 
ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/storagepool.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/tree_quota.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/user_quota.py create mode 100644 ansible_collections/dellemc/unity/plugins/modules/volume.py create mode 100644 ansible_collections/dellemc/unity/requirements.txt create mode 100644 ansible_collections/dellemc/unity/requirements.yml create mode 100644 ansible_collections/dellemc/unity/tests/requirements.txt create mode 100644 ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py create mode 100644 
ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py create mode 100644 ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py (limited to 'ansible_collections/dellemc') diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml new file mode 100644 index 00000000..c2ae1dd0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml @@ -0,0 +1,42 @@ +name: CI +on: + # Run CI against all pushes (direct commits, also merged PRs), Pull Requests + push: + pull_request: + # Run CI once per day (at 06:00 UTC) + # This ensures that even if there haven't been commits that we are still testing against latest version of ansible-test for each ansible-base version + schedule: + - cron: '0 6 * * *' +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - devel + runs-on: ubuntu-latest + steps: + + - name: Check out code + uses: actions/checkout@v1 + with: + path: 
ansible_collections/dellemc/enterprise_sonic + + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + python-version: 3.9 + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install ansible_collections.ansible.netcommon + run: ansible-galaxy collection install ansible.netcommon -p ../../ + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color diff --git a/ansible_collections/dellemc/enterprise_sonic/.gitignore b/ansible_collections/dellemc/enterprise_sonic/.gitignore new file mode 100644 index 00000000..c6fc14ad --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. 
Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/dellemc/enterprise_sonic/.yamllint b/ansible_collections/dellemc/enterprise_sonic/.yamllint new file mode 100644 index 00000000..6f0d12b3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/.yamllint @@ -0,0 +1,14 @@ +--- +extends: default + +ignore: | + .tox + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/ansible_collections/dellemc/enterprise_sonic/FILES.json b/ansible_collections/dellemc/enterprise_sonic/FILES.json new file mode 100644 index 00000000..39a1d634 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/FILES.json @@ -0,0 +1,4702 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 
1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47a1db3b7c5d49f80307023f42c78c5efdf5efa0b1f507f1106b96179b64dc4c", + "format": 1 + }, + { + "name": "tests/regression/hosts", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22f4ec1024a7ff0f9d471735392d71eac3378f5c7ddc7cf1e0c42a958def8e78", + "format": 1 + }, + { + "name": "tests/regression/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33ef239ff7b87fa17a8e41edd5cfcba01913ed7f29ae9220d1fcd294f7b231d7", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0709445b8b0c43b7436aae58dd21ece6fc1f5c1b63346e4cf7f663382278c97", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks/action_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "119fd02d958cb737e829d957a02c45b92e19dd44150723f75a9b77bdd098acb4", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d898a4d581cee3c6dfdf32cb63c0e4d0a553e7bf894ea821faca6cb6f5b533fd", + "format": 1 + }, + 
{ + "name": "tests/regression/roles/sonic_bgp_neighbors/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5984f513be79d50fcd8ee692355ea36cde13ff8a0261bb8ec9a4d80099310514", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d53284cc72caddd5bc84a507bb7330af983d44c35427ac7fbe623a29aa7d47f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b8363d6259e43af603abde938ed159d1a3a083cbef49fa37095bdc779133921", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40edc63e0538af91144b80f5a699aaaa241151458d0f4a556953b240d97ef068", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf32b77246c5b9050e568364312b5b812eefbb1e4123dd527381614b7620e2f6", + "format": 1 + }, 
+ { + "name": "tests/regression/roles/sonic_lag_interfaces/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4dac61e6125e688485a484cc6e62a60ca1350ca656f448a6d3064be98c74c2b", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7198a5cabb43600d45494873192676174231f617e4bd15e13ba4e8f10b6944dc", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_lag_interfaces/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5116cc9727de8d455b371ae1876ebb2e874b3bae27e31aa18e29b53aed41f535", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/test_get.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2f3508fde7b2945a3bf1465952903d3e75c3aa05aabe36cef40c1cd4b360ff6", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/test_delete.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7bc736ed91ef8290a3c45dfe9b8d350a01996eff4b103c1d4e90852b23a80095", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/test_post.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d11c6cb5b64a6b2a4b3edd715092a67e28597ded5da11073ff1ec47db284a6b", + "format": 1 + }, + { + "name": 
"tests/regression/roles/sonic_api/tasks/test_patch.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7c5c4dab020223f06f75446e5939d0a4c486d9884e961f564b839483f713327", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eac1646289012eda25f0d8bca465f95a23a0850f8a1d091097ccb072f4085b0c", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/test_put.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "261a7ba9a567b95b01df14b7e871846c2c3225bea9c04bdd946f16f3edbb017e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/invalid.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ee03a07e59693b9e1d817500caadabf8a4aec1e6c738d37d161510072e6cade", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a246f4c1b1644f52f5904f83b028181aef1a2cce126f245f4f5b9befa8924e55", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/tasks/patch.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c419a4942f8ef3b29bdd307b3ac95c2e516285c7a63e7fdb02bf41937f13aa1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bca81357e79e2b9e2ecb820a7f32d0f1da5266e69ef6f5d729d8dbb023eedb1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_api/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9c25024f5d908d307a288172699971c215b190bda0d6092793202822eed1fe6", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57d68154ace90d7cdf438b926ac4ff8d4197f1865019473d3078c2a48555b103", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c28676c6385f511e2875acca9d5f3ffc05418b4fc6220838fd8c7588272d93d2", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19a1f1cb2ed8eeea3d1e48936ac12afcd8a8e3f2c50318a99cc4fb0167a26416", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf66c8d3e6ab8abfe99759ba204cf9bc33e8108248489ad13dfcbc8b6e8b1484", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_af/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + 
"format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d9e4c73c7952f36475c79fbf88ed2e5866588bafe4acec029dfc50071b75e5a", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54286b592a5d524eae86898f55b0d50583409bad0022ab7c1a136b98664ae714", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e40faf23f810bdd4ac7c98dc1c7004625abf54db17a9799898d35be0092b6a22", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e3c20b43609a7a0a867ef1f51dd5b891e6e8b891f5f07d04aada1883d1b6c96", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_interfaces/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/templates", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99b3e8c1065b4d95c39957ea69fe09994c59af516a58101ac68b931bd788a98e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a84c7d4404bbb768b0634e9321d7a083bc962efb448821d56e10703c582e8bef", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2abb0cea9c4eb807effba8647f512ff4e4fdd2cd5252b5d60fae4f6ffc9e060c", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31c383fa30c2abeca617c48be2fc75db4d060cbc9236e883367dd2ca2304e009", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/tasks/tasks_template_del.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e752456ebc7460997bb44a41ea07ebc6a3fedadbb2982d04df82456d0cb8e1f2", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f2800620b593938fecb1b8a7354c680bcc3f1fcbb59381ca25889d2424b82198", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_tacacs_server/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/test_local.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34965a77f9bac6ffd2653154ed6557afe435b6c6d0ad6756eb7aed5628c7d6f5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/bad_operator.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3373786cbc0c28195672a291d5b1d6a2448a3bef439403a1379ccc0ea89db16e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/output.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eedd0d5419877e550dc7c78533a383b251a37355cacc754fe6f718a0e8bc17cf", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/timeout.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d800cc09a8f6d98aae25410d7238fabe1cd7a8e0785dbae6fa83a8d9096b6f54", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/contains.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cdc7492975a756c71eb807b856492e6880b93ed4c9b39a5e05b7fc3019e371c", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/cli_command.yaml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "542bb18b9c65a5368b080c2784cc4f16ab167609dd661cf9d33588c62f90b2a5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/invalid.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74b8fc35c0e680a8624044c863392393c8aa353d93f3a4a5c17e2b125c72e1bd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "770573da53e590ea699b0b226a5a91dcc1457b192b5759490033fb86c71ad2e0", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/tasks/prompt.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "366447b1b94627afdbcf11ad0a6e23ed74a4b6ef3f75b34ceb33d1d0645b571f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5b387bd0d09356a40ee76b1d0a90106ad3766c7e8692c81cac72d55e183e31f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_command/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a5cbf2e2e7af16e77a326ef68a4978c1c765ed72d750e58f9f70ed732ef43669", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f2c19cc1d5d88c8ab54c56bac91d31ca61f168a9b2c9cc5e92701f1c0c51937", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e40faf23f810bdd4ac7c98dc1c7004625abf54db17a9799898d35be0092b6a22", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f72f840292265f4c4a9d96c0ec18aec1f73302bcd870b4b351ed319286eddd3", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_as_paths/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7bf6d628ad50843b81ac4b8221bd23398c18a2bcbe92c98e8c7da7d24eec798", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f9acb095a9607444ec0702ae6a45672173b6cacec6e0a417bdbcfe9fd604ffb6", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c07e16779a411ed03160eec075e1e68ffafa845a1fb7ee61395279df34e0f868", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f64b5c536fe544240ce9c38f0cb6cd324c5759d627a5c290bf0347fd2740512", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vlans/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f0bff3d9b88e7d50ba7d3b8cc320b341d90fe84aedef4a841c2bfd08ea07ee8", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5eef5b94c6fc079ac62361205017ccae7ff45227fd22ab16599376c9834c27b", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d77cf2a8aac0500bc814f27970125522820fe871b8dac2779a2318a126ccdb6e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc014f65407813d0e13ee7f4f778ee7e2f79bf26e4089c21a332a425395637ec", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04127c5fbb729e1e955d2aeaadb9ec67d2df28afaa7cd9918a836942fbdaed9a", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_prefix_lists/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/common", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/common/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/common/templates/task_template.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37b12abf0655a42fd64fdea6887fde54768b767bd7462c7012f380a31f88e9a2", + "format": 1 + }, + { + "name": "tests/regression/roles/common/templates/task_template1.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f4cef7b0dc8762cbfa1d9de0ce48d24aedaf0154866fcdcb674e57d337a2d41", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/regression/roles/common/tasks/single.run.facts.report.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51afd3cab10d587776002beedfc327d9856842d346fb3b958639c5e18a9c56c9", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/cli.contains.test.facts.report.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "091ee6abaef30f2c589fc21284a35cf328bcb7de1e363ed764efce289f1143d9", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/idempotent.facts.report.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f228e22999e5b13f192bb7e5064c46e4f8b8dcd817bccb6e1a1d4bf33368c30c", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/cli.test.facts.report.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef3d5981a08ff0adb1ca89e06e6741c7b8c284b4c1a93eca38758c2984a51047", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/cli_tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2dff2da6f27a91052f22a4f0eeacd562333b1feb240dbce16b0ba03913f029e9", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b", + "format": 1 + }, + { + "name": "tests/regression/roles/common/tasks/action.facts.report.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d51ee6515850e5d44140d451c2027d0037dc9b294fd0b5e16986f8fda94803e", + "format": 1 + }, + { + "name": "tests/regression/roles/common/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/common/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f82f73da84954bb7f19f2dceb7105bb98e34f83943514a99533dcde30707c6c", + "format": 1 + }, + { + "name": 
"tests/regression/roles/common/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/common/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcdccb453965a51bf1740643f331108bfd38f1dabd58e303847ea0a616a2d72e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b85a04c09ea8cfe7fecc4f16df9e0b176a60912f0e10763ebe9670abf28526cd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e58c61f59e06878c38a38d9496c6ed895cd2abeda2af54f0c0443f21ca77fba1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3adf9189fd52548ab34379294ae73202e3736016a8cfb7985be05668b0d19b1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d975800ac27a2a441997aa994bba5a04f0fb1794d252bb282eeb8302b018547", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f86c7979a8ba1918a426d3a04d8eea9ca1d1f0bb0e9470e9be5a1a426b0e1aa4", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_ntp/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a96aabcabc3d254e8325892587b996901af87114287fef39cdd36185e46e6af", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09c098dc215af4b393250ae3d4c505193e70a2852d8032f2fbc62be6d74582a3", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b4579fd763f8f7516ebebf80381c0f526b5d107994da1047ba96329973424dc", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93bbfc9186c081b624cc9e5e1f5d891046c2e93638e08ade2bf9bca970b3b5a0", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/tasks/tasks_template_del.yaml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "9a4e4f54ba5b2c1a99608e0e4c42da660ec2e7bcb0cc1000b4510ca803410108", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f4b8ef73168fc7cdbf0582a176198e4034c0c8301120a9e65a170c7e394bde6", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_radius_server/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27f81e38d26fecf36640fe5ce73aaf390bb9ad9fc98e5b10149c44148c5bc444", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f63bf8a1f623c7626ccc7131205a93e63d1850f705e8b939c6fbd33edf6d0f8f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ac0c1d087259eefbdb96467fc90f40d9e326094623648ecd845c4945967e97", + 
"format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9505adb80d90b058a0afa8e2ac8bae2e4e8f18b6d5fb8141d1a5350ed6fe7c73", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "181074d3ddf903f23c2065bbee708f2c5dfb575af70c0061685cb359b9f2cc2b", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebd7db1ebfdb114eb0280e5ff41adaffff92caa2df2f74cc7dc98a33214d36ad", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vxlan/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f16453f6d7fd07d75a5ff1909a51efdd02316b84331c30780dee2258cb462540", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "429e9498f56bd56705727f4b34b5b77c457220a51f7f2c43ff74f86b5e54eddf", + "format": 1 + }, + { + "name": 
"tests/regression/roles/sonic_mclag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59f11ea9543380e101f9d2d9bd11d47f0ec0225aac74b706647b7bb6babad758", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/tasks/tasks_template_del.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dbf73cf30a5e17226e7ef3ae721b8f04642195ab7139cb66c0c95e7c8c49b43", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a3fdc2249981d95b911544645bc81439e0bab922c06eb72d9a53457122539ca", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_mclag/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4729194e8b87f49e0b444373c1b3b2ad2a0b50a0f0ebd894c8abaec1075d706", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks/single_run_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a04afddc08a265c6589fdce4d84bf70aebe36072ef0475503e65b70af97c8137", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa927e1b108c8b0f9744cfc79244782d7e0cef0518db83faab3a34373ffbed04", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ee0b650f4b18b6530df7383f4bf2168d8ee13ca49946f3f37e415dce05aff37", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks/cli_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37f946b76a42a134a1e8bd8bd337760512e44038b418be2d3943a2c05459f6f2", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "895f8900d570fdfc3c187e09c688ceefb21cd13bcd8af8b78beb2dc7b59c5a4b", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29e2bb2399e1576c889e4d1a2c915773ca57c3b0befa939be5c1f30bd422f6c1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_users/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 
+ }, + { + "name": "tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e05b1c592caa3ffa0aef7d93d7f1e12d42c1eb98ce4f6b5b8736ac698ebd351", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0a4218ce9437e1a54f2edc499296591ffdf35f861f5d12eee0f53b76292032f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4f683f060d44c641f18b803daa9e74f8d6eba27f2831b3fd56768c2679e3334", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a640983377723baab16e6b9a3fd9b5f68fd947abd94282237c8591a396fefc94", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f27630400987fa257da883e08c8fc908510f51e2dea4bbba00219d329fe3eae4", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31e02192de52bb9e3299115d5392687e7baf808d0224e168e426bf1fd98a6048", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_vrfs/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + 
"format": 1 + }, + { + "name": "tests/regression/roles/sonic_system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63fc9c716055d7d4f82e422447281cc8abb5a04246d72f2c39d9263005be9df8", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce433fedccb55543967ac45623afcdbff87204bd9644ba9837455656014869e5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce1db9efc87c607d309aa70caf435631ddcab6ac40ad5c7011f2e95346a021e9", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88c9dd3878632528a559747ecfb71bcf3e0664de2a5da122e016d4ec013c610d", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/tasks/tasks_template_del.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "351f58c0c80bb45715a3bb406182187784093bc9e3611d08142cc290309d7d92", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d09922e1001ecaedc3e5326bd7f1c4e9133357ba2eba8df4c8fe8e9ffb9eb4b", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_system/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/regression/roles/sonic_system/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/templates/src.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f34c81b433f45db921128f31abcd6b7292fe74b83b00e6afea2248d04180ae3f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/templates/snmp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6266bb7ac15be37fb065fafa521e12a666b9804d4518a395119fa8036b0cbf29", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a007378ceca161a4eb1db9203784be6954e3f461c5fd78b888ffab8704c9fb02", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/replace_tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e7ab0a5a93310cfbee8b1af86f2f0762fb2bef823e7db61c0a46582f0f93744", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83bff7c7bea4a71d7f170f2d9b1986bd0c7599a93734900e8f33aae7c2c58188", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/prompt.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d6c986e2e9e1cd87f431968150a20f814a98571d907184d6d52be627ca7cab3", + 
"format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1da61c5de56ece70ce75e17370d095c770e7d38e405130f2d2630f5667e312c", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/match_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8f50c7e79553dc6c817ad9877d33a1f81b2263d33dd2f41357e482b7aad9cd9", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/tasks/backup.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d5b51e2b07f37b5b249f996f8a96eac0fa5d039b61878f955a713e87d0a62b5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2730670a3b92845afba70ef5f44310c2f0d2e00786e5b31cb5fc7232f10f13dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_config/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e524bd719ff9f90f29484d75dac7524b7cc1be6ee3cbb968c4e7b1a6c5794d3b", + "format": 1 + }, + { + "name": 
"tests/regression/roles/sonic_static_routes/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ec3be9a9befda131f6bb0f763bd36fb5e42fc826611408f8e9a0b9428a0b558", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05c972841c89a97abfeffe3ea0ce4df26d90a7ea0e4b12a43e94391dc8f4a8a1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "162e1ee53ad29b43871e3319c81a8a897c3fa5e431c3adfa11225c39819c1b68", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2df5842ca274f91f4c478ebfa10e895039cffe8e44eff6eb846a0206feb04192", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_static_routes/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/templates/regression_html_report.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba914f5e928e8d590b975bc1392def64bf9724d0a405a17e20c2a40ff297ff34", + "format": 1 + }, + { + "name": 
"tests/regression/roles/test_reports/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c2f62bacde14dae48a0123e1512b5e1b99b3f1fd4163698e67c8e3fd77b471e", + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/test_reports/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcdccb453965a51bf1740643f331108bfd38f1dabd58e303847ea0a616a2d72e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d9027febd06263adb0f67629fb7b0527fc899eb65e5f6a9f51975014f6e7cf8", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d61e40a8240f2b86ff932423f91581a859ec4dcdaa8f9cdf827fbe50220d23b5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "e94c4316918dfed7d0c87b5b4571e9a12b8d788a630915ba3b284fbb91144a53", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9d8034be99bc0d49696016feafc04a35c2e1d57c78507fe48b7a970e08b59b0", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c74c59e8e666909f5dc072cafbfb243973612317c3a2aef51c9474ddb84c505", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_neighbors_af/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e61f468a3341e060c6f5022a306918865c7b844316956ecaca28d57a5daab261", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cf66706c721d3eea2222ebb0aba2c6fe1b4cdba36cd8bbdbffb39882f551d51", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/tasks/main.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "e40faf23f810bdd4ac7c98dc1c7004625abf54db17a9799898d35be0092b6a22", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8ded0a1e8c9fa73380e115371f1cfc30a59fbe4520fddf1fec4c09f8317ad4d", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l3_interfaces/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94eb56dccb6322b64d45eec3612397a2a008682177606216e39491da17cd53ee", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc7a2c3172e30fa52832d171c90691a412bbb3c73ca1909580e99aa4a30200ee", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d0de7954ee326560a6ae01bfe2109c8ed1ce0fcb4e62167d00f3ff642f61151", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/tasks/tasks_template_del.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"7de7b7896a20012c48a078b7a6d98d3921017dac84e73a7f444723221879f615", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "696ae45b0e5d370c8b2330355a266ecb910da3d29073a6892ca3f756c18af4c8", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_aaa/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "073811d763dcb3be1f2c0d57104df45f46beeab9bb3536bbc2e2f30aefa4ef73", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cad38a766eec3a35af5750b608e7fd59d8ab450b489b0ff9018ba4c9979c1a78", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c07e16779a411ed03160eec075e1e68ffafa845a1fb7ee61395279df34e0f868", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/regression/roles/sonic_l2_interfaces/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f9f8c4a3db4f35e73942f4a63a109181a274470176e955ab0b9a4403446f743f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_l2_interfaces/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a37d04c718145cde6606600895ad433696a4dc3b6b310b04ab2e9f58edebf30", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dae166d577c0cd9eba123111b44403e2144b64f1b5d3cdc99019855ae47c60f4", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bace46482617b72b8d31da031c3196edd8baba912e5314d8453d073897a25189", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"af2a290eb466df24bdb922fea12f682d747f282f863e490f20c5e52b04257c87", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_ext_communities/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d4f4ca7215bfc805c826f3d98811131a6b0d2a4edd3e7e7ebb98ecac9fbf9ac", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fa4dc0183d6c8a679bc13428976c0398b8a87bec0d1032fc6b9d29dc2e4a72a", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee8893139926d4625752da4613cdf0615613a69ad298a5adbf58641af1249f1e", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "487f3c966b63215abc807841742b426ee59657562296a0dfbeab298bf8cd8c7f", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/tasks/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "547f1a09ce0a20c3f9e1ad4b189618dfc6f260b23ddd9c19d29228d08409f228", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9ba4af225692807f8176182acc8f195d083e9be5c9efa46b6dcba30c8e21f04", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_port_breakout/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96df216263dfc8ce5cd25ba67e58fd506f99536d0b72ea98f2bf08dd07a787f4", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "516ce145037ca601b5913a25a07e14e9b3a7d46692ec81bf1e38274e0ff70734", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ba3b2a9e01ac02e139d1bd1a178e4ec3056645044c3358e8ef7ed9ecb525022", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/defaults", + "ftype": "dir", + "chksum_type": 
null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc406ecec44fdd088a078456ed4385c7df08875bbc1b12bc216a8ce5b88da5c5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp_communities/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa7d7ab0948506ca0f3bf4deba39bc70898dc04b0374a4cce76c8683b4017c53", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/tasks/cleanup_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e2e15fed3ac641fa8ba9f1e38738208075baeea21eb1aa10b0841fb41f41e45", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/tasks/tasks_template.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1b65fd3d5bdec60096b7d91bd758d977b2eed46c249b4df10265401e8dfe6a2", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/tasks/preparation_tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16512a75dbf2fc6cfa66cb442290d1547bc3642a7c045205178b96081b25c75e", + "format": 1 + 
}, + { + "name": "tests/regression/roles/sonic_bgp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0496cb1c3516588ee23d24020b55400241530bfa142471bcc79d25a07064f11", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba7db7bb0225c59771f2c8809ba3de33ec974de3821518f70faea65cc24cdfd5", + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/regression/roles/sonic_bgp/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd", + "format": 1 + }, + { + "name": "tests/regression/image-upgrade.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84e1170d9f27767c3a0d2e7b907d96bb10d74f57f1bb2248236f6a49dc755ce7", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/utils/test_08_list_diff_with_key_name.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53df5bef4ac87a55b8cd11579629df0e6e4ee57792a5d58f59244b66cbb92658", + "format": 1 + }, + { + "name": "tests/unit/utils/test_06_dict_diff_left_only_with_none.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "577edce2fe3f5b3573baca8e1598d03472d94f13022312a9889856d97f76374e", + "format": 1 + }, + { + "name": "tests/unit/utils/test_16_complex_list_with_dict_diff.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2a792a7e39c57489071ab25856b424634aeac2af43667aa4fd3ab7cb13c7d40", + "format": 1 + }, + { + "name": "tests/unit/utils/test_14_list_diff_skeleton_only.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcddcdb3e97ce114fa86f7f962d696168cf148edcbf0cddc8d36df9641ad379b", + "format": 1 + }, + { + "name": "tests/unit/utils/test_07_dict_diff_skeleton_only.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9760ac5089e77476a52ee1efbd4232f38e177b62885ee2e9631f1076272d276c", + "format": 1 + }, + { + "name": "tests/unit/utils/test_15_list_of_list_diff.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"98fe9982ace3f968b67242e35f064d8d9f9a25f73b63f694657b2808275990bf", + "format": 1 + }, + { + "name": "tests/unit/utils/test_09_list_diff_with_multi_keys.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c884c6143486c1ee318eac464482e1b6ec8a025f6baab4c55223714d2cc1d13", + "format": 1 + }, + { + "name": "tests/unit/utils/test_01_dict_diff_with_key_name.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f11925fd9829a7a35c04d3e20c65f0b11d430085ea17dc723abfae4ac4c7793", + "format": 1 + }, + { + "name": "tests/unit/utils/test_03_dict_diff_without_key.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bffd27e737421b22c04880d10937e46f090a9689258c62a00795aeaf5346dec", + "format": 1 + }, + { + "name": "tests/unit/utils/test_12_list_diff_with_left_only.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95311358af54019d9a50561343234568ffe3efb0bedc7a494777364bde5811cb", + "format": 1 + }, + { + "name": "tests/unit/utils/run_test_cases.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef4f165634c7ea1bf73582720f09afb8bd36b659634d87cecb32106f489ee228", + "format": 1 + }, + { + "name": "tests/unit/utils/test_13_list_diff_with_left_only_with_none.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c612be85eaa3e52fd91fea15165a1dd59b099150388454c5860b88963760e84d", + "format": 1 + }, + { + "name": "tests/unit/utils/test_11_list_diff_with_similar_list.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c7344834b8311f648c471bc75751cce4dbccc694d0ad3ee305fea43b48c4445", + "format": 1 + }, + { + "name": "tests/unit/utils/test_05_dict_diff_left_only.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9479c46d12e27f6034cb554d54d7ff56fcb87c73379a8e66812b845e9aa178e9", + "format": 1 + }, + { + "name": "tests/unit/utils/test_10_list_diff_with_key_other.yaml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "85209545034d4a4cc26b1d77ee75501d604ea0f7c5184284ef8384f275e22555", + "format": 1 + }, + { + "name": "tests/unit/utils/test_02_dict_diff_with_key_other.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6acd991d85424f67810eef7c4be94d87645937b2914b229089372e25de8c8214", + "format": 1 + }, + { + "name": "tests/unit/utils/test_04_dict_diff_with_similar_dict.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb4977c361463a2502b6d4b6a62e97ca29aafa7771fb7bb3fd78c7665ec3bab0", + "format": 1 + }, + { + "name": "tests/unit/utils/test_diff_util.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d11c8e7447e4b27ec23d8b524c08edb2e57939c46231644e0999f9a542bf5fab", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9e65f5f05a2180c6335355f0497ff6ec97bc6fe7b58a8e3bd9c3442899cbf8e", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0c306452265a20c55ab0327c7a69da747859d5073f753fc2be7d3eaa675348f", + "format": 1 + }, + { + "name": "changelogs/archive_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dac2cd591c6738698dee9a0e587493d0a4c61ee79d2f6fe64c60333f8ae73f88", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/88-port_breakout-and-execution-environment.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d742b197a152cc147e3bd6477a11a747deef7fd516eda9c7f3c2e4d9000fe991", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/101-bgp-prefix_lists.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f26b66c8066853c84fe32450518d9760adcce95ad683df715a833af6fea111b6", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/63-bgp_af-route_advertise_list.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40e2227d2e3667f49f14757ba091aeb1379c914eea2063f7259191cf4491f3fa", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/105-vxlan-regression-test-fix.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87de2069e628fda750dea1586bd5e9f40276a526935e6d01f78773ac13b70c56", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/62-bgp-vxlan-primary-ip.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad15905abe1a87c8bb5fe6c34ead669762f069a3171d17bab816e35a7046244f", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/100-prefix_lists-resource-module.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "309e213c2a3687e8dc57a65fbf7cacf2f032418766122b0192886aa1d17627ea", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/98-vlans-description.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f25fc69f892ccdc4eb886e11181fe3ab2ceaea79e5be5cfd035b0da61e4643ab", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/81-add-bgp-peer-group-attributes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "537e421e4799a214763280bfd3a4802a5857347a77f462fba03022066ad2dc2d", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/102-playbook-example-updates.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43c855cff9a6bd74b6f1f0bab83f700a82b3e4626319362915f44158ee5863db", + "format": 1 + }, + { + "name": 
"changelogs/archive_fragments/2.0.0/60-bgp-regression-test_fixes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dbdf5dbece77482ae924fe1388c60e66438d707f880b7646fc692a3eeb25a75", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/76-CLI-command-prompt-answer-fix.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21aebb22452bbef55d94d81657cfabaa27c42b1145d73e36329a9c44f63dbd89", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/99-ntp-resource-module.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab72a4cbd13109b6ddc4cc36db329291c06434d8d6d5d9928d7a29d49dccffe2", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/53-oc-yang-compliance.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "656920f3c577cb86e2fd9f95a80ac4edf1713f7738513cd44fbb676576b196fd", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/90-aaa-and-users-bugfix.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36925781ddb1e95d03ab80af6ab298cf103ef5acc7c6fe2be4cc9e3b6b03fe98", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/85-regression-test-fixes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bae305ef61589f0d470b25fb76346f12bacf39dfb1a0ecd75448e30a289626de", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/72-bgp_neighbors-add-attributes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "611ec7ba89300979ad81cae95fc64100fcf4d58ae917b103aca26b69c3924dfb", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/103-standard-interface-naming-and-other-bugfixes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16cc77693ecc9ea440d549c006c6a85c6c738ef8277ffe58e0d17f2c525c1a31", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/58-vxlan-primary-ip.yaml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "ee968bb88126aaf9166905b000beca43714c32c4594e1cd5e828df00940bfb40", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/2.0.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbf43f6b1763d2c3987a38732f999753bb8733d1ad3f78e06cf1515ef77497b2", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/78-aaa-regression-fix.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a6a225719cda02382197d27da9b4ce375d10f08572dadca5c6094a4cc995b01", + "format": 1 + }, + { + "name": "changelogs/archive_fragments/2.0.0/82-static_routes.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b86f183d989887e885b0292789df77ed4d789d0fec1f9a97436032c82466a6f2", + "format": 1 + }, + { + "name": "changelogs/CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c07bc10602bb49ad903484c5c58ecd1bedf2197a6f320da391592823b891cfd", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "85862c6553f33777c8fd2f7fe3cfbe2d840deafc202b404052c6c829fb64495e", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ad2a65538878b7704f2cae149e38beebaf5bc92d3ff7d245b6dbd9ef4762877", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ansible-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0dc2fd66d9bb583a726caf8281f456be2130992b6f93d3155a7bc1a711b855fc", + "format": 1 + }, + { + "name": ".yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f8b92f9b2c5645d9086cdb2d96378b445156e01d5ebeab075f0ac76da82e7c1", + "format": 1 + }, + 
{ + "name": "playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/group_vars/leaf.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60eef68dd51d94525aa1cea56aa6dbaae87a46a3d5fc4efe7879b3b5e79655f2", + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/group_vars/all.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a6c90571d07e82c8a66b09d45260ced0939a233d550f69e7f68dd48dc641ac8", + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/group_vars/spine.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44381e20e833f6a7604b513f0ffc0cc6af6f36a30b1f7a4033c5695971da6201", + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2e9dd7b920b95fac532f8c8ac27bb8faf3c673c95d9195b1f0cb304c4b20339", + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/clos-fabric.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ab1475beffd968cc128f78d5c3c1f2a6a0cfc4af57c360844c50d70e7b33949", + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/bgp_l3_fabric/templates/clos_fabric_cfg.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e43955991fc5aa4ed6e3eb8d6f754950ae86d73085dd609e00b575f438f14863", + "format": 1 + }, + { + "name": "playbooks/common_examples", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_config.yaml", + "ftype": "file", 
+ "chksum_type": "sha256", + "chksum_sha256": "3cc666ac7573cda9a7fb31ec0f6c9522e4b519df6e90b19708ec3d09e676ac04", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_bgp_communities.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf0f646176e380267d83130d1ddff118a8f02a5b3deb72734b0036a119d99f87", + "format": 1 + }, + { + "name": "playbooks/common_examples/interface_naming.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c50115b3792373e596572f74850d719d9ff19cc6cdf0f6487786f1dd39c8d4c", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_vxlans_config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ee295aca02d2b75dfc9e0f9c45ffdf00303290b5bcd8f694fb003762598830c", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_system.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73cb798413beaf4faef806b166154564e2f519d72d1d7caecbca4c5564f8725f", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cf5193bd7b653e8a023070bf4f3c5cd6f1bb12d703345c36250c8a6500739ab", + "format": 1 + }, + { + "name": "playbooks/common_examples/src.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "689fd13743ca803cec2985835752fb0d32b2edeacd10b987c74764da29e02a18", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_command.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ae21487112d3a6ac66d0cd917ac689fb53d8743a8d09571e3337a8b5dc4e25b", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_interfaces_config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c108d1f66b3f6eb47bebd4c547725d8446c61fc2b335ae4b0ae0751c532d2b63", + "format": 1 + }, + { + "name": "playbooks/common_examples/hosts", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"475f5c931add33d3a98320a72d77f626fac1bcb7c1ed0dccb2e0076f82271002", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_bgp.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1203ff6b38fc784f480b0cc61c12e0f61a035ae5143f1cdc344c30f16e7ab5eb", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_api.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3881bace49d2a88accf2297d8c01675617ee1b17517322a3d837f2b002a1c4bb", + "format": 1 + }, + { + "name": "playbooks/common_examples/sonic_aaa.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ea8077dce4c37d517434b92715be316de2b23c4e3b783dd881a841a6e86eabe", + "format": 1 + }, + { + "name": "playbooks/common_examples/patch.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bdce7dfd3911b2d741667a57e3cb96e51a78cced38dd0a5f52a0bf37409f498a", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vxlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vxlans/__init__.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vxlans/vxlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c0ce8d53537f8d62d93cb6eab94f613269a9abb3fd6089a251344f29ace1581", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/l3_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "175568e94084854f2f4338d76b8fe62871258d200dd422db6b77998a1b534c6b", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/l3_interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ce8bd508d419b7a1c80302d56ca3b23b30f0dfaaca99fb5af06b27369077f6d", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/users/users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa4a5a009e2bd6b77a97c254e0f7f462fe556d08028902740e9707f8804bbd13", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/aaa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/facts/aaa/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/aaa/aaa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef4b054752c855757ba2727a9838b5aa61cf88699765c9b842eecc9727048ff5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/static_routes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/static_routes/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/static_routes/static_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78d654467abcbf68095d79cb59c29ba6470ca4e5ac426a9595b7e3f37fc931ae", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/port_breakout", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "494bdf331a5806f341f0125b6ba46ff888a935c4aff4d5b896f405764fc59a8d", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vrfs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vrfs/vrfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ed445c1fa06bfb70ed56605171012c5b5fd9c13b1f0cd3fb81af6214ddc3ae0", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "plugins/module_utils/network/sonic/facts/interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/interfaces/interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2eccbe6d74967f1198f8d1b5858a259a09850226ecc4d940d016be5e44b6e81", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41c01d93815ca5cbb62cd6ac2918041e5a67b792d11dfe291932f0f64b2ce4e1", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_ext_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_ext_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6322ce72c60c1098c7b04ba2c05dc2be5c70ff1760373f131005369995efa440", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/lag_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/facts/lag_interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "abc6c91a997dd3a42dcac331eb3d7b9b8ffc91fd2d80dc389752ef8e5d7e9d37", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/prefix_lists", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/prefix_lists/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/prefix_lists/prefix_lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76cef8ee665598ede7762fb8c87be50b1b189c52818a25e233da7504f24273e5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp/bgp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48d0915a4d169eb7accb4d921fa2956c2019e7e757fd4747bea093d2c6637b4f", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7daff29f60266f474a5ecb39569367146eee672c8bc7b5882cedd713d7c32ef", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/ntp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/ntp/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/ntp/ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8142bfefd6b7392123b7785a0c90ed43e6bf24ad4881fbd6d1df42c0c43fa27", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/system/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/system/system.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7716880b9ab1847ad3a9a4c5f70fdfd7176c33853d4ee9b4711d94bcbf51cc29", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_as_paths", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_as_paths/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f2c4b27f8142c32eaa6219d00b261145ba5990b73ee96f2113ce3b8714b44c2", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vlans/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/vlans/vlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "571ecaf2bef5d15c91f39a0b22922282757d9b32305d4221e9967815bc195a39", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/tacacs_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8c0dc71948a994b45b2ab55601891f26665cc538b23a01e55f8f947afc859bd", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/mclag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/mclag/mclag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd027811ad981ae9b946f7247b648baccab1cbc9b877784ddc9cdb578fe68cc3", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_af/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58292688b6f354c7e19bfc59bdcc91f76fc68cdf7ce4e0968c36ba0fde3e7334", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/l2_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58fb25ed857a13d6af43a2609b90f09ac135694ce44997fd428242815e145b5f", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/radius_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/radius_server/radius_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3f05a591387d9c24cb7dc066621ac75229b9fb5f871e165546daea9fa2d6a87", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors_af/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35667a83682027b3b6051ab7729a43847de0092318b8645a4910b9b2ac003d39", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/facts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/facts/facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4e95f45024b8de1d0898d87a33fa01311168b82f0de6ec7efa012b927f3fe08", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/facts/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vxlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vxlans/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de5a9c713994b11f97572c67af335fef9ef883c5ae28bbe8b3d29509f1479708", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/l3_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea892c0cb638d806819a81b730c8bf272d1d22f5a785feab6bb7899d5d8290f5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/l3_interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/users/users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7016795dc961f47c1363b62c733419ed2f45bb51083b8c13a9cdcdc78393b80", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/aaa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/aaa/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/aaa/aaa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "099cb25564ace85aff3f9b665f0e8291cf30cc566393aa73fe7fd7495adb75b5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/static_routes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/static_routes/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "664fa1bfe5e35df445395c8d97c3664d61b3616e46a1e566dfae8dffe5d3cd51", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/port_breakout", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "ed14043c2d3655852bddd40ee00d07bb3731e109d738a8df15cdfe8c859dfcc3", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vrfs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac08fa8db8afd66c4a6856700ca38615bcf8b8fba6d712981493f5452ea61c62", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9858adae2a8083270cc1d513b77fce9fe93c9789c5968c8e843b843ed940bfab", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64e16da1a2cc3af929d265a73536e9ce637861ecd8723c0043bc7536ca9acbba", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_ext_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/argspec/bgp_ext_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "880c709b98414d38453eadce51b2771ed5d6c8bdd3fb6cbde45b60191e4568f9", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/lag_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/lag_interfaces/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "980ce16a72689da30138007e45e755e9f32b020769e884b8b856505acf8d9863", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/prefix_lists", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/prefix_lists/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3047173ed7946102928cd2b23bfd7117ad4574d86f2a6330bb5a74ffaccd6368", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp/__init__.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp/bgp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28692ff70358779f277a4bb9f5c2dc2b9b53ab4288b2102e01ae5394c4a12cc9", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_neighbors", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_neighbors/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_neighbors/bgp_neighbors.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94a1aaed1dd4bd1ed6965983a5a40e33f68a9af96d4610fed1ff318effa0724c", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/ntp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/ntp/ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "027ba50a79f0347df9702030f8849258711e516df33da4b8949b51cad88d3c86", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/system/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/system/system.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53707e1f9e6d035be3514ea21d61a0fec4a0dd75ebeb4e44a803f63a3020be7d", + "format": 1 + }, + 
{ + "name": "plugins/module_utils/network/sonic/argspec/bgp_as_paths", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_as_paths/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9ccf45561ffa483c765a8c0ce00a2818e271c43134f9c46b3dfb3aee80f12c7", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vlans/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/vlans/vlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0f599c09eaf915361ca18d5898de652abea87835ababe1ac726f8367a6fabd5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/tacacs_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dcd58e947b120be713fa3f571f6c5485cab6bdfe7efc75a4e90cec626a46a71", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/mclag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/mclag/mclag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"df57beee6331242c7c1cced8fea388ec78291d8ce2b32c20c5e0e50738bfb9f2", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_af/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb90a1d43f0acc3ab981e9579533505e4c23fc7d5c045b3266e13ad41b4606fa", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/l2_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3762f248b6f01066931467b74ffbe37b8d8c535fc97fe571f6d01acb1669f74c", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/radius_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00a2640dd2eacd73a6ee61c7f00f913964e9a3072f0550682f6d4732ff119c58", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_neighbors_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/bgp_neighbors_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c4ef73270de54df0b340f4fefb1ba3ec2b7668fd5260a8013a9efc9cead738e", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/utils/bgp_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "efbf8c72fe520e860020fceef08d8e3a9c3aaa10fb3601999d96d604aceb09a7", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/utils/interfaces_util.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "356468c4f1fdb40153c2b220ddb2c5441feecd4a177067aef748edc824feaa1a", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/utils/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b6f8897c68aaff47fd0bc497e5d99bbf71d5a78837c000f2b0d4ae719c44300", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/sonic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2852d2baa75d3b3a9260f156c500738eb2d37ef2861a6d63e20290c93bc7c8c1", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/vxlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/vxlans/vxlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "751480b64c2188fb97cc3eabde60a7d328fc780bb82dac9dbb215ce75da58b3e", + "format": 
1 + }, + { + "name": "plugins/module_utils/network/sonic/config/l3_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79d2b8680dc3f023e536ab624ad00c3b36c2d592a22f9cde98ffa66bc95fa2c8", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/users/users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45344f3b1a07ad06af3e0cda474f78bebde079b4a929d27a9e99a888aa3231fc", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/aaa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/aaa/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/aaa/aaa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eed3f6afdc941372f55212449b300bdfc5fa0150eb5068dd98848133ae8eb7ff", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/static_routes", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/static_routes/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + 
{ + "name": "plugins/module_utils/network/sonic/config/static_routes/static_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "146d536b99180f3f99e0c0e9c0d0b97487b2e1d6b736c85e448bb17b6dbe96f5", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/port_breakout", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b925177c3c5e66b32066d1c16d45c73878da5e19f7f14d309aeabebe90ef8c8", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/vrfs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/vrfs/vrfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "121221ad6e55fffad0b00ca0a5e7cd58b35a4e7a575481dfd074c6b41411ad31", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/interfaces/interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4289ea78fef1e900174cf355a7093f5ea144573abdf7bcab63879430d8a1d3d", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"873dc9442732042397ac3346598dbfc8040eff5b388a6a6c5462d156ea6c999e", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_ext_communities", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_ext_communities/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a66f6fa13607ce0aae48f7ec944cc7410fd2443efa696fc59d254803402aea99", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/lag_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42ea3ce2e2c15ec958f3072986def9bb6bbeee7e523a54e0cde7923ab7e855cb", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/prefix_lists", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/prefix_lists/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fad647ed365fd0991abcb3f70efb94c443dc8ab0fe19751690cfb5b529467763", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/config/bgp/bgp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "294427bd5c538c82eca5212bcfb57967230aa64892c5e2073ad12bf889381a97", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_neighbors", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f94c033887e9c1065c0a8155bb7e6f36888e2e66c7e475d86400dc87660a6489", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/ntp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/ntp/ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a19fdad43d518a26619fc9fb39a2d6ef3362cd1cc89252d74f69dc760ed815b3", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/system/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/system/system.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a59efae25b1106a79087ecdfcb268fe2c6566ea61af795cce9d806fbe9f89e8", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_as_paths", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fff98321324ac64feb8edc5f8042f16bd4fa145c36781d53e86978d6edd9637", + "format": 1 + }, + { + "name": 
"plugins/module_utils/network/sonic/config/vlans", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/vlans/vlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f26c8eeb4cd0b4de7fd058ef2a7d1e7f728cd5d4acb0f78f19d5ea729569255", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/tacacs_server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6019047cfa54495d68c7ddca3b1b39bdb90d0e8854dd7835d8a0ad31fe55226a", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/mclag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/mclag/mclag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cb43549024ce183291b1dbfd8512ee25d9401a9413e631457cdc1852dbdf98e", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd007d97137f9f87e82a2e3b3e545cdc811908ff7bf3ae142d304b0732ddc8db", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/l2_interfaces", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbb277af89e7ebe04824f8c6c31cbfba3882f67a084861507153a2ef3c50c01c", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/radius_server", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/radius_server/radius_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76ad9bce200ded760e69515a4694b1dbbd32347a2257f5b2841cd64644d48724", + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_neighbors_af", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b34c814844873084b563d07af9334c6ee2d5cb11ae805a05c4a039c08c1d03dc", + "format": 1 + }, + { + "name": "plugins/module_utils/network/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/action", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/action/sonic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e65e39c5bbe05db761122a44c5c3adb7941e0ee0a4500d9c4948ade1ef7602b", + "format": 1 + }, + { + "name": "plugins/httpapi", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/httpapi/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/httpapi/sonic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b2a6e27212e4bffa10e60b4f6b72df823ad8a84445a1039f75efe5296e0941e", + "format": 1 + }, + { + "name": "plugins/cliconf", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/cliconf/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/cliconf/sonic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fbeb05a4a01583778cad49752c4fa8f3562926b4f7cbb399700aa0f59b1168b", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules/sonic_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6b8f8a44ea5ba6c0e6d4369a94cc35b856243595383b24a2c79d8b09dc0fa33", + "format": 1 + }, + { + "name": "plugins/modules/sonic_vrfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f65a8f6e26c21af62aca63db39f12c28226e173706e2475a336f5774c9d0fd6e", + "format": 1 + }, + { + "name": "plugins/modules/sonic_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "754a4198b765e73a9bb40f297a304e8c7c94d4e83839ee07b4a5602c0b9164a7", + "format": 1 + }, + { + "name": "plugins/modules/sonic_tacacs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a610ddc06cd5a8088ed2eae495f4e99f2adbbf3242aac9503b82317ee7dc1212", + "format": 1 + }, + { + "name": "plugins/modules/sonic_radius_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a64fe7ce6837f85d5ebdc5772ab8649ff4e07ee79c2ff56bf29dec4286cc260a", + "format": 1 + }, + { + "name": "plugins/modules/sonic_vlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0820305b7619cfc6080ec69893ea8093bef0224f7078833a9da4ee139e901257", + "format": 1 
+ }, + { + "name": "plugins/modules/sonic_aaa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2a2be0a9b65b42e19f099adf045b875961c5876d7ef17457828bf1c7ffd2833", + "format": 1 + }, + { + "name": "plugins/modules/sonic_l2_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1ee00011322c29a0936114c42775a91932c6b46605109a0c7f53911018f3e85", + "format": 1 + }, + { + "name": "plugins/modules/sonic_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92cfdb9b5e31bddf0ef61f01b255e1f4754c2fe6a1ce47571202ef3155b69d51", + "format": 1 + }, + { + "name": "plugins/modules/sonic_mclag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e55c254e12c77ea06035f9cdd87a4248e89f7cd9d0df068266e7226c78d5fd20", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e531d9a1f281831911dba58b4e5568bea27ae13b5a46119fd0a50c234d80a4f", + "format": 1 + }, + { + "name": "plugins/modules/sonic_l3_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9c8607ca719658fa1a5f4bf20e1bf0b71f06618664b56aa3f3f771c0416d535", + "format": 1 + }, + { + "name": "plugins/modules/sonic_system.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "daf0edd6622a20743c5c9f51d4c4a7732389719390f512885cd3af012e65ad92", + "format": 1 + }, + { + "name": "plugins/modules/sonic_users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1104943f68ff99954e8481b915fefb206af930a9c6d6edc79f43aee233ea1db5", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_neighbors.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "417b1a3e3b325ffbef1b84db13dedef88c9e935d441d3b2f1cd8ed79eaa933bd", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"869e043591404007ed592618bcb282ed59c64c88e02e77b8fca083323bce30dd", + "format": 1 + }, + { + "name": "plugins/modules/sonic_port_breakout.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1896147e09a083d1fbce1900b21c29572b6dd992884947c2ffb4fb9379148e2f", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_ext_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0949a6e579a7eaef3d4b355ecaf328c8357c8b4d1ca7f8cf5624ed95582b81e", + "format": 1 + }, + { + "name": "plugins/modules/sonic_lag_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77d851e7f2915eb77407dccabfb7e9a84a02c740e3bde4067ae22ddd11932088", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_as_paths.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2c98cdd58166716bd27b7c658dd75849959877c0f987a1ad5f189d33762ee8e", + "format": 1 + }, + { + "name": "plugins/modules/sonic_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f596c49a79cc793a11a9f045602ae9dc226dd49b0e6ed5ebebb901814db2c3a", + "format": 1 + }, + { + "name": "plugins/modules/sonic_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66e57eab9be3a92fabd25d58d4b50ad4989b5ed82930b9fa26005a778d004aaa", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_communities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3053900903a2b5998e14d9a055cd0c907e9d4a1fcbcc76aff3efb616977ce038", + "format": 1 + }, + { + "name": "plugins/modules/sonic_bgp_neighbors_af.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06e2639cc7ce2b03bd29e3909bf26525cefbd47035d9cb28927a96ccb91a16ad", + "format": 1 + }, + { + "name": "plugins/modules/sonic_prefix_lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7226bb99d6699bf1f974bafb6659d23949b94afb80cac714c374a2ffb126ad7", + "format": 1 + }, + { + "name": 
"plugins/modules/sonic_static_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0a90d0134fdcbb81da9195eadbd91821ba400199c32c6f8750c549a6653b58a", + "format": 1 + }, + { + "name": "plugins/modules/sonic_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d602d47c8e1bc50ba18d8929117ba98da1dade7d01b4099f7ddec558eeafba0", + "format": 1 + }, + { + "name": "plugins/modules/sonic_vxlans.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c10ca81fd5969960f897c93b2159f6ac6c9ecd05084272433da32ab156f176a", + "format": 1 + }, + { + "name": "plugins/terminal", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/terminal/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/terminal/sonic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2f1d5a838f3d51c0921f6d0cd260d83a81e045fba2f0d1ce0d23648b829a5d3", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6d28b94b5c9ff4aaf737e91c092a668381c47c05904b61b1f6f240d74b565a2", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "rebuild.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dd5b9c7962049017eb8e32db7111774b055979517146675d91a3b389c77bf9c", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "d6e301db0d0e6b8d8df2dac7fe249f0d21f5deadb47edb23f31fc25a87ea704f", + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e30f905b015d1d8a17d3a540cb8892b479fcbf9cb873ac2ff0665fe499f318e", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/LICENSE b/ansible_collections/dellemc/enterprise_sonic/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json b/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json new file mode 100644 index 00000000..06fd002e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json @@ -0,0 +1,43 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "enterprise_sonic", + "version": "2.0.0", + "authors": [ + "Senthil Kumar Ganesan ", + "Abirami ", + "Dhivya ", + "Mohamed Javeed ", + "Nirai Madai ", + "Shade Talabi ", + "Kerry Meyer ", + "Mingjun Zhang " + ], + "readme": "README.md", + "tags": [ + "dellemc", + "dell", + "networking", + "nos", + "sonic" + ], + "description": "Ansible Network Collection for Enterprise SONiC Distribution by Dell Technologies", + "license": [], + "license_file": "LICENSE", + "dependencies": { + "ansible.netcommon": ">=2.0.0" + }, + "repository": "https://github.com/ansible-collections/dellemc.enterprise_sonic", + "documentation": null, + "homepage": null, + "issues": "https://github.com/ansible-collections/dellemc.enterprise_sonic/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f419780826940a762216126025170e01d39987da0d8fb273560a2488307493f5", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/README.md b/ansible_collections/dellemc/enterprise_sonic/README.md new file mode 100644 index 00000000..4c17c9e9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/README.md @@ -0,0 +1,253 @@ +Ansible Network Collection for Enterprise SONiC Distribution by Dell Technologies +================================================================================= + +This collection includes Ansible core modules, network resource modules, and plugins needed to provision and manage Dell EMC PowerSwitch platforms running Enterprise SONiC Distribution by Dell Technologies. 
Sample playbooks and documentation are also included to show how the collection can be used. + +Supported connections +--------------------- +The SONiC Ansible collection supports network_cli and httpapi connections. + +Plugins +-------- +**CLICONF plugin** + +Name | Description +--- | --- +[network_cli](https://github.com/ansible-collections/dellemc.enterprise_sonic)|Use Ansible CLICONF to run commands on Enterprise SONiC + +**HTTPAPI plugin** + +Name | Description +--- | --- +[httpapi](https://github.com/ansible-collections/dellemc.enterprise_sonic)|Use Ansible HTTPAPI to run commands on Enterprise SONiC + +Collection core modules +------------------------ +Name | Description | Connection type +--- | --- | --- +[**sonic_command**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_command_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-command-module)|Run commands through the Management Framework CLI|network_cli +[**sonic_config**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_config_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-config-module)|Manage configuration through the Management Framework CLI|network_cli +[**sonic_api**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_api_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-api-module)|Perform REST operations through the Management Framework REST API|httpapi + +Collection network resource modules +----------------------------------- +Listed are the SONiC Ansible network resource modules which need ***httpapi*** as the connection type. Supported operations are ***merged*** and ***deleted***. 
+ +| **Interfaces** | **BGP** | **VRF** | **Users** | +| -------------- | ------- | ------- | ------- | +| [**sonic_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-interfaces-module)|[**sonic_bgp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-module)| [**sonic_vrfs**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vrfs_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vrfs-module)|[**sonic_users**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_users_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-users-module)| +| [**sonic_l2_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l2_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l2-interfaces-module)| [**sonic_bgp_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-af-module)| **MCLAG** | **AAA** | +| [**sonic_l3_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l3_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l3-interfaces-module) |[**sonic_bgp_as_paths**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_as_paths_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-as-paths-module)| [**sonic_mclag**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_mclag_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-mclag-module)| 
[**sonic_aaa**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_aaa_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-aaa-module)| +|**Port channel**|[**sonic_bgp_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-communities-module)| **VxLANs** |[**sonic_tacacs_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_tacacs_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-tacacs-server-module)| +|[**sonic_lag_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_lag_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-lag-interfaces-module)|[**sonic_bgp_ext_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_ext_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-ext-communities-module)| [**sonic_vxlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vxlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vxlans-module)|[**sonic_radius_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_radius_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-radius-server-module)| +|**VLANs**|[**sonic_bgp_neighbors**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-module)| **Port breakout** | **System** | 
+|[**sonic_vlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vlans-module)|[**sonic_bgp_neighbors_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-af-module)|[**sonic_port_breakout**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_port_breakout_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-port-breakout-module) |[**sonic_system**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_system_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-system-module) | + +Sample use case playbooks +------------------------- +The playbooks directory includes this sample playbook that show end-to-end use cases. + +Name | Description +--- | --- +[**BGP Layer 3 fabric**](https://github.com/ansible-collections/dellemc.enterprise_sonic/tree/master/playbooks/bgp_l3_fabric)|Example playbook to build a Layer 3 leaf-spine fabric + +Version compatibility +---------------------- +* Recommended Ansible version 2.10 or higher +* Enterprise SONiC Distribution by Dell Technologies version 3.1 or higher +* Recommended Python 3.5 or higher, or Python 2.7 +* Dell Enterprise SONiC images for releases 3.1 - 3.5: Use Ansible Enterprise SONiC collection version 1.1.0 or later 1.m.n versions (from the 1.x branch of this repo) +* Dell Enterprise SONiC images for release 4.0 and later 4.x.y releases: Use Ansible Enterprise SONiC collection version 2.0.0 or later 2.m.n releases (from the "2.x" branch of this repo). +* In general: Dell Enterprise SONiC release versions "R.x.y" are supported by Ansible Enterprise SONiC collection versions "R-2.m.n" on branch "R-2.x". 
+ + +> **NOTE**: Community SONiC versions that include the Management Framework container should work as well, however, this collection has not been tested nor validated + with community versions and is not supported. + +Installation of Ansible 2.11+ +----------------------------- +##### Dependencies for Ansible Enterprise SONiC collection + + pip3 install paramiko>=2.7 + pip3 install jinja2>=2.8 + pip3 install ansible-core + +Installation of Ansible 2.10+ +----------------------------- +##### Dependencies for Ansible Enterprise SONiC collection + + pip3 install paramiko>=2.7 + pip3 install jinja2>=2.8 + pip3 install ansible-base + + +Installation of Ansible 2.9 +--------------------------- +##### Dependencies for Ansible Enterprise SONiC collection + + pip3 install paramiko>=2.7 + pip3 install jinja2>=2.8 + pip3 install ansible + +##### Setting Environment Variables + +To use the Enterprise SONiC collection in Ansible 2.9, it is required to add one of the two available environment variables. + +Option 1: Add the environment variable while running the playbook. + + + ANSIBLE_NETWORK_GROUP_MODULES=sonic ansible-playbook sample_playbook.yaml -i inventory.ini + + +Option 2: Add the environment variable in user profile. + + + ANSIBLE_NETWORK_GROUP_MODULES=sonic + + +Installation of Enterprise SONiC collection from Ansible Galaxy +--------------------------------------------------------------- + +Install the latest version of the Enterprise SONiC collection from Ansible Galaxy. + + ansible-galaxy collection install dellemc.enterprise_sonic + +To install a specific version, specify a version range identifier. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0. 
+ + ansible-galaxy collection install 'dellemc.enterprise_sonic:>=1.0.0,<2.0.0' + + +Sample playbooks +----------------- +**VLAN configuration using CLICONF** + +***sonic_network_cli.yaml*** + + --- + + - name: SONiC Management Framework CLI configuration examples + hosts: sonic_switches + gather_facts: no + connection: network_cli + tasks: + - name: Add VLAN entry + dellemc.enterprise_sonic.sonic_config: + commands: ['interface Vlan 700','exit'] + save: yes + register: config_op + - name: Test SONiC single command + dellemc.enterprise_sonic.sonic_command: + commands: 'show vlan' + register: cmd_op + +**VLAN configuration using HTTPAPI** + +***sonic_httpapi.yaml*** + + --- + + - name: SONiC Management Framework REST API examples + hosts: sonic_switches + gather_facts: no + connection: httpapi + tasks: + - name: Perform PUT operation to add a VLAN network instance + dellemc.enterprise_sonic.sonic_api: + url: data/openconfig-network-instance:network-instances/network-instance=Vlan100 + method: "PUT" + body: {"openconfig-network-instance:network-instance": [{"name": "Vlan100","config": {"name": "Vlan100"}}]} + status_code: 204 + - name: Perform GET operation to view VLAN network instance + dellemc.enterprise_sonic.sonic_api: + url: data/openconfig-network-instance:network-instances/network-instance=Vlan100 + method: "GET" + status_code: 200 + register: api_op + +**Configuration using network resource modules** + +***sonic_resource_modules.yaml*** + + --- + + - name: VLANs, Layer 2 and Layer 3 interfaces configuration using Enterprise SONiC resource modules + hosts: sonic_switches + gather_facts: no + connection: httpapi + tasks: + - name: Configure VLANs + dellemc.enterprise_sonic.sonic_vlans: + config: + - vlan_id: 701 + - vlan_id: 702 + - vlan_id: 703 + - vlan_id: 704 + state: merged + register: sonic_vlans_output + - name: Configure Layer 2 interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + - name: Eth1/2 + access: + vlan: 701 + trunk: + 
allowed_vlans: + - vlan: 702 + - vlan: 703 + state: merged + register: sonic_l2_interfaces_output + - name: Configure Layer 3 interfaces + dellemc.enterprise_sonic.sonic_l3_interfaces: + config: + - name: Eth1/3 + ipv4: + - address: 8.1.1.1/16 + ipv6: + - address: 3333::1/16 + state: merged + register: sonic_l3_interfaces_output + +***host_vars/sonic_sw1.yaml*** + + hostname: sonic_sw1 + + # Common parameters for connection type httpapi or network_cli: + ansible_user: xxxx + ansible_pass: xxxx + ansible_network_os: dellemc.enterprise_sonic.sonic + + # Additional parameters for connection type httpapi: + ansible_httpapi_use_ssl: true + ansible_httpapi_validate_certs: false + +***inventory.ini*** + + [sonic_sw1] + sonic_sw1 ansible_host=100.104.28.119 + + [sonic_sw2] + sonic_sw2 ansible_host=100.104.28.120 + + [sonic_switches:children] + sonic_sw1 + sonic_sw2 + +Releasing, Versioning and Deprecation +------------------------------------- + +This collection follows [Semantic Versioning](https://semver.org/). More details on versioning can be found [in the Ansible docs](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections.html#collection-versions). + +We plan to regularly release new minor or bugfix versions once new features or bugfixes have been implemented. + +Enterprise SONiC Ansible Modules deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). + +Source control branches on Github: + - Released code versions are located on "release" branches with names of the form "M.x", where "M" specifies the "major" release version for releases residing on the branch. + - Unreleased and pre-release code versions are located on sub-branches of the "main" branch. This is a development branch, and is not intended for use in production environments. 
+ +Code of Conduct +--------------- + +This repository adheres to the [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) + +(c) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved. diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..9d3d51d0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml @@ -0,0 +1,170 @@ +objects: + role: {} +plugins: + become: {} + cache: {} + callback: {} + cliconf: + sonic: + description: Use sonic cliconf to run command on Dell OS10 platform + name: sonic + version_added: null + connection: {} + httpapi: + sonic: + description: HttpApi Plugin for devices supporting Restconf SONIC API + name: sonic + version_added: 1.0.0 + inventory: {} + lookup: {} + module: + sonic_aaa: + description: Manage AAA and its parameters + name: sonic_aaa + namespace: '' + version_added: 1.1.0 + sonic_api: + description: Manages REST operations on devices running Enterprise SONiC + name: sonic_api + namespace: '' + version_added: 1.0.0 + sonic_bgp: + description: Manage global BGP and its parameters + name: sonic_bgp + namespace: '' + version_added: 1.0.0 + sonic_bgp_af: + description: Manage global BGP address-family and its parameters + name: sonic_bgp_af + namespace: '' + version_added: 1.0.0 + sonic_bgp_as_paths: + description: Manage BGP autonomous system path (or as-path-list) and its parameters + name: sonic_bgp_as_paths + namespace: '' + version_added: 1.0.0 + sonic_bgp_communities: + description: Manage BGP community and its parameters + name: sonic_bgp_communities + namespace: '' + version_added: 1.0.0 + sonic_bgp_ext_communities: + description: Manage BGP extended community-list and its parameters + name: sonic_bgp_ext_communities + namespace: '' + version_added: 1.0.0 + sonic_bgp_neighbors: + 
description: Manage a BGP neighbor and its parameters + name: sonic_bgp_neighbors + namespace: '' + version_added: 1.0.0 + sonic_bgp_neighbors_af: + description: Manage the BGP neighbor address-family and its parameters + name: sonic_bgp_neighbors_af + namespace: '' + version_added: 1.0.0 + sonic_command: + description: Runs commands on devices running Enterprise SONiC + name: sonic_command + namespace: '' + version_added: 1.0.0 + sonic_config: + description: Manages configuration sections on devices running Enterprise SONiC + name: sonic_config + namespace: '' + version_added: 1.0.0 + sonic_facts: + description: Collects facts on devices running Enterprise SONiC + name: sonic_facts + namespace: '' + version_added: 1.0.0 + sonic_interfaces: + description: Configure Interface attributes on interfaces such as, Eth, LAG, + VLAN, and loopback. (create a loopback interface if it does not exist.) + name: sonic_interfaces + namespace: '' + version_added: 1.0.0 + sonic_l2_interfaces: + description: Configure interface-to-VLAN association that is based on access + or trunk mode + name: sonic_l2_interfaces + namespace: '' + version_added: 1.0.0 + sonic_l3_interfaces: + description: Configure the IPv4 and IPv6 parameters on Interfaces such as, Eth, + LAG, VLAN, and loopback + name: sonic_l3_interfaces + namespace: '' + version_added: 1.0.0 + sonic_lag_interfaces: + description: Manage link aggregation group (LAG) interface parameters + name: sonic_lag_interfaces + namespace: '' + version_added: 1.0.0 + sonic_mclag: + description: Manage multi chassis link aggregation groups domain (MCLAG) and + its parameters + name: sonic_mclag + namespace: '' + version_added: 1.0.0 + sonic_ntp: + description: Manage NTP configuration on SONiC. 
+ name: sonic_ntp + namespace: '' + version_added: 2.0.0 + sonic_port_breakout: + description: Configure port breakout settings on physical interfaces + name: sonic_port_breakout + namespace: '' + version_added: 1.0.0 + sonic_prefix_lists: + description: prefix list configuration handling for SONiC + name: sonic_prefix_lists + namespace: '' + version_added: 2.0.0 + sonic_radius_server: + description: Manage RADIUS server and its parameters + name: sonic_radius_server + namespace: '' + version_added: 1.0.0 + sonic_static_routes: + description: Manage static routes configuration on SONiC + name: sonic_static_routes + namespace: '' + version_added: 2.0.0 + sonic_system: + description: Configure system parameters + name: sonic_system + namespace: '' + version_added: 1.0.0 + sonic_tacacs_server: + description: Manage TACACS server and its parameters + name: sonic_tacacs_server + namespace: '' + version_added: 1.1.0 + sonic_users: + description: Manage users and its parameters + name: sonic_users + namespace: '' + version_added: 1.1.0 + sonic_vlans: + description: Manage VLAN and its parameters + name: sonic_vlans + namespace: '' + version_added: 1.0.0 + sonic_vrfs: + description: Manage VRFs and associate VRFs to interfaces such as, Eth, LAG, + VLAN, and loopback + name: sonic_vrfs + namespace: '' + version_added: 1.0.0 + sonic_vxlans: + description: Manage VxLAN EVPN and its parameters + name: sonic_vxlans + namespace: '' + version_added: 1.0.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 2.0.0 diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst new file mode 100644 index 00000000..c9a571cd --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst @@ -0,0 +1,119 @@ +====================================== +Dellemc.Enterprise_Sonic Release Notes +====================================== + +.. 
contents:: Topics + + +v2.0.0 +====== + +Release Summary +--------------- + +This release provides Dell SONiC Enterprise Ansible Collection support for SONiC 4.x images. It is the first release for the 2.x branch of the collection. Subsequent enhancements for support of SONiC 4.x images will also be provided as needed on the 2.x branch. This release also contains bugfixes and enhancements to supplement the Ansible functionality provided previously for SONiC 3.x images. The changelog describes changes made to the modules and plugins included in this collection since release 1.1.0. + + +Major Changes +------------- + +- Added 'static_routes' module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/82). +- Added a resource module for NTP support (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/99). +- Added a resource module for support of prefix lists (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/100). +- Updated backend REST API request formats in all applicable modules for compatibility with SONiC 4.x openconfig YANG compliant REST APIs. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/53) + +Minor Changes +------------- + +- Added an execution-environment.yml file to the "meta" directory to enable use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88). +- bgp_af - Added support for BGP options to configure usage and advertisement of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62). +- bgp_as_paths - updated module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102) +- bgp_neighbors - Add BGP peer group support for multiple attributes. 
The added attributes correspond to the same set of attributes added for BGP neighbors with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81). +- bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72). +- bgp_neighbors - add an auth_pwd dictionary and nbr_description attribute to the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67). +- bgp_neighbors - added prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101). +- bgp_neighbors_af - added prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101). +- playbook - updated examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102) +- sonic_vxlans - Add configuration capability for the primary IP address of a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58). +- vlans - Added support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98). +- workflow - Added stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). + +Breaking Changes / Porting Guide +-------------------------------- + +- bgp_af - Add the route_advertise_list dictionary to the argspec to replace the deleted, obsolete advertise_prefix attribute used for SONiC 3.x images on the 1.x branch of this collection. This change corresponds to a SONiC 4.0 OC YANG REST compliance change for the BGP AF REST API. It enables specification of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63). +- bgp_af - remove the obsolete 'advertise_prefix' attribute from argspec and config code. 
This and subsequent co-req replacement with the new route advertise list argument structure require corresponding changes in playbooks previously used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) +- bgp_neighbors - Replace the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72). +- bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81). + +Bugfixes +-------- + +- Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). +- Fixed regression test sequencing and other regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85). +- aaa - Fixed a bug in facts gathering by providing required conditional branching (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90) +- aaa - Modify regression test sequencing to enable correct testing of the functionality for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78). 
+- bgp_neighbors - remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) +- port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88). +- sonic - Fix a bug in handling of interface names in standard interface naming mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). +- sonic_command - Fix bugs in handling of CLI commands involving a prompt and answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files). +- users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). +- vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105). + +New Modules +----------- + +- dellemc.enterprise_sonic.sonic_ntp - Manage NTP configuration on SONiC. +- dellemc.enterprise_sonic.sonic_prefix_lists - prefix list configuration handling for SONiC +- dellemc.enterprise_sonic.sonic_static_routes - Manage static routes configuration on SONiC + +v1.1.0 +====== + +New Modules +----------- + +- dellemc.enterprise_sonic.sonic_aaa - AAA resource module. +- dellemc.enterprise_sonic.sonic_radius_server - RADIUS resource module. +- dellemc.enterprise_sonic.sonic_system - SYSTEM resource module. +- dellemc.enterprise_sonic.sonic_tacacs_server - TACACS Server resource module. + +v1.0.0 +====== + +New Plugins +----------- + +Cliconf +~~~~~~~ + +- dellemc.enterprise_sonic.sonic - Use Ansible CLICONF to run commands on Enterprise SONiC. + +Httpapi +~~~~~~~ + +- dellemc.enterprise_sonic.sonic - Use Ansible HTTPAPI to run commands on Enterprise SONiC. + +New Modules +----------- + +- dellemc.enterprise_sonic.sonic_api - Perform REST operations through the Management Framework REST API. +- dellemc.enterprise_sonic.sonic_bgp - BGP resource module. 
+- dellemc.enterprise_sonic.sonic_bgp_af - BGP AF resource module. +- dellemc.enterprise_sonic.sonic_bgp_as_paths - BGP AS path resource module. +- dellemc.enterprise_sonic.sonic_bgp_communities - BGP communities resource module. +- dellemc.enterprise_sonic.sonic_bgp_ext_communities - BGP Ext communities resource module. +- dellemc.enterprise_sonic.sonic_bgp_neighbors - BGP neighbors resource module. +- dellemc.enterprise_sonic.sonic_bgp_neighbors_af - BGP neighbors AF resource module. +- dellemc.enterprise_sonic.sonic_command - Run commands through Management Framework CLI. +- dellemc.enterprise_sonic.sonic_config - Manage configuration through the Management Framework CLI. +- dellemc.enterprise_sonic.sonic_interfaces - Interface resource module. +- dellemc.enterprise_sonic.sonic_l2_interfaces - Layer 2 interface resource module. +- dellemc.enterprise_sonic.sonic_l3_interfaces - Layer 3 interface resource module. +- dellemc.enterprise_sonic.sonic_lag_interfaces - Link aggregation (LAG) resource module. +- dellemc.enterprise_sonic.sonic_mclag - MCLAG resource module. +- dellemc.enterprise_sonic.sonic_port_breakout - port breakout resource module. +- dellemc.enterprise_sonic.sonic_users - USERS resource module. +- dellemc.enterprise_sonic.sonic_vlans - VLAN resource module. +- dellemc.enterprise_sonic.sonic_vrfs - VRF resource module. +- dellemc.enterprise_sonic.sonic_vxlans - VxLAN EVPN resource module. 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/100-prefix_lists-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/100-prefix_lists-resource-module.yaml new file mode 100644 index 00000000..25321ab1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/100-prefix_lists-resource-module.yaml @@ -0,0 +1,2 @@ +major_changes: + - Added a resource module for support of prefix lists (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/100). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/101-bgp-prefix_lists.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/101-bgp-prefix_lists.yaml new file mode 100644 index 00000000..7a37ef89 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/101-bgp-prefix_lists.yaml @@ -0,0 +1,3 @@ +minor_changes: + - bgp_neighbors - added prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101). + - bgp_neighbors_af - added prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/102-playbook-example-updates.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/102-playbook-example-updates.yaml new file mode 100644 index 00000000..a82f06e0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/102-playbook-example-updates.yaml @@ -0,0 +1,3 @@ +minor_changes: + - bgp_as_paths - updated module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102) + - playbook - updated examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102) diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/103-standard-interface-naming-and-other-bugfixes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/103-standard-interface-naming-and-other-bugfixes.yaml new file mode 100644 index 00000000..ef97e26b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/103-standard-interface-naming-and-other-bugfixes.yaml @@ -0,0 +1,3 @@ +bugfixes: + - sonic - Fix a bug in handling of interface names in standard interface naming mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). + - Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/105-vxlan-regression-test-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/105-vxlan-regression-test-fix.yaml new file mode 100644 index 00000000..480f11aa --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/105-vxlan-regression-test-fix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/2.0.0.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/2.0.0.yaml new file mode 100644 index 00000000..c73fafa5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/2.0.0.yaml @@ -0,0 +1,2 @@ +release_summary: | + This release provides Dell SONiC Enterprise Ansible Collection support for SONiC 4.x images. It is the first release for the 2.x branch of the collection. Subsequent enhancements for support of SONiC 4.x images will also be provided as needed on the 2.x branch. This release also contains bugfixes and enhancements to supplement the Ansible functionality provided previously for SONiC 3.x images. The changelog describes changes made to the modules and plugins included in this collection since release 1.1.0. 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/53-oc-yang-compliance.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/53-oc-yang-compliance.yaml new file mode 100644 index 00000000..fba15e6b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/53-oc-yang-compliance.yaml @@ -0,0 +1,2 @@ +major_changes: +- Updated backend REST API request formats in all applicable modules for compatibility with SONiC 4.x openconfig YANG compliant REST APIs. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/53) diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/58-vxlan-primary-ip.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/58-vxlan-primary-ip.yaml new file mode 100644 index 00000000..e7bf9858 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/58-vxlan-primary-ip.yaml @@ -0,0 +1,2 @@ +minor_changes: + - sonic_vxlans - Add configuration capability for the primary IP address of a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/60-bgp-regression-test_fixes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/60-bgp-regression-test_fixes.yaml new file mode 100644 index 00000000..44482602 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/60-bgp-regression-test_fixes.yaml @@ -0,0 +1,4 @@ +bugfixes: + - bgp_neighbors - remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) +breaking_changes: + - bgp_af - remove the obsolete 'advertise_prefix' attribute from argspec and config code. 
This and subsequent co-req replacement with the new route advertise list argument structure require corresponding changes in playbooks previously used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/62-bgp-vxlan-primary-ip.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/62-bgp-vxlan-primary-ip.yaml new file mode 100644 index 00000000..2d8b6ab9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/62-bgp-vxlan-primary-ip.yaml @@ -0,0 +1,2 @@ +minor_changes: +- bgp_af - Added support for BGP options to configure usage and advertisement of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/63-bgp_af-route_advertise_list.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/63-bgp_af-route_advertise_list.yaml new file mode 100644 index 00000000..ea75c732 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/63-bgp_af-route_advertise_list.yaml @@ -0,0 +1,2 @@ +breaking_changes: + - bgp_af - Add the route_advertise_list dictionary to the argspec to replace the deleted, obsolete advertise_prefix attribute used for SONiC 3.x images on the 1.x branch of this collection. This change corresponds to a SONiC 4.0 OC YANG REST compliance change for the BGP AF REST API. It enables specification of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml new file mode 100644 index 00000000..95e82941 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml @@ -0,0 +1,2 @@ +minor_changes: + - bgp_neighbors - add an auth_pwd dictionary and nbr_description attribute to the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/72-bgp_neighbors-add-attributes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/72-bgp_neighbors-add-attributes.yaml new file mode 100644 index 00000000..c92d315b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/72-bgp_neighbors-add-attributes.yaml @@ -0,0 +1,5 @@ +minor_changes: + - bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72). + +breaking_changes: + - bgp_neighbors - Replace the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/76-CLI-command-prompt-answer-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/76-CLI-command-prompt-answer-fix.yaml new file mode 100644 index 00000000..762759e0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/76-CLI-command-prompt-answer-fix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - sonic_command - Fix bugs in handling of CLI commands involving a prompt and answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/78-aaa-regression-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/78-aaa-regression-fix.yaml new file mode 100644 index 00000000..941e9625 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/78-aaa-regression-fix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - aaa - Modify regression test sequencing to enable correct testing of the functionality for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/81-add-bgp-peer-group-attributes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/81-add-bgp-peer-group-attributes.yaml new file mode 100644 index 00000000..f47ebe53 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/81-add-bgp-peer-group-attributes.yaml @@ -0,0 +1,5 @@ +minor_changes: + - bgp_neighbors - Add BGP peer group support for multiple attributes. The added attributes correspond to the same set of attributes added for BGP neighbors with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81). 
+ +breaking_changes: + - bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/82-static_routes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/82-static_routes.yaml new file mode 100644 index 00000000..2b9ccf88 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/82-static_routes.yaml @@ -0,0 +1,2 @@ +major_changes: + - Added 'static_routes' module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/82). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/85-regression-test-fixes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/85-regression-test-fixes.yaml new file mode 100644 index 00000000..fe54f7bc --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/85-regression-test-fixes.yaml @@ -0,0 +1,2 @@ +bugfixes: + - Fixed regression test sequencing and other regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/88-port_breakout-and-execution-environment.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/88-port_breakout-and-execution-environment.yaml new file mode 100644 index 00000000..6c5775f4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/88-port_breakout-and-execution-environment.yaml @@ -0,0 +1,5 @@ +bugfixes: + - port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88). + +minor_changes: + - Added an execution-environment.yml file to the "meta" directory to enable use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/90-aaa-and-users-bugfix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/90-aaa-and-users-bugfix.yaml new file mode 100644 index 00000000..c61d09ad --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/90-aaa-and-users-bugfix.yaml @@ -0,0 +1,6 @@ +bugfixes: + - aaa - Fixed a bug in facts gathering by providing required conditional branching (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90) + - users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). + +minor_changes: + - workflow - Added stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). 
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/98-vlans-description.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/98-vlans-description.yaml new file mode 100644 index 00000000..0895b5ea --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/98-vlans-description.yaml @@ -0,0 +1,2 @@ +minor_changes: + - vlans - Added support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/99-ntp-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/99-ntp-resource-module.yaml new file mode 100644 index 00000000..f858bf8b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.0.0/99-ntp-resource-module.yaml @@ -0,0 +1,2 @@ +major_changes: + - Added a resource module for NTP support (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/99). diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml new file mode 100644 index 00000000..0ce34f5e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml @@ -0,0 +1,197 @@ +ancestor: null +releases: + 1.0.0: + modules: + - description: Perform REST operations through the Management Framework REST API. + name: sonic_api + namespace: '' + - description: BGP resource module. + name: sonic_bgp + namespace: '' + - description: BGP AF resource module. + name: sonic_bgp_af + namespace: '' + - description: BGP AS path resource module. + name: sonic_bgp_as_paths + namespace: '' + - description: BGP communities resource module. + name: sonic_bgp_communities + namespace: '' + - description: BGP Ext communities resource module. 
+ name: sonic_bgp_ext_communities + namespace: '' + - description: BGP neighbors resource module. + name: sonic_bgp_neighbors + namespace: '' + - description: BGP neighbors AF resource module. + name: sonic_bgp_neighbors_af + namespace: '' + - description: Run commands through Management Framework CLI. + name: sonic_command + namespace: '' + - description: Manage configuration through the Management Framework CLI. + name: sonic_config + namespace: '' + - description: Interface resource module. + name: sonic_interfaces + namespace: '' + - description: Layer 2 interface resource module. + name: sonic_l2_interfaces + namespace: '' + - description: Layer 3 interface resource module. + name: sonic_l3_interfaces + namespace: '' + - description: Link aggregation (LAG) resource module. + name: sonic_lag_interfaces + namespace: '' + - description: MCLAG resource module. + name: sonic_mclag + namespace: '' + - description: port breakout resource module. + name: sonic_port_breakout + namespace: '' + - description: USERS resource module. + name: sonic_users + namespace: '' + - description: VLAN resource module. + name: sonic_vlans + namespace: '' + - description: VRF resource module. + name: sonic_vrfs + namespace: '' + - description: VxLAN EVPN resource module. + name: sonic_vxlans + namespace: '' + plugins: + cliconf: + - description: Use Ansible CLICONF to run commands on Enterprise SONiC. + name: sonic + namespace: null + httpapi: + - description: Use Ansible HTTPAPI to run commands on Enterprise SONiC. + name: sonic + namespace: null + release_date: '2020-12-07' + 1.1.0: + modules: + - description: AAA resource module. + name: sonic_aaa + namespace: '' + - description: RADIUS resource module. + name: sonic_radius_server + namespace: '' + - description: SYSTEM resource module. + name: sonic_system + namespace: '' + - description: TACACS Server resource module. 
+ name: sonic_tacacs_server + namespace: '' + release_date: '2021-05-28' + 2.0.0: + changes: + breaking_changes: + - bgp_af - Add the route_advertise_list dictionary to the argspec to replace + the deleted, obsolete advertise_prefix attribute used for SONiC 3.x images + on the 1.x branch of this collection. This change corresponds to a SONiC 4.0 + OC YANG REST compliance change for the BGP AF REST API. It enables specification + of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63). + - bgp_af - remove the obsolete 'advertise_prefix' attribute from argspec and + config code. This and subsequent co-req replacement with the new route advertise + list argument structure require corresponding changes in playbooks previously + used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) + - bgp_neighbors - Replace the previously defined standalone "bfd" attribute + with a bfd dictionary containing multiple attributes. This change corresponds + to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks + previously using the bfd attributes for SONiC 3.x images must be modified + for use on SONiC 4.0 images to use the new definition for the bfd attribute + argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72). + - bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone + "bfd" attribute with a bfd dictionary containing multiple attributes. This + change corresponds to the revised SONiC 4.x implementation of OC YANG compatible + REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images + must be modified for use on SONiC 4.0 images to use the new definition for + the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81). 
+ bugfixes: + - Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). + - Fixed regression test sequencing and other regression test bugs in multiple + modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85). + - aaa - Fixed a bug in facts gathering by providing required conditional branching + (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90) + - aaa - Modify regression test sequencing to enable correct testing of the functionality + for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78). + - bgp_neighbors - remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60) + - port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88). + - sonic - Fix a bug in handling of interface names in standard interface naming + mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103). + - sonic_command - Fix bugs in handling of CLI commands involving a prompt and + answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files). + - users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). + - vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105). + major_changes: + - Added 'static_routes' module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/82). + - Added a resource module for NTP support (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/99). + - Added a resource module for support of prefix lists (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/100). 
+ - Updated backend REST API request formats in all applicable modules for compatibility
+ with SONiC 4.x openconfig YANG compliant REST APIs. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/53)
+ minor_changes:
+ - Added an execution-environment.yml file to the "meta" directory to enable
+ use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
+ - bgp_af - Added support for BGP options to configure usage and advertisement
+ of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62).
+ - bgp_as_paths - updated module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
+ - bgp_neighbors - Add BGP peer group support for multiple attributes. The added
+ attributes correspond to the same set of attributes added for BGP neighbors
+ with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
+ - bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
+ - bgp_neighbors - add an auth_pwd dictionary and nbr_description attribute to
+ the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67).
+ - bgp_neighbors - added prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+ - bgp_neighbors_af - added prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+ - playbook - updated examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
+ - sonic_vxlans - Add configuration capability for the primary IP address of
+ a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58).
+ - vlans - Added support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98). + - workflow - Added stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90). + release_summary: 'This release provides Dell SONiC Enterprise Ansible Collection + support for SONiC 4.x images. It is the first release for the 2.x branch of + the collection. Subsequent enhancements for support of SONiC 4.x images will + also be provided as needed on the 2.x branch. This release also contains bugfixes + and enhancements to supplement the Ansible functionality provided previously + for SONiC 3.x images. The changelog describes changes made to the modules + and plugins included in this collection since release 1.1.0. + + ' + fragments: + - 100-prefix_lists-resource-module.yaml + - 101-bgp-prefix_lists.yaml + - 102-playbook-example-updates.yaml + - 103-standard-interface-naming-and-other-bugfixes.yaml + - 105-vxlan-regression-test-fix.yaml + - 2.0.0.yaml + - 53-oc-yang-compliance.yaml + - 58-vxlan-primary-ip.yaml + - 60-bgp-regression-test_fixes.yaml + - 62-bgp-vxlan-primary-ip.yaml + - 63-bgp_af-route_advertise_list.yaml + - 67-bgp_neighbors-auth_pwd-and-neighbor_description.yaml + - 72-bgp_neighbors-add-attributes.yaml + - 76-CLI-command-prompt-answer-fix.yaml + - 78-aaa-regression-fix.yaml + - 81-add-bgp-peer-group-attributes.yaml + - 82-static_routes.yaml + - 85-regression-test-fixes.yaml + - 88-port_breakout-and-execution-environment.yaml + - 90-aaa-and-users-bugfix.yaml + - 98-vlans-description.yaml + - 99-ntp-resource-module.yaml + modules: + - description: Manage NTP configuration on SONiC. 
+ name: sonic_ntp + namespace: '' + - description: prefix list configuration handling for SONiC + name: sonic_prefix_lists + namespace: '' + - description: Manage static routes configuration on SONiC + name: sonic_static_routes + namespace: '' + release_date: '2022-09-02' diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml new file mode 100644 index 00000000..b857f936 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml @@ -0,0 +1,33 @@ +changelog_filename_template: CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +archive_path_template: changelogs/archive_fragments/{version} +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Dellemc.Enterprise_Sonic +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/dellemc/enterprise_sonic/meta/execution-environment.yml b/ansible_collections/dellemc/enterprise_sonic/meta/execution-environment.yml new file mode 100644 index 00000000..283dbe33 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/meta/execution-environment.yml @@ -0,0 +1,3 @@ +dependencies: + python: requirements.txt +version: 1 diff --git a/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml b/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml new file 
mode 100644 index 00000000..d4f511c1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml @@ -0,0 +1,53 @@ +requires_ansible: '>=2.9.10' +plugin_routing: + action: + sonic_config: + redirect: dellemc.enterprise_sonic.sonic + sonic_command: + redirect: dellemc.enterprise_sonic.sonic + sonic_api: + redirect: dellemc.enterprise_sonic.sonic + sonic_facts: + redirect: dellemc.enterprise_sonic.sonic + sonic_interfaces: + redirect: dellemc.enterprise_sonic.sonic + sonic_lag_interfaces: + redirect: dellemc.enterprise_sonic.sonic + sonic_l2_interfaces: + redirect: dellemc.enterprise_sonic.sonic + sonic_l3_interfaces: + redirect: dellemc.enterprise_sonic.sonic + sonic_vlans: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_af: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_neighbors: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_neighbors_af: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_as_paths: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_communities: + redirect: dellemc.enterprise_sonic.sonic + sonic_bgp_ext_communities: + redirect: dellemc.enterprise_sonic.sonic + sonic_vxlans: + redirect: dellemc.enterprise_sonic.sonic + sonic_mclag: + redirect: dellemc.enterprise_sonic.sonic + sonic_users: + redirect: dellemc.enterprise_sonic.sonic + sonic_port_breakout: + redirect: dellemc.enterprise_sonic.sonic + sonic_vrfs: + redirect: dellemc.enterprise_sonic.sonic + sonic_tacacs_server: + redirect: dellemc.enterprise_sonic.sonic + sonic_radius_server: + redirect: dellemc.enterprise_sonic.sonic + sonic_aaa: + redirect: dellemc.enterprise_sonic.sonic + sonic_system: + redirect: dellemc.enterprise_sonic.sonic diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/clos-fabric.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/clos-fabric.yaml new file mode 100644 index 00000000..27e20c08 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/clos-fabric.yaml @@ -0,0 +1,43 @@ +--- +- hosts: datacenter + connection: httpapi + gather_facts: no + collections: + - dellemc.enterprise_sonic + tasks: + - name: Create Interfaces + sonic_interfaces: + config: "{{sonic_interfaces}}" + when: sonic_interfaces is defined and sonic_interfaces + + - name: Create L3 Interfaces + sonic_l3_interfaces: + config: "{{sonic_l3_interfaces}}" + when: sonic_l3_interfaces is defined and sonic_l3_interfaces + + - name: "Push configs for clos-fabric usecase" + vars: + ansible_connection: network_cli + sonic_config: + src: clos_fabric_cfg.j2 + register: result + + - name: Create BGP configuration + sonic_bgp: + config: "{{sonic_bgp}}" + when: sonic_bgp is defined and sonic_bgp + + - name: Create BGP AF configuration + sonic_bgp_af: + config: "{{sonic_bgp_af}}" + when: sonic_bgp_af is defined and sonic_bgp_af + + - name: Create BGP Neighbors configuration + sonic_bgp_neighbors: + config: "{{sonic_bgp_neighbors}}" + when: sonic_bgp_neighbors is defined and sonic_bgp_neighbors + + - name: Create BGP Neighbors AF configuration + sonic_bgp_neighbors_af: + config: "{{sonic_bgp_neighbors_af}}" + when: sonic_bgp_neighbors_af is defined and sonic_bgp_neighbors_af diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/all.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/all.yaml new file mode 100644 index 00000000..526f29e2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/all.yaml @@ -0,0 +1,49 @@ +ansible_user: admin +ansible_password: admin +ansible_network_os: dellemc.enterprise_sonic.sonic + +# Changing the interface name here will be reflected in all places +# Spine to Leaf interface connections +spine_leaf1_inf1: Eth1/1 +spine_leaf2_inf2: Eth1/2 +spine_leaf25_inf3: Eth1/25 +spine_leaf26_inf4: Eth1/26 + +# Leaf to spine interface connections 
+leaf_spine1_inf1: Eth1/14
+leaf_spine2_inf2: Eth1/15
+
+# Logical interface definitions
+loopback_inf0: Loopback0
+loopback_inf1: Loopback1
+
+host_data:
+ leaf1:
+ loopback0_ip: 10.0.2.1/32
+ loopback1_ip: 10.10.10.1/32
+ bgp_asn: 65001
+ leaf2:
+ loopback0_ip: 10.0.2.2/32
+ loopback1_ip: 10.10.10.2/32
+ bgp_asn: 65001
+ leaf25:
+ loopback0_ip: 10.0.2.25/32
+ loopback1_ip: 10.10.10.25/32
+ bgp_asn: 65025
+ leaf26:
+ loopback0_ip: 10.0.2.26/32
+ loopback1_ip: 10.10.10.26/32
+ bgp_asn: 65025
+ spine1:
+ loopback0_ip: 10.0.1.1/32
+ bgp_asn: 65000
+ spine2:
+ loopback0_ip: 10.0.1.2/32
+ bgp_asn: 65000
+
+ansible_httpapi_use_ssl: true
+ansible_httpapi_validate_certs: false
+
+loopback0_ip: "{{ host_data[inventory_hostname].loopback0_ip }}"
+loopback1_ip: "{{host_data[inventory_hostname].loopback1_ip}}"
+bgp_asn: "{{ host_data[inventory_hostname].bgp_asn }}" diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/leaf.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/leaf.yaml new file mode 100644 index 00000000..ad524d42 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/leaf.yaml @@ -0,0 +1,103 @@ +sonic_interfaces:
+ - name: "{{ leaf_spine1_inf1 }}"
+ description: "{{ leaf_spine1_inf1 }} Spine1"
+ enabled: true
+ - name: "{{ leaf_spine2_inf2 }}"
+ description: "{{ leaf_spine2_inf2 }} Spine2"
+ enabled: true
+ - name: "{{ loopback_inf0 }}"
+ - name: "{{ loopback_inf1 }}"
+
+sonic_l3_interfaces:
+ - name: "{{ loopback_inf0 }}"
+ ipv4:
+ addresses:
+ - address: "{{ loopback0_ip }}"
+ - name: "{{ loopback_inf1 }}"
+ ipv4:
+ addresses:
+ - address: "{{ loopback1_ip }}"
+ - name: "{{ leaf_spine1_inf1 }}"
+ ipv6:
+ enabled: true
+ - name: "{{ leaf_spine2_inf2 }}"
+ ipv6:
+ enabled: true
+
+sonic_route_maps_cli:
+ - name: REDISTU
+ permit: true
+ seq: 10
+ match_ip_prefix_list: UNDERLAY
+ - name: REDISTL3
+ permit: true
+ seq: 10
+ match_ip_prefix_list:
L3OVERLAY + +sonic_prefix_list_cli: + - name: UNDERLAY + permit: true + entries: + - ip: 10.0.2.0/24 + condition: ge 32 le 32 + seq: 1 + - ip: 10.10.10.0/24 + condition: ge 32 le 32 + seq: 2 + - name: L3OVERLAY + permit: true + entries: + - ip: 192.168.50.0/24 + seq: 3 + +sonic_bgp: + - bgp_as: "{{ bgp_asn }}" + router_id: "{{ loopback0_ip.split('/')[0] }}" + bestpath: + as_path: + multipath_relax: true + +sonic_bgp_af: + - bgp_as: "{{ bgp_asn }}" + address_family: + afis: + - afi: l2vpn + safi: evpn + advertise_all_vni: true + - afi: ipv4 + safi: unicast + redistribute: + - protocol: connected + route_map: REDISTU + +sonic_bgp_neighbors: + - bgp_as: "{{ bgp_asn }}" + peer_group: + - name: SPINE + remote_as: + peer_type: external + advertisement_interval: 5 + timers: + keepalive: 3 + holdtime: 9 + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + capability: + extended_nexthop: true + address_family: + afis: + - afi: l2vpn + safi: evpn + activate: true + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + value: 2 + neighbors: + - neighbor: "{{ leaf_spine1_inf1 }}" + peer_group: SPINE + - neighbor: "{{ leaf_spine2_inf2 }}" + peer_group: SPINE diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/spine.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/spine.yaml new file mode 100644 index 00000000..e3116407 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/group_vars/spine.yaml @@ -0,0 +1,87 @@ +loopback_inf0: Loopback0 + +sonic_interfaces: + - name: "{{ spine_leaf1_inf1 }}" + description: "{{ spine_leaf1_inf1 }} Leaf1" + enabled: true + - name: "{{ spine_leaf2_inf2 }}" + description: "{{ spine_leaf2_inf2 }} Leaf2" + enabled: true + - name: "{{ spine_leaf25_inf3 }}" + description: "{{ spine_leaf25_inf3 }} Leaf25" + enabled: true + - name: "{{ spine_leaf26_inf4 }}" + description: "{{ spine_leaf26_inf4 }} Leaf26" + enabled: true 
+ - name: "{{ loopback_inf0 }}" + +sonic_l3_interfaces: + - name: "{{ loopback_inf0 }}" + ipv4: + addresses: + - address: "{{ loopback0_ip }}" + - name: "{{ spine_leaf1_inf1 }}" + ipv6: + enabled: true + - name: "{{ spine_leaf2_inf2 }}" + ipv6: + enabled: true + - name: "{{ spine_leaf25_inf3 }}" + ipv6: + enabled: true + - name: "{{ spine_leaf26_inf4 }}" + ipv6: + enabled: true + +sonic_bgp: + - bgp_as: "{{ bgp_asn }}" + router_id: "{{ loopback0_ip.split('/')[0] }}" + bestpath: + as_path: + multipath_relax: true + +sonic_bgp_af: + - bgp_as: "{{ bgp_asn }}" + address_family: + afis: + - afi: l2vpn + safi: evpn + advertise_all_vni: true + - afi: ipv4 + safi: unicast + redistribute: + - protocol: connected + +sonic_bgp_neighbors: + - bgp_as: "{{ bgp_asn }}" + peer_group: + - name: LEAF + remote_as: + peer_type: external + advertisement_interval: 5 + timers: + keepalive: 3 + holdtime: 9 + bfd: + check_failure: true + enabled: true + profile: 'profile1' + capability: + extended_nexthop: true + address_family: + afis: + - afi: l2vpn + safi: evpn + activate: true + - afi: ipv4 + safi: unicast + activate: true + neighbors: + - neighbor: "{{ spine_leaf1_inf1 }}" + peer_group: LEAF + - neighbor: "{{ spine_leaf2_inf2 }}" + peer_group: LEAF + - neighbor: "{{ spine_leaf25_inf3 }}" + peer_group: LEAF + - neighbor: "{{ spine_leaf26_inf4 }}" + peer_group: LEAF diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/inventory.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/inventory.yaml new file mode 100644 index 00000000..26845da3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.81.19 +spine2 ansible_host=100.10.10.2 +leaf1 ansible_host=100.94.81.17 +leaf2 ansible_host=100.10.10.4 +leaf25 ansible_host=100.10.10.5 +leaf26 ansible_host=100.10.10.6 + +[leaf] +leaf1 +leaf2 +leaf25 +leaf26 + +[spine] +spine1 +spine2 + 
+[datacenter:children]
+leaf
+spine diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/templates/clos_fabric_cfg.j2 b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/templates/clos_fabric_cfg.j2 new file mode 100644 index 00000000..cbb4732f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/bgp_l3_fabric/templates/clos_fabric_cfg.j2 @@ -0,0 +1,25 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True
+{#######################################
+
+#Purpose:
+BGP L3 Fabric usecase configuration for SONiC devices
+
+#######################################}
+{# prefix list start #}
+{% if sonic_prefix_list_cli is defined and sonic_prefix_list_cli %}
+ {% for sonic_prefix in sonic_prefix_list_cli %}
+ {% for prefix_entires in sonic_prefix.entries %}
+ip prefix-list {{ sonic_prefix.name }} seq {{ prefix_entires.seq }} {{ 'permit' if sonic_prefix.permit else 'deny' }} {{ prefix_entires.ip }} {{ prefix_entires.condition if prefix_entires.condition is defined else ''}}
+ {% endfor %}
+ {% endfor %}
+{% endif %}
+{# prefix list end #}
+
+{# Route map configuration start #}
+{% if sonic_route_maps_cli is defined and sonic_route_maps_cli %}
+ {% for sonic_route_map in sonic_route_maps_cli %}
+route-map {{ sonic_route_map.name }} {{ 'permit' if sonic_route_map.permit else 'deny' }} {{ sonic_route_map.seq }}
+ match ip address prefix-list {{ sonic_route_map.match_ip_prefix_list }}
+ {% endfor %}
+{% endif %}
+{# Route map configuration end #} diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/hosts b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/hosts new file mode 100644 index 00000000..b1214247 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/hosts @@ -0,0 +1,12 @@ +sonic1 ansible_host=100.94.81.17 ansible_user=admin ansible_password=admin
+sonic2 ansible_host=100.94.81.19 ansible_user=admin
ansible_password=admin + +[datacenter] +sonic1 +sonic2 + +[datacenter:vars] +ansible_network_os=dellemc.enterprise_sonic.sonic +ansible_python_interpreter=/usr/bin/python3 +ansible_httpapi_use_ssl=true +ansible_httpapi_validate_certs=false diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/interface_naming.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/interface_naming.yaml new file mode 100644 index 00000000..36901572 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/interface_naming.yaml @@ -0,0 +1,32 @@ +--- +- hosts: datacenter + connection: network_cli + gather_facts: no + collections: + - dellemc.enterprise_sonic + tasks: + - name: "Configure description for interface in native name" + sonic_config: + commands: + - description Ethernet0 + parents: + - interface Ethernet0 + + # Same prompt and answer for cli "no interface-naming standard" + - name: "Configure cli with prompts on SONiC device" + sonic_config: + commands: + - command: "interface-naming standard" + prompt: "Broadcast message: Interface naming mode has changed. Users running 'sonic-cli' are required to restart your session." 
+ answer: "\n" + + - name: "reset ssh connection" + meta: reset_connection + + - name: "Configure description for interface in standard name" + sonic_config: + commands: + - description Eth1/1 + parents: + - interface Eth1/1 + diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/patch.txt b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/patch.txt new file mode 100644 index 00000000..94a5a760 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/patch.txt @@ -0,0 +1,8 @@ +{"openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "131.1.1.1", + "config": {"ip": "131.1.1.1", "prefix-length": 24} + }]}} + } diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_aaa.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_aaa.yaml new file mode 100644 index 00000000..7d968b83 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_aaa.yaml @@ -0,0 +1,79 @@ +--- +- name: Ansible resource module example for sonic_aaa + hosts: datacenter + gather_facts: False + connection: httpapi + collections: + - dellemc.enterprise_sonic + tasks: + - name: delete all users + sonic_users: + config: + state: deleted + - name: delete all aaa + sonic_aaa: + config: + state: deleted + - name: delete all tacacs servers + sonic_tacacs_server: + config: + state: deleted + - name: delete all radius servers + sonic_radius_server: + config: + state: deleted + - name: Merge users configurations + sonic_users: + config: + - name: sysadmin + role: admin + password: admin + update_password: always + - name: sysoperator + role: operator + password: operator + update_password: always + state: merged + - name: Test SONiC aaa + sonic_aaa: + config: + authentication: + data: + fail_through: true + group: tacacs+ + local: true + state: merged + - name: Merge tacacs configurations + sonic_tacacs_server: 
+ config: + auth_type: pap + key: pap + source_interface: Eth 1/2 + timeout: 10 + servers: + host: + - name: 1.2.3.4 + auth_type: pap + key: 1234 + state: merged + - name: Merge radius configurations + sonic_radius_server: + config: + auth_type: chap + key: chap + nas_ip: 1.2.3.4 + statistics: true + timeout: 10 + retransmit: 3 + servers: + host: + - name: localhost + auth_type: mschapv2 + key: local + priority: 2 + port: 52 + retransmit: 2 + timeout: 20 + source_interface: Eth 1/2 + vrf: mgmt + state: merged diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_api.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_api.yaml new file mode 100644 index 00000000..4c1c07cf --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_api.yaml @@ -0,0 +1,37 @@ +--- +- name: Ansible resource module example + hosts: datacenter + gather_facts: False + connection: httpapi + collections: + - dellemc.enterprise_sonic + tasks: + - name: "Test patch_api" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Eth1%2f26/config/description + method: "PATCH" + status_code: 204 + body: {"openconfig-interfaces:description": "hi "} + - name: "Test patch_api" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Eth1%2f26/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/ + method: "PATCH" + status_code: 204 + body: "{{ lookup('file','patch.txt') }}" + - name: "Test put_api" + sonic_api: + url: data/openconfig-network-instance:network-instances/network-instance=Vlan100 + method: "PUT" + body: {"openconfig-network-instance:network-instance": [{"name": "Vlan100", "config": {"name": "Vlan100"}}]} + status_code: 204 + - name: "Test get_api" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Eth1%2f26 + method: "GET" + status_code: 200 + - name: "Test delete_api" + sonic_api: + url: 
data/openconfig-interfaces:interfaces/interface=Eth1%2f26/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses/address=131.1.1.1/config/prefix-length + method: "DELETE" + status_code: 204 + diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp.yaml new file mode 100644 index 00000000..c3784f98 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp.yaml @@ -0,0 +1,321 @@ +--- +- name: Ansible Enterprise SONiC BGP resource module example + hosts: datacenter + gather_facts: False + connection: httpapi + collections: + - dellemc.enterprise_sonic + vars: + bgp_as1: 4 + bgp_as2: 10 + bgp_as3: 11 + vrf1: VrfCheck1 + tasks: + - name: "Configure route map configurations" + vars: + ansible_connection: network_cli + sonic_config: + commands: + - route-map aa permit 11 + - route-map bb permit 22 + - ip prefix-list p1 seq 1 permit 1.1.1.0/24 + - ip prefix-list p2 seq 2 permit 2.2.2.128/25 + save: yes + - name: Configure VRFs + sonic_vrfs: + config: + - name: "{{vrf1}}" + - name: "Test sonic_bgp" + sonic_bgp: + config: + - bgp_as: "{{bgp_as1}}" + router_id: 10.2.2.4 + log_neighbor_changes: False + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: False + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 667 + med_val: 7878 + - bgp_as: "{{bgp_as2}}" + log_neighbor_changes: True + vrf_name: "{{vrf1}}" + bestpath: + as_path: + confed: False + ignore: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 332 + med_val: 9987 + state: merged + - name: "Test sonic_bgp_af merge 01" + sonic_bgp_af: + config: + - bgp_as: "{{bgp_as1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + advertise_all_vni: false + 
redistribute: + - metric: "20" + protocol: connected + route_map: aa + - metric: "26" + protocol: ospf + route_map: bb + - metric: "25" + protocol: static + route_map: aa + - afi: ipv6 + safi: unicast + advertise_all_vni: true + redistribute: + - metric: "21" + protocol: connected + route_map: bb + - metric: "27" + protocol: ospf + route_map: aa + - metric: "28" + protocol: static + route_map: aa + - afi: l2vpn + safi: evpn + advertise_all_vni: True + advertise_default_gw: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: aa + - advertise_afi: ipv6 + route_map: bb + - bgp_as: "{{bgp_as2}}" + vrf_name: "{{vrf1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + redistribute: + - metric: "20" + protocol: connected + route_map: aa + - metric: "26" + protocol: ospf + route_map: bb + - metric: "25" + protocol: static + route_map: aa + - afi: ipv6 + safi: unicast + redistribute: + - metric: "21" + protocol: connected + route_map: bb + - metric: "27" + protocol: ospf + route_map: aa + - metric: "28" + protocol: static + route_map: aa + - afi: l2vpn + safi: evpn + advertise_default_gw: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: aa + - advertise_afi: ipv6 + route_map: bb + state: merged + - name: "Test sonic_bgp_neighbors merged state" + sonic_bgp_neighbors: + config: + - bgp_as: "{{bgp_as1}}" + peer_group: + - name: SPINE + remote_as: + peer_type: internal + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + advertisement_interval: 15 + timers: + keepalive: 50 + holdtime: 40 + capability: + dynamic: true + extended_nexthop: true + address_family: + afis: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: aa + send_default_route: false + prefix_limit: + max_prefixes: 200 + prevent_teardown: false + warning_threshold: 88 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - name: SPINE2 + neighbors: + - neighbor: Eth1/1 + remote_as: + peer_type: internal + peer_group: SPINE + 
advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + bfd: + enabled: false + capability: + dynamic: true + extended_nexthop: true + - neighbor: 192.168.1.4 + - neighbor: Eth1/2 + remote_as: + peer_as: 3 + peer_group: SPINE2 + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - bgp_as: "{{bgp_as2}}" + vrf_name: "{{vrf1}}" + peer_group: + - name: SPINE3 + remote_as: + peer_type: internal + bfd: + check_failure: false + enabled: true + profile: 'profile 3' + advertisement_interval: 15 + timers: + keepalive: 50 + holdtime: 40 + capability: + dynamic: true + extended_nexthop: true + neighbors: + - neighbor: Eth1/11 + peer_group: SPINE3 + - neighbor: Eth1/12 + state: merged + - name: "Test sonic_bgp_neighbors_af merge 01" + sonic_bgp_neighbors_af: + config: + - bgp_as: "{{bgp_as1}}" + neighbors: + - neighbor: Eth1/1 + address_family: + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + value: 6 + route_map: + - name: aa + direction: out + - name: bb + direction: in + route_reflector_client: false + route_server_client: true + - neighbor: Eth1/2 + address_family: + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + origin: true + route_map: + - name: aa + direction: out + - name: bb + direction: in + route_reflector_client: true + route_server_client: true + ip_afi: + default_policy_name: bb + send_default_route: true + prefix_limit: + max_prefixes: 100 + prevent_teardown: true + warning_threshold: 80 + prefix_list_in: p1 + prefix_list_out: p2 + - bgp_as: "{{bgp_as2}}" + vrf_name: "{{vrf1}}" + neighbors: + - neighbor: Eth1/11 + address_family: + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + value: 6 + route_map: + - name: aa + direction: out + - name: bb + direction: in + route_reflector_client: false + route_server_client: true + - afi: ipv6 + safi: unicast + activate: true + allowas_in: + value: 6 + route_map: + - name: aa + direction: out + - name: bb + direction: in + 
route_reflector_client: false + route_server_client: true + - neighbor: Eth1/12 + address_family: + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + origin: true + route_map: + - name: aa + direction: out + - name: bb + direction: in + route_reflector_client: true + route_server_client: true + - afi: ipv6 + safi: unicast + activate: true + allowas_in: + value: 2 + route_map: + - name: aa + direction: out + - name: bb + direction: in + route_reflector_client: true + route_server_client: true + state: merged diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp_communities.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp_communities.yaml new file mode 100644 index 00000000..1cc31e75 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_bgp_communities.yaml @@ -0,0 +1,66 @@ +--- +- name: "Test Enterprise SONiC BGP Community configurations" + hosts: datacenter + gather_facts: no + connection: httpapi + collections: + - dellemc.enterprise_sonic + tasks: + - name: Add as_path_list configuration + sonic_bgp_as_paths: + config: + - name: test + members: + - "11" + - "22" + - "33" + permit: true + - name: test_1 + members: + - "101.101" + - "201.201" + - "301.301" + permit: false + state: merged + - name: Add bgp_community configuration + sonic_bgp_communities: + config: + - name: test + type: expanded + permit: true + members: + regex: + - "11" + - "22" + - "33" + - name: test_1 + type: expanded + permit: true + members: + regex: + - "101.101" + - "201.201" + - "301.301" + state: merged + - name: Add bgp_extcommunity configuration + sonic_bgp_ext_communities: + config: + - name: exp1 + type: expanded + permit: true + members: + regex: + - "11" + - "22" + - "33" + - name: std1 + type: standard + permit: true + members: + route_target: + - "101.101" + - "201.201" + route_origin: + - "301.301" + - "401.401" + state: merged diff --git 
a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_command.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_command.yaml new file mode 100644 index 00000000..ca637ca9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_command.yaml @@ -0,0 +1,58 @@ +--- + +- name: "Test SONiC CLI" + hosts: datacenter + gather_facts: no + connection: network_cli + collections: + - dellemc.enterprise_sonic + tasks: + + - name: Test SONiC single command + sonic_command: + commands: 'show interface status' + register: cmd_op + - name: Test SONiC single command with wait_for + sonic_command: + commands: 'show version' + wait_for: + - result[0] contains Del + register: cmd_op + - name: Test SONiC multiple command with wait_for + sonic_command: + commands: + - 'show version' + - 'show system' + wait_for: + - result[0] contains Dell + - result[1] contains Hostname + register: cmd_op + - name: Test SONiC commands with wait_for negative case + sonic_command: + commands: + - 'show version' + - 'show system' + wait_for: + - result[0] contains Fel + - result[1] contains Hostname + register: cmd_op + ignore_errors: yes + - name: Test SONiC commands with wait_for and match=any + sonic_command: + commands: + - 'show version' + - 'show system' + wait_for: + - result[0] contains Fel + - result[1] contains Hostname + match: any + retries: 3 + interval: 2 + register: cmd_op + - name: Test SONiC command with prompt handling + sonic_command: + commands: + - command: 'image remove all' + prompt: '\[y/N\]:$' + answer: 'N' + register: cmd_op diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_config.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_config.yaml new file mode 100644 index 00000000..a368e0b6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/sonic_config.yaml @@ -0,0 +1,42 @@ 
---
# Example plays for the sonic_config and sonic_facts modules
# (originally shipped as two separate example files).

# Play 1: sonic_config usage — commands/lines, parents, before/after,
# source files and interactive prompts.
- hosts: datacenter
  connection: network_cli
  gather_facts: no
  collections:
    - dellemc.enterprise_sonic
  tasks:
    - name: "Configure SNMP community for SONiC device along with 'save'"
      sonic_config:
        commands: ['snmp-server community public group ro']
        save: yes

    - name: "Configure interface description using 'parents' option on SONiC device"
      sonic_config:
        lines:
          - description hi
        parents: ['interface Eth1/3']

    - name: "Configure bgp using multiple level 'parents' on SONiC device"
      sonic_config:
        lines:
          - maximum-paths 4
        parents: ['router bgp 4', 'address-family ipv4 unicast']

    - name: "Configure ip access-list using 'before' and 'after' option on SONiC device"
      sonic_config:
        lines:
          - seq 1 permit tcp any any ack
        parents: ['ip access-list test']
        before: ['no ip access-list test']
        after: ['no ip access-list test']

    - name: "Configure cli using source file"
      sonic_config:
        src: src.txt

    - name: "Configure cli with prompts on SONiC device"
      sonic_config:
        commands:
          - command: 'do image remove all'
            prompt: '\[y/N\]:$'
            answer: 'N'

# Play 2: gather resource-module facts over httpapi and print them.
- name: Ansible resource module facts example
  hosts: datacenter
  gather_facts: True
  connection: httpapi
  collections:
    - dellemc.enterprise_sonic
  tasks:
    - name: "Gather facts"
      sonic_facts:
        gather_subset: min
        gather_network_resources:
          - interfaces
          - l3_interfaces
          - lag_interfaces
          - vlans
          - bgp
      register: result

    - name: "debug facts"
      debug:
        msg: "{{ result.ansible_facts.ansible_network_resources }}"
---
# Example playbook covering the interface-related resource modules:
# interfaces, vlans, l2_interfaces, lag_interfaces, l3_interfaces
# and port_breakout, all over the httpapi connection.

- name: Ansible resource module example
  hosts: datacenter
  gather_facts: False
  connection: httpapi
  collections:
    - dellemc.enterprise_sonic
  tasks:
    - name: sonic_interfaces configuration
      sonic_interfaces:
        config:
          - name: Loopback91
          - name: Eth1/12
            description: "hi test"
            enabled: False
          - name: Eth1/13
            description: "hi ans"
            enabled: False

    - name: Add VLANs
      sonic_vlans:
        config:
          - vlan_id: 11
          - vlan_id: 12
          - vlan_id: 13
        state: merged

    - name: sonic_l2_interfaces configuration
      sonic_l2_interfaces:
        config:
          - name: Eth1/12
            access:
              vlan: 12
          - name: Eth1/13
            access:
              vlan: 11
            trunk:
              allowed_vlans:
                - vlan: 12
                - vlan: 13
        state: merged

    - name: sonic_lag_interfaces configuration
      sonic_lag_interfaces:
        config:
          - name: PortChannel12
            members:
              interfaces:
                - member: Eth1/20
                - member: Eth1/21
          - name: PortChannel1
            members:
              interfaces:
                - member: Eth1/22
          - name: PortChannel2
        state: merged

    - name: sonic_l3_interfaces configuration
      sonic_l3_interfaces:
        config:
          - name: Loopback100
            ipv4:
              addresses:
                - address: 101.1.1.1/32
          - name: vlan 11
            ipv6:
              addresses:
                - address: 150::1/16
          - name: po 12
            ipv4:
              addresses:
                - address: 180.1.1.1/16
          - name: Eth1/24
            ipv6:
              enabled: true

    - name: Merge port breakout configurations
      sonic_port_breakout:
        config:
          - name: 1/11
            mode: 1x100G
        state: merged
---
# Example plays for the sonic_system and sonic_vxlans resource modules
# (originally shipped as two separate example files).

# Play 1: system-level settings — hostname, interface naming mode,
# anycast gateway addressing and MAC.
- name: Ansible resource module example for sonic_system
  hosts: datacenter
  gather_facts: False
  connection: httpapi
  collections:
    - dellemc.enterprise_sonic
  tasks:
    - name: Test SONiC system
      sonic_system:
        config:
          hostname: SONIC-test
          interface_naming: standard
          anycast_address:
            ipv4: true
            ipv6: true
            mac_address: 00:09:5B:EC:EE:F2
        state: merged

# Play 2: VXLAN overlay — prerequisite VLANs and VRFs, then a VTEP with
# VNI-to-VLAN and VNI-to-VRF mappings.
- name: Ansible resource module example
  hosts: datacenter
  gather_facts: False
  connection: httpapi
  collections:
    - dellemc.enterprise_sonic
  tasks:
    - name: Add VLANs configuration
      sonic_vlans:
        config:
          - vlan_id: 11
          - vlan_id: 12
          - vlan_id: 13
        state: merged

    - name: configure VRFs
      sonic_vrfs:
        config:
          - name: Vrfcheck1
          - name: Vrfcheck2

    - name: "sonic_vxlans configuration"
      sonic_vxlans:
        config:
          - name: vteptest1
            source_ip: 1.1.1.1
            primary_ip: 2.2.2.2
            evpn_nvo: nvo6
            vlan_map:
              - vni: 101
                vlan: 11
              - vni: 102
                vlan: 12
            vrf_map:
              - vni: 101
                vrf: Vrfcheck1
              - vni: 102
                vrf: Vrfcheck2
      register: merged03_output
1800 + diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/action/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/action/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/action/sonic.py b/ansible_collections/dellemc/enterprise_sonic/plugins/action/sonic.py new file mode 100644 index 00000000..5f7ac3a8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/action/sonic.py @@ -0,0 +1,51 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
#
# (c) 2020 Red Hat Inc.
#
# (c) 2020 Dell Inc.
#
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule

from ansible.utils.display import Display

display = Display()

DOCUMENTATION = """
short_description: Action plugin module for sonic CLI modules
version_added: 1.0.0
"""


class ActionModule(ActionNetworkModule):
    """Dispatch sonic_* CLI tasks through the shared network action plugin."""

    def run(self, task_vars=None):
        """Strip legacy 'provider' task args and delegate to the base action.

        Only the short module name (after the collection prefix) decides
        whether config-module handling is enabled in the base class.
        """
        module_name = self._task.action.split('.')[-1]
        # sonic_config is the only module that needs config-module semantics.
        self._config_module = module_name == 'sonic_config'

        if self._play_context.connection in ('network_cli', 'httpapi'):
            # 'provider' is a legacy connection dict; persistent connections
            # carry credentials themselves, so warn and drop it when populated.
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']

        return super(ActionModule, self).run(task_vars=task_vars)
#
# (c) 2020 Red Hat Inc.
#
# (c) 2020 Dell Inc.
#
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
---
name: sonic
short_description: Use sonic cliconf to run command on Dell OS10 platform
description:
  - This sonic plugin provides low level abstraction apis for
    sending and receiving CLI commands from Dell OS10 network devices.
"""

import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):
    """CLI low-level transport for Enterprise SONiC devices."""

    def get_device_info(self):
        """Return minimal device identification for this platform."""
        return {'network_os': 'sonic'}

    @enable_mode
    def edit_config(self, command):
        """Enter configure mode, apply each command, then return to exec mode.

        Dict-shaped entries carry an interactive prompt and its answer;
        plain strings are sent as-is.
        """
        results = []
        self.send_command("configure terminal")
        for item in to_list(command):
            if isinstance(item, dict):
                reply = self.get(command=item["command"], prompt=item["prompt"], answer=item["answer"])
                results.append(reply)
            else:
                results.append(self.send_command(to_bytes(item)))
        self.send_command("end")
        return results

    @enable_mode
    def get_config(self, source="running", flags=None, format=None):
        """Fetch the running or startup configuration, honoring extra flags.

        Raises ValueError for any other configuration source.
        """
        if source not in ("running", "startup"):
            raise ValueError(
                "fetching configuration from %s is not supported" % source
            )
        if source == "running":
            show_cmd = "show running-config "
        else:
            show_cmd = "show startup-config "
        show_cmd += " ".join(to_list(flags or []))
        return self.send_command(show_cmd.strip())

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        """Thin passthrough to send_command for a single CLI exchange."""
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        """Return the base capabilities, JSON-encoded."""
        caps = super(Cliconf, self).get_capabilities()
        return json.dumps(caps)

    def run_commands(self, commands=None, check_rc=True):
        """Run exec-mode commands; optionally tolerate connection failures.

        'output' is not a supported per-command key. When check_rc is False,
        a connection failure is captured as the command's output instead of
        being re-raised.
        """
        if commands is None:
            raise ValueError("'commands' value is required")

        collected = []
        for entry in to_list(commands):
            if not isinstance(entry, Mapping):
                entry = {'command': entry}

            output = entry.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                reply = self.send_command(**entry)
            except AnsibleConnectionFailure as exc:
                if check_rc:
                    raise
                reply = getattr(exc, 'err', to_text(exc))

            collected.append(reply)

        return collected

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            self._update_cli_prompt_context(config_context=')#')
# (c) 2019 Red Hat Inc.
#
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = """
---
name: sonic
short_description: HttpApi Plugin for devices supporting Restconf SONIC API
description:
  - This HttpApi plugin provides methods to connect to Restconf SONIC API endpoints.
version_added: 1.0.0
options:
  root_path:
    type: str
    description:
      - Specifies the location of the Restconf root.
    default: '/restconf'
    vars:
      - name: ansible_httpapi_restconf_root
"""

import json

from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list

CONTENT_TYPE = 'application/yang-data+json'


class HttpApi(HttpApiBase):
    """Restconf (yang-data+json) transport for Enterprise SONiC devices."""

    def send_request(self, data, **message_kwargs):
        """Serialize *data* as JSON and send it to the Restconf endpoint.

        message_kwargs may carry 'path', 'method', 'content_type' and
        'accept'. Returns (status_code, decoded_body) via handle_response.
        """
        if data:
            data = json.dumps(data)

        # Join root and request path with exactly one '/' between them.
        path = '/'.join([self.get_option('root_path').rstrip('/'), message_kwargs.get('path', '').lstrip('/')])

        headers = {
            'Content-Type': message_kwargs.get('content_type') or CONTENT_TYPE,
            'Accept': message_kwargs.get('accept') or CONTENT_TYPE,
        }
        response, response_data = self.connection.send(path, data, headers=headers, method=message_kwargs.get('method'))

        return handle_response(response, response_data, message_kwargs)

    def get(self, command):
        """Issue a GET for *command* (a Restconf path)."""
        return self.send_request(path=command, data=None, method='get')

    def edit_config(self, requests):
        """Send a list of http requests to remote device and return results

        Raises ValueError when *requests* is None; connection errors are
        re-raised with a text-safe message.
        """
        if requests is None:
            raise ValueError("'requests' value is required")

        responses = list()
        for req in to_list(requests):
            try:
                response = self.send_request(**req)
            except ConnectionError as exc:
                raise ConnectionError(to_text(exc, errors='surrogate_then_replace'))
            responses.append(response)
        return responses

    def get_capabilities(self):
        """Return the (minimal) capability document, JSON-encoded."""
        result = {}
        result['rpc'] = []
        result['network_api'] = 'sonic_rest'

        return json.dumps(result)


def handle_response(response, response_data, request_data):
    """Decode a Restconf reply; raise ConnectionError for HTTP errors.

    Returns (status_code, decoded_body) on success. Non-JSON bodies are
    passed through undecoded, matching the original best-effort behavior.
    """
    response_data = response_data.read()
    try:
        if not response_data:
            response_data = ""
        else:
            response_data = json.loads(response_data.decode('utf-8'))
    except ValueError:
        pass

    if isinstance(response, HTTPError):
        if response_data:
            # BUG FIX: the previous code assigned a plain string to
            # error_text when the Restconf 'errors' container was present,
            # then called .update() on it, raising AttributeError instead of
            # the intended ConnectionError. Normalize to a dict first so the
            # code/request metadata can always be attached.
            if isinstance(response_data, dict) and 'errors' in response_data:
                errors = response_data['errors']['error']
                error_text = {u'error-message': '\n'.join(error['error-message'] for error in errors)}
            elif isinstance(response_data, dict):
                error_text = dict(response_data)
            else:
                # Non-JSON error payload (bytes/str) — wrap it.
                error_text = {u'error-message': to_text(response_data)}
            error_text.update({u'code': response.code})
            error_text.update({u'request_data': request_data})
            raise ConnectionError(error_text, code=response.code)
        raise ConnectionError(to_text(response), code=response.code)
    return response.getcode(), response_data
a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py new file mode 100644 index 00000000..86040892 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py @@ -0,0 +1,66 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
#
# -*- coding: utf-8 -*-
# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_aaa module
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class AaaArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_aaa resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: a single optional 'config' dict with an
    # 'authentication.data' sub-tree, plus the standard resource-module state.
    argument_spec = {
        "config": {
            "type": "dict",
            "options": {
                "authentication": {
                    "type": "dict",
                    "options": {
                        "data": {
                            "type": "dict",
                            "options": {
                                "fail_through": {"type": "bool"},
                                "group": {
                                    "type": "str",
                                    "choices": ["ldap", "radius", "tacacs+"],
                                },
                                "local": {"type": "bool"},
                            },
                        },
                    },
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_bgp module
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class BgpArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_bgp resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: a list of per-VRF BGP instances plus the
    # standard resource-module state.
    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'bestpath': {
                    'options': {
                        'as_path': {
                            'options': {
                                'confed': {'type': 'bool'},
                                'ignore': {'type': 'bool'},
                                'multipath_relax': {'type': 'bool'},
                                'multipath_relax_as_set': {'type': 'bool'}
                            },
                            'type': 'dict'
                        },
                        'compare_routerid': {'type': 'bool'},
                        'med': {
                            'options': {
                                'confed': {'type': 'bool'},
                                'missing_as_worst': {'type': 'bool'},
                                'always_compare_med': {'type': 'bool'}
                            },
                            'type': 'dict'
                        }
                    },
                    'type': 'dict'
                },
                'bgp_as': {'required': True, 'type': 'str'},
                'log_neighbor_changes': {'type': 'bool'},
                'router_id': {'type': 'str'},
                'max_med': {
                    'options': {
                        'on_startup': {
                            'options': {
                                'timer': {'type': 'int'},
                                'med_val': {'type': 'int'}
                            },
                            'type': 'dict'
                        }
                    },
                    'type': 'dict'
                },
                'timers': {
                    'options': {
                        'holdtime': {'type': 'int'},
                        'keepalive_interval': {'type': 'int'}
                    },
                    'type': 'dict'
                },
                'vrf_name': {'default': 'default', 'type': 'str'}
            },
            'type': 'list'
        },
        'state': {
            'choices': ['merged', 'deleted'],
            'default': 'merged',
            # 'type' was implicit (AnsibleModule defaults to str); made
            # explicit for consistency with the sibling argspec modules.
            'type': 'str'
        }
    }  # pylint: disable=C0301
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_bgp_af module
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Bgp_afArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_bgp_af resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: per-VRF BGP instances, each carrying a
    # list of address-family (afi/safi) entries, plus the standard state.
    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'address_family': {
                    'options': {
                        'afis': {
                            'elements': 'dict',
                            'options': {
                                'advertise_pip': {'type': 'bool'},
                                'advertise_pip_ip': {'type': 'str'},
                                'advertise_pip_peer_ip': {'type': 'str'},
                                'advertise_svi_ip': {'type': 'bool'},
                                'route_advertise_list': {
                                    'elements': 'dict',
                                    'options': {
                                        'advertise_afi': {
                                            'choices': ['ipv4', 'ipv6'],
                                            'required': True,
                                            'type': 'str'
                                        },
                                        'route_map': {
                                            'type': 'str'
                                        }
                                    },
                                    'type': 'list'
                                },
                                'advertise_all_vni': {'type': 'bool'},
                                'advertise_default_gw': {'type': 'bool'},
                                'afi': {
                                    'choices': ['ipv4', 'ipv6', 'l2vpn'],
                                    'required': True,
                                    'type': 'str'
                                },
                                'max_path': {
                                    'options': {
                                        'ebgp': {'type': 'int'},
                                        'ibgp': {'type': 'int'}
                                    },
                                    'type': 'dict'
                                },
                                'network': {'type': 'list', 'elements': 'str'},
                                'dampening': {'type': 'bool'},
                                'redistribute': {
                                    'elements': 'dict',
                                    'options': {
                                        'metric': {'type': 'str'},
                                        'protocol': {
                                            'choices': ['ospf', 'static', 'connected'],
                                            'required': True,
                                            'type': 'str'
                                        },
                                        'route_map': {'type': 'str'}
                                    },
                                    'type': 'list'
                                },
                                'safi': {
                                    'choices': ['unicast', 'evpn'],
                                    'default': 'unicast',
                                    'type': 'str'
                                }
                            },
                            'required_together': [['afi', 'safi']],
                            'type': 'list'
                        }
                    },
                    'type': 'dict'
                },
                'bgp_as': {'required': True, 'type': 'str'},
                'vrf_name': {'default': 'default', 'type': 'str'}
            },
            'type': 'list'
        },
        'state': {
            'choices': ['merged', 'deleted'],
            'default': 'merged',
            # 'type' was implicit (AnsibleModule defaults to str); made
            # explicit for consistency with the sibling argspec modules.
            'type': 'str'
        }
    }  # pylint: disable=C0301
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_bgp_as_paths module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Bgp_as_pathsArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_bgp_as_paths resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: a list of named AS-path lists, each with
    # optional member regexes and a permit/deny flag, plus the standard state.
    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "permit": {"required": False, "type": "bool"},
                "members": {
                    "elements": "str",
                    "required": False,
                    "type": "list",
                },
                "name": {"required": True, "type": "str"},
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_bgp_communities module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Bgp_communitiesArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_bgp_communities resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: a list of named community lists (standard
    # or expanded) with well-known flags and member regexes, plus the state.
    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "aann": {"type": "str"},
                "local_as": {"type": "bool"},
                "match": {
                    "type": "str",
                    "choices": ["ALL", "ANY"],
                    "default": "ANY",
                },
                "members": {
                    "type": "dict",
                    "options": {
                        "regex": {"elements": "str", "type": "list"},
                    },
                },
                "name": {"required": True, "type": "str"},
                "no_advertise": {"type": "bool"},
                "no_export": {"type": "bool"},
                "no_peer": {"type": "bool"},
                "permit": {"type": "bool"},
                "type": {
                    "type": "str",
                    "choices": ["standard", "expanded"],
                    "default": "standard",
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py new file mode 100644 index 00000000..aec0f364 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py @@ -0,0 +1,75 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# WARNING: this file is auto generated by the resource module builder
# playbook. Do not edit it manually; change the model or the builder
# template instead.

"""
The arg spec for the sonic_bgp_ext_communities module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Bgp_ext_communitiesArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_bgp_ext_communities resource module."""

    def __init__(self, **kwargs):
        pass

    # AnsibleModule argument spec: a list of named extended community lists.
    # Within 'members', a regex entry excludes route_origin/route_target.
    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "match": {
                    "type": "str",
                    "choices": ["all", "any"],
                    "default": "any",
                },
                "members": {
                    "type": "dict",
                    "mutually_exclusive": [
                        ["regex", "route_origin"],
                        ["regex", "route_target"],
                    ],
                    "options": {
                        "regex": {"elements": "str", "type": "list"},
                        "route_origin": {"elements": "str", "type": "list"},
                        "route_target": {"elements": "str", "type": "list"},
                    },
                },
                "name": {"required": True, "type": "str"},
                "permit": {"type": "bool"},
                "type": {
                    "type": "str",
                    "choices": ["standard", "expanded"],
                    "default": "standard",
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_bgp_neighbors module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Bgp_neighborsArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_bgp_neighbors module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'bgp_as': {'required': True, 'type': 'str'}, + 'neighbors': { + 'elements': 'dict', + 'options': { + 'neighbor': {'required': True, 'type': 'str'}, + 'remote_as': { + 'mutually_exclusive': [['peer_type', 'peer_as']], + 'options': { + 'peer_type': {'type': 'str', 'choices': ['internal', 'external']}, + 'peer_as': {'type': 'int'}, + }, + 'type': 'dict' + }, + 'peer_group': {'type': 'str'}, + 'bfd': { + 'options': { + 'enabled': {'type': 'bool'}, + 'check_failure': {'type': 'bool'}, + 'profile': {'type': 'str'} + }, + 'type': 'dict' + }, + 'advertisement_interval': {'type': 'int'}, + 'timers': { + 'options': { + 'holdtime': {'type': 'int'}, + 'keepalive': {'type': 'int'}, + 'connect_retry': {'type': 'int'} + }, + 'type': 'dict' + }, + 'capability': { + 'options': { + 'dynamic': {'type': 'bool'}, + 'extended_nexthop': {'type': 'bool'}, + }, + 'type': 'dict' + }, + 'auth_pwd': { + 'options': { + 'pwd': {'required': True, 'type': 'str'}, + 'encrypted': {'default': 'False', 'type': 'bool'}, + }, + 'type': 'dict' + }, + 'nbr_description': {'type': 'str'}, + 
'disable_connected_check': {'type': 'bool'}, + 'dont_negotiate_capability': {'type': 'bool'}, + 'ebgp_multihop': { + 'options': { + 'enabled': {'default': 'False', 'type': 'bool'}, + 'multihop_ttl': {'type': 'int'} + }, + 'type': 'dict' + }, + 'enforce_first_as': {'type': 'bool'}, + 'enforce_multihop': {'type': 'bool'}, + 'local_address': {'type': 'str'}, + 'local_as': { + 'options': { + 'as': {'required': True, 'type': 'int'}, + 'no_prepend': {'type': 'bool'}, + 'replace_as': {'type': 'bool'}, + }, + 'type': 'dict' + }, + 'override_capability': {'type': 'bool'}, + 'passive': {'default': 'False', 'type': 'bool'}, + 'port': {'type': 'int'}, + 'shutdown_msg': {'type': 'str'}, + 'solo': {'type': 'bool'}, + 'strict_capability_match': {'type': 'bool'}, + 'ttl_security': {'type': 'int'}, + 'v6only': {'type': 'bool'} + }, + 'type': 'list' + }, + 'peer_group': { + 'elements': 'dict', + 'options': { + 'name': {'required': True, 'type': 'str'}, + 'remote_as': { + 'mutually_exclusive': [['peer_type', 'peer_as']], + 'options': { + 'peer_type': {'type': 'str', 'choices': ['internal', 'external']}, + 'peer_as': {'type': 'int'}, + }, + 'type': 'dict' + }, + 'address_family': { + 'options': { + 'afis': { + 'elements': 'dict', + 'options': { + 'activate': {'type': 'bool'}, + 'afi': { + 'choices': ['ipv4', 'ipv6', 'l2vpn'], + 'type': 'str' + }, + 'allowas_in': { + 'mutually_exclusive': [['origin', 'value']], + 'options': { + 'origin': {'type': 'bool'}, + 'value': {'type': 'int'} + }, + 'type': 'dict' + }, + 'ip_afi': { + 'options': { + 'default_policy_name': {'type': 'str'}, + 'send_default_route': {'default': False, 'type': 'bool'} + }, + 'type': 'dict' + }, + 'prefix_limit': { + 'options': { + 'max_prefixes': {'type': 'int'}, + 'prevent_teardown': {'default': False, 'type': 'bool'}, + 'warning_threshold': {'type': 'int'}, + 'restart_timer': {'type': 'int'} + }, + 'type': 'dict' + }, + 'prefix_list_in': {'type': 'str'}, + 'prefix_list_out': {'type': 'str'}, + 'safi': { + 'choices': 
['unicast', 'evpn'], + 'type': 'str' + }, + }, + 'required_together': [['afi', 'safi']], + 'type': 'list' + }, + }, + 'type': 'dict' + }, + 'bfd': { + 'options': { + 'enabled': {'type': 'bool'}, + 'check_failure': {'type': 'bool'}, + 'profile': {'type': 'str'} + }, + 'type': 'dict' + }, + 'advertisement_interval': {'type': 'int'}, + 'timers': { + 'options': { + 'holdtime': {'type': 'int'}, + 'keepalive': {'type': 'int'}, + 'connect_retry': {'type': 'int'} + }, + 'type': 'dict' + }, + 'capability': { + 'options': { + 'dynamic': {'type': 'bool'}, + 'extended_nexthop': {'type': 'bool'}, + }, + 'type': 'dict' + }, + 'auth_pwd': { + 'options': { + 'pwd': {'required': True, 'type': 'str'}, + 'encrypted': {'default': 'False', 'type': 'bool'}, + }, + 'type': 'dict' + }, + 'pg_description': {'type': 'str'}, + 'disable_connected_check': {'type': 'bool'}, + 'dont_negotiate_capability': {'type': 'bool'}, + 'ebgp_multihop': { + 'options': { + 'enabled': {'default': 'False', 'type': 'bool'}, + 'multihop_ttl': {'type': 'int'} + }, + 'type': 'dict' + }, + 'enforce_first_as': {'type': 'bool'}, + 'enforce_multihop': {'type': 'bool'}, + 'local_address': {'type': 'str'}, + 'local_as': { + 'options': { + 'as': {'required': True, 'type': 'int'}, + 'no_prepend': {'type': 'bool'}, + 'replace_as': {'type': 'bool'}, + }, + 'type': 'dict' + }, + 'override_capability': {'type': 'bool'}, + 'passive': {'default': 'False', 'type': 'bool'}, + 'shutdown_msg': {'type': 'str'}, + 'solo': {'type': 'bool'}, + 'strict_capability_match': {'type': 'bool'}, + 'ttl_security': {'type': 'int'} + }, + 'type': 'list' + }, + 'vrf_name': {'default': 'default', 'type': 'str'} + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/__init__.py 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/bgp_neighbors_af.py new file mode 100644 index 00000000..6cafc922 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_neighbors_af/bgp_neighbors_af.py @@ -0,0 +1,114 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The arg spec for the sonic_bgp_neighbors_af module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Bgp_neighbors_afArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_bgp_neighbors_af module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'bgp_as': {'required': True, 'type': 'str'}, + 'neighbors': { + 'elements': 'dict', + 'options': { + 'address_family': { + 'elements': 'dict', + 'options': { + 'activate': {'type': 'bool'}, + 'afi': { + 'choices': ['ipv4', 'ipv6', 'l2vpn'], + 'required': True, + 'type': 'str' + }, + 'allowas_in': { + 'mutually_exclusive': [['origin', 'value']], + 'options': { + 'origin': {'type': 'bool'}, + 'value': {'type': 'int'} + }, + 'type': 'dict' + }, + 'ip_afi': { + 'options': { + 'default_policy_name': {'type': 'str'}, + 'send_default_route': {'default': False, 'type': 'bool'} + }, + 'type': 'dict' + }, + 'prefix_limit': { + 'options': { + 'max_prefixes': {'type': 'int'}, + 'prevent_teardown': {'default': False, 'type': 'bool'}, + 'warning_threshold': {'type': 'int'}, + 'restart_timer': {'type': 'int'} + }, + 'type': 'dict' + }, + 'prefix_list_in': {'type': 'str'}, + 'prefix_list_out': {'type': 'str'}, + 'route_map': { + 'elements': 'dict', + 'options': { + 'direction': {'type': 'str'}, + 'name': {'type': 'str'} + }, + 'type': 'list' + }, + 'route_reflector_client': {'type': 'bool'}, + 'route_server_client': {'type': 'bool'}, + 'safi': { + 'choices': ['unicast', 'evpn'], + 'default': 'unicast', + 'type': 'str' + } + }, + 'required_together': [['afi', 'safi']], + 'type': 'list' + }, + 'neighbor': {'required': True, 'type': 'str'} + }, + 'type': 'list' + }, + 'vrf_name': {'default': 'default', 'type': 'str'} + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged' + } + } # pylint: disable=C0301 
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py new file mode 100644 index 00000000..3a4d0298 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py @@ -0,0 +1,53 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The arg spec for the sonic facts module. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class FactsArgs(object): # pylint: disable=R0903 + + """ The arg spec for the sonic facts module + """ + + def __init__(self, **kwargs): + pass + + choices = [ + 'all', + 'vlans', + 'interfaces', + 'l2_interfaces', + 'l3_interfaces', + 'lag_interfaces', + 'bgp', + 'bgp_af', + 'bgp_neighbors', + 'bgp_neighbors_af', + 'bgp_as_paths', + 'bgp_communities', + 'bgp_ext_communities', + 'mclag', + 'prefix_lists', + 'vrfs', + 'vxlans', + 'users', + 'system', + 'port_breakout', + 'aaa', + 'tacacs_server', + 'radius_server', + 'static_routes', + 'ntp' + ] + + argument_spec = { + 'gather_subset': dict(default=['!config'], type='list', elements='str'), + 'gather_network_resources': dict(choices=choices, type='list', elements='str'), + } diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/__init__.py new 
file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py new file mode 100644 index 00000000..76c36a90 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py @@ -0,0 +1,56 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The arg spec for the sonic_interfaces module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class InterfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "description": {"type": "str"}, + "enabled": {"type": "bool"}, + "mtu": {"type": "int"}, + "name": {"required": True, "type": "str"} + }, + "type": "list" + }, + "state": { + "choices": ["merged", "deleted"], + "default": "merged", + "type": "str" + } + } diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py new file mode 100644 index 00000000..bbebe2d5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py @@ -0,0 +1,71 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The arg spec for the sonic_l2_interfaces module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class L2_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_l2_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'access': { + 'options': { + 'vlan': {'type': 'int'} + }, + 'type': 'dict' + }, + 'name': {'required': True, 'type': 'str'}, + 'trunk': { + 'options': { + 'allowed_vlans': { + 'elements': 'dict', + 'options': { + 'vlan': {'type': 'int'} + }, + 'type': 'list' + } + }, + 'type': 'dict' + } + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged', + 'type': 'str' + } + } diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py new file mode 100644 index 00000000..6e83289c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py @@ -0,0 +1,81 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. 
+# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_l3_interfaces module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class L3_interfacesArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_l3_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'ipv4': { + 'mutually_exclusive': [['addresses', 'anycast_addresses']], + 'options': { + 'addresses': { + 'elements': 'dict', + 'options': { + 'address': {'type': 'str'}, + 'secondary': {'default': 'False', 'type': 'bool'} + }, + 'type': 'list' + }, + 'anycast_addresses': {'elements': 'str', 'type': 'list'}, + }, + 'type': 'dict' + }, + 'ipv6': { + 'options': { + 'addresses': { + 'elements': 'dict', + 'options': { + 'address': {'type': 'str'} + }, + 'type': 'list' + }, + 'enabled': {'type': 'bool'} + }, + 'type': 'dict' + }, + 'name': {'required': True, 'type': 'str'} + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged', + 'type': 'str' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py new file mode 100644 index 00000000..867d61a2 --- 
/dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py @@ -0,0 +1,67 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_lag_interfaces module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Lag_interfacesArgs(object): # pylint: disable=R0903 + + """The arg spec for the sonic_lag_interfaces module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + "config": { + "elements": "dict", + "options": { + "members": { + "options": { + "interfaces": { + "elements": "dict", + "options": { + "member": {"type": "str"} + }, + "type": "list" + } + }, + "type": "dict" + }, + "name": {"required": True, "type": "str"}, + "mode": {"type": "str", "choices": ["static", "lacp"]} + }, + "type": "list" + }, + "state": { + "choices": ["merged", "deleted"], + "default": "merged", + "type": "str" + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py new file mode 100644 index 00000000..be3c38ca --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py @@ -0,0 +1,82 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_mclag module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class MclagArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_mclag module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'options': { + 'domain_id': {'required': True, 'type': 'int'}, + 'keepalive': {'type': 'int'}, + 'peer_address': {'type': 'str'}, + 'peer_link': {'type': 'str'}, + 'members': { + 'options': { + 'portchannels': { + 'elements': 'dict', + 'options': { + 'lag': {'type': 'str'} + }, + 'type': 'list' + } + }, + 'type': 'dict' + }, + 'session_timeout': {'type': 'int'}, + 'source_address': {'type': 'str'}, + 'system_mac': {'type': 'str'}, + 'unique_ip': { + 'options': { + 'vlans': { + 'elements': 'dict', + 'options': { + 'vlan': {'type': 'str'} + }, + 'type': 'list' + } + }, + 'type': 'dict' + }, + }, + 'type': 'dict' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged', + 'type': 'str' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py new file mode 100644 index 00000000..062520af --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py @@ -0,0 +1,89 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_ntp module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class NtpArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_ntp module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'options': { + 'enable_ntp_auth': {'type': 'bool'}, + 'ntp_keys': { + 'elements': 'dict', + 'options': { + 'encrypted': {'type': 'bool'}, + 'key_id': {'required': True, + 'type': 'int', + 'no_log': True}, + 'key_type': {'type': 'str', + 'choices': ['NTP_AUTH_SHA1', + 'NTP_AUTH_MD5', + 'NTP_AUTH_SHA2_256']}, + 'key_value': {'type': 'str', 'no_log': True} + }, + 'type': 'list', + 'no_log': True + }, + 'servers': { + 'elements': 'dict', + 'options': { + 'address': {'required': True, + 'type': 'str'}, + 'key_id': {'type': 'int', 'no_log': True}, + 'maxpoll': {'type': 'int'}, + 'minpoll': {'type': 'int'} + }, + 'type': 'list' + }, + 'source_interfaces': { + 'elements': 'str', + 'type': 'list' + }, + 'trusted_keys': { + 
'elements': 'int', + 'type': 'list', + 'no_log': True + }, + 'vrf': {'type': 'str'} + }, + 'type': 'dict' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged', + 'type': 'str' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py new file mode 100644 index 00000000..3b8f4a5a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py @@ -0,0 +1,57 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The arg spec for the sonic_port_breakout module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Port_breakoutArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_port_breakout module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'mode': { + 'choices': ['1x100G', '1x400G', '1x40G', '2x100G', '2x200G', + '2x50G', '4x100G', '4x10G', '4x25G', '4x50G'], + 'type': 'str' + }, + 'name': {'required': True, 'type': 'str'} + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py new file mode 100644 index 00000000..d043ae6f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py @@ -0,0 +1,71 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. 
+# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_prefix_lists module +""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class Prefix_listsArgs: # pylint: disable=R0903 + """The arg spec for the sonic_prefix_lists module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'elements': 'dict', + 'options': { + 'afi': { + 'choices': ['ipv4', 'ipv6'], + 'default': 'ipv4', + 'type': 'str' + }, + 'name': {'required': True, 'type': 'str'}, + 'prefixes': { + 'elements': 'dict', + 'options': { + 'action': { + 'choices': ['permit', 'deny'], + 'required': True, + 'type': 'str' + }, + 'ge': {'type': 'int'}, + 'le': {'type': 'int'}, + 'prefix': {'required': True, 'type': 'str'}, + 'sequence': {'required': True, 'type': 'int'}}, + 'type': 'list' + } + }, + 'type': 'list' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged', + 'type': 'str' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py new file mode 100644 index 00000000..a56147a5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py @@ -0,0 +1,83 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. 
+# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The arg spec for the sonic_radius_server module +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class Radius_serverArgs(object): # pylint: disable=R0903 + """The arg spec for the sonic_radius_server module + """ + + def __init__(self, **kwargs): + pass + + argument_spec = { + 'config': { + 'options': { + 'auth_type': { + 'choices': ['pap', 'chap', 'mschapv2'], + 'default': 'pap', + 'type': 'str' + }, + 'key': {'type': 'str', 'no_log': True}, + 'nas_ip': {'type': 'str'}, + 'retransmit': {'type': 'int'}, + 'servers': { + 'options': { + 'host': { + 'elements': 'dict', + 'options': { + 'auth_type': { + 'choices': ['pap', 'chap', 'mschapv2'], + 'type': 'str' + }, + 'key': {'type': 'str', 'no_log': True}, + 'name': {'type': 'str'}, + 'port': {'type': 'int'}, + 'priority': {'type': 'int'}, + 'retransmit': {'type': 'int'}, + 'source_interface': {'type': 'str'}, + 'timeout': {'type': 'int'}, + 'vrf': {'type': 'str'} + }, + 'type': 'list' + } + }, + 'type': 'dict' + }, + 'statistics': {'type': 'bool'}, + 'timeout': {'type': 'int'} + }, + 'type': 'dict' + }, + 'state': { + 'choices': ['merged', 'deleted'], + 'default': 'merged' + } + } # pylint: disable=C0301 diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py 
class Static_routesArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_static_routes module.

    A list of per-VRF entries, each holding a list of prefixes with their
    next-hop definitions.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "vrf_name": {"type": "str", "required": True},
                "static_list": {
                    "type": "list",
                    "elements": "dict",
                    "options": {
                        "prefix": {"type": "str", "required": True},
                        "next_hops": {
                            "type": "list",
                            "elements": "dict",
                            "options": {
                                # Composite key identifying one next hop.
                                "index": {
                                    "type": "dict",
                                    "required": True,
                                    "options": {
                                        "blackhole": {"type": "bool", "default": False},
                                        "interface": {"type": "str"},
                                        "nexthop_vrf": {"type": "str"},
                                        "next_hop": {"type": "str"},
                                    },
                                },
                                "metric": {"type": "int"},
                                "tag": {"type": "int"},
                                "track": {"type": "int"},
                            },
                        },
                    },
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
class SystemArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_system module.

    Covers hostname, interface naming mode, and the SAG anycast-address
    settings.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        "config": {
            "type": "dict",
            "options": {
                "hostname": {"type": "str"},
                "interface_naming": {
                    "type": "str",
                    "choices": ["standard", "native"],
                },
                "anycast_address": {
                    "type": "dict",
                    "options": {
                        "ipv4": {"type": "bool"},
                        "ipv6": {"type": "bool"},
                        "mac_address": {"type": "str"},
                    },
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }  # pylint: disable=C0301
class Tacacs_serverArgs(object):  # pylint: disable=R0903
    """The arg spec for the sonic_tacacs_server module.

    Declares global TACACS+ settings plus a list of per-host server
    entries with their own defaults.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'options': {
                # Global authentication type; per-host entries may override it.
                'auth_type': {
                    'choices': ['pap', 'chap', 'mschap', 'login'],
                    'default': 'pap',
                    'type': 'str'
                },
                # Shared secret; no_log keeps it out of task output.
                'key': {'type': 'str', 'no_log': True},
                'servers': {
                    'options': {
                        'host': {
                            'elements': 'dict',
                            'options': {
                                'auth_type': {
                                    'choices': ['pap', 'chap', 'mschap', 'login'],
                                    'default': 'pap',
                                    'type': 'str'
                                },
                                'key': {'type': 'str', 'no_log': True},
                                'name': {'type': 'str'},
                                # 49 is the standard TACACS+ port.
                                'port': {'default': 49, 'type': 'int'},
                                'priority': {'default': 1, 'type': 'int'},
                                'timeout': {'default': 5, 'type': 'int'},
                                'vrf': {'default': 'default', 'type': 'str'}
                            },
                            'type': 'list'
                        }
                    },
                    'type': 'dict'
                },
                'source_interface': {'type': 'str'},
                'timeout': {'type': 'int'}
            },
            'type': 'dict'
        },
        'state': {
            'choices': ['merged', 'deleted'],
            'default': 'merged',
            # Explicit 'type' added for consistency with the sibling SONiC
            # argspec modules; Ansible would otherwise default it to 'str',
            # so behavior is unchanged.
            'type': 'str'
        }
    }  # pylint: disable=C0301
class UsersArgs(object):  # pylint: disable=R0903
    """The arg spec for the sonic_users module.

    A list of local user accounts with role and password-update policy.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'name': {'required': True, 'type': 'str'},
                # no_log keeps the password out of task output.
                'password': {'type': 'str', 'no_log': True},
                'role': {
                    'choices': ['admin', 'operator'],
                    'type': 'str'
                },
                # 'on_create' avoids idempotency churn when the password
                # hash on the device cannot be compared.
                'update_password': {
                    'choices': ['always', 'on_create'],
                    'default': 'always',
                    'type': 'str'
                }
            },
            'type': 'list'
        },
        'state': {
            'choices': ['merged', 'deleted'],
            'default': 'merged',
            # Explicit 'type' added for consistency with the sibling SONiC
            # argspec modules; Ansible would otherwise default it to 'str',
            # so behavior is unchanged.
            'type': 'str'
        }
    }  # pylint: disable=C0301
class VlansArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_vlans module.

    A list of VLANs, keyed by numeric VLAN id, with an optional
    description.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "vlan_id": {"type": "int", "required": True},
                "description": {"type": "str"},
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }
class VrfsArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_vrfs module.

    A list of VRFs, each with an optional set of member interfaces.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'type': 'list',
            'elements': 'dict',
            'options': {
                'name': {'type': 'str', 'required': True},
                'members': {
                    'type': 'dict',
                    'options': {
                        'interfaces': {
                            'type': 'list',
                            'elements': 'dict',
                            'options': {
                                'name': {'type': 'str'},
                            },
                        },
                    },
                },
            },
        },
        'state': {
            'type': 'str',
            'choices': ['merged', 'deleted'],
            'default': 'merged',
        },
    }  # pylint: disable=C0301
class VxlansArgs(object):  # pylint: disable=R0903
    """Argument specification for the sonic_vxlans module.

    A list of VTEPs with their VLAN-to-VNI and VRF-to-VNI mappings.
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        "config": {
            "type": "list",
            "elements": "dict",
            # A source IP is only meaningful alongside an EVPN NVO name.
            "required_together": [["source_ip", "evpn_nvo"]],
            "options": {
                "name": {"type": "str", "required": True},
                "evpn_nvo": {"type": "str"},
                "source_ip": {"type": "str"},
                "primary_ip": {"type": "str"},
                "vlan_map": {
                    "type": "list",
                    "elements": "dict",
                    "options": {
                        "vni": {"type": "int", "required": True},
                        "vlan": {"type": "int"},
                    },
                },
                "vrf_map": {
                    "type": "list",
                    "elements": "dict",
                    "options": {
                        "vni": {"type": "int", "required": True},
                        "vrf": {"type": "str"},
                    },
                },
            },
        },
        "state": {
            "type": "str",
            "choices": ["merged", "deleted"],
            "default": "merged",
        },
    }  # pylint: disable=C0301
#
# -*- coding: utf-8 -*-
# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_aaa class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    update_states,
    get_diff,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)

# REST method names used when building request dicts below.
PATCH = 'patch'
DELETE = 'delete'


class Aaa(ConfigBase):
    """
    The sonic_aaa class

    Compares the desired AAA configuration against the device facts and
    builds the REST requests needed to converge them.
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'aaa',
    ]

    def __init__(self, module):
        super(Aaa, self).__init__(module)

    def get_aaa_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
            (an empty list when no AAA facts are present — note the
            empty-case type differs from the populated-case dict)
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        aaa_facts = facts['ansible_network_resources'].get('aaa')
        if not aaa_facts:
            return []
        return aaa_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution, including 'before'
            facts and, when changed, 'after' facts
        """
        result = {'changed': False}
        warnings = list()
        commands = list()

        existing_aaa_facts = self.get_aaa_facts()
        commands, requests = self.set_config(existing_aaa_facts)
        # Only touch the device when there is at least one request;
        # check_mode reports the change without sending it.
        if commands and len(requests) > 0:
            if not self._module.check_mode:
                self.edit_config(requests)
            result['changed'] = True
        result['commands'] = commands

        changed_aaa_facts = self.get_aaa_facts()

        result['before'] = existing_aaa_facts
        if result['changed']:
            result['after'] = changed_aaa_facts

        result['warnings'] = warnings
        return result

    def edit_config(self, requests):
        # Send the accumulated requests to the device; fail the module on
        # connection errors.
        # NOTE(review): ConnectionError here resolves to the Python builtin,
        # which has no `.code` attribute; bgp.py in this collection imports
        # ConnectionError from ansible.module_utils.connection — presumably
        # this file should too. Confirm against upstream.
        try:
            response = edit_config(self._module, to_request(self._module, requests))
        except ConnectionError as exc:
            self._module.fail_json(msg=str(exc), code=exc.code)

    def set_config(self, existing_aaa_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_aaa_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        # 'state' is constrained to 'merged'/'deleted' by the argspec, so
        # exactly one branch below runs.
        state = self._module.params['state']
        if not want:
            want = {}

        if state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            diff = get_diff(want, have)
            commands = self._state_merged(want, have, diff)
        return commands

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
            the current configuration (as a (commands, requests) pair)
        """
        commands = []
        requests = []
        if diff:
            requests = self.get_create_aaa_request(diff)
            if len(requests) > 0:
                commands = update_states(diff, "merged")
        return commands, requests

    def _state_deleted(self, want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
            of the provided objects (as a (commands, requests) pair)
        """
        commands = []
        requests = []
        if not want:
            # No specific config requested: delete everything present.
            if have:
                requests = self.get_delete_all_aaa_request(have)
                if len(requests) > 0:
                    commands = update_states(have, "deleted")
        else:
            # Delete only the requested attributes that actually exist;
            # defaults are stripped from 'have' first so they are not
            # treated as user configuration.
            want = utils.remove_empties(want)
            new_have = self.remove_default_entries(have)
            d_diff = get_diff(want, new_have, is_skeleton=True)
            diff_want = get_diff(want, d_diff, is_skeleton=True)
            if diff_want:
                requests = self.get_delete_all_aaa_request(diff_want)
                if len(requests) > 0:
                    commands = update_states(diff_want, "deleted")
        return commands, requests

    def get_create_aaa_request(self, commands):
        # Build a single PATCH request against the openconfig AAA subtree.
        requests = []
        aaa_path = 'data/openconfig-system:system/aaa'
        method = PATCH
        aaa_payload = self.build_create_aaa_payload(commands)
        if aaa_payload:
            request = {'path': aaa_path, 'method': method, 'data': aaa_payload}
            requests.append(request)
        return requests

    def build_create_aaa_payload(self, commands):
        # Translate the module's 'authentication' config into the
        # openconfig-system AAA payload. 'local' and 'group' both land in
        # the ordered authentication-method list; failthrough is sent as a
        # string per the device's expected encoding.
        payload = {}
        if "authentication" in commands and commands["authentication"]:
            payload = {"openconfig-system:aaa": {"authentication": {"config": {"authentication-method": []}}}}
            if "local" in commands["authentication"]["data"] and commands["authentication"]["data"]["local"]:
                payload['openconfig-system:aaa']['authentication']['config']['authentication-method'].append("local")
            if "group" in commands["authentication"]["data"] and commands["authentication"]["data"]["group"]:
                auth_method = commands["authentication"]["data"]["group"]
                payload['openconfig-system:aaa']['authentication']['config']['authentication-method'].append(auth_method)
            if "fail_through" in commands["authentication"]["data"]:
                cfg = {'failthrough': str(commands["authentication"]["data"]["fail_through"])}
                payload['openconfig-system:aaa']['authentication']['config'].update(cfg)
        return payload

    def remove_default_entries(self, data):
        # Copy only explicitly-set authentication attributes, dropping
        # anything absent so defaults don't survive into the diff.
        # Assumes 'data', when truthy, has the {'authentication': {'data':
        # ...}} shape produced by the facts gathering — TODO confirm.
        new_data = {}
        if not data:
            return new_data
        else:
            new_data = {'authentication': {'data': {}}}
            local = data['authentication']['data'].get('local', None)
            if local is not None:
                new_data["authentication"]["data"]["local"] = local
            group = data['authentication']['data'].get('group', None)
            if group is not None:
                new_data["authentication"]["data"]["group"] = group
            fail_through = data['authentication']['data'].get('fail_through', None)
            if fail_through is not None:
                new_data["authentication"]["data"]["fail_through"] = fail_through
            return new_data

    def get_delete_all_aaa_request(self, have):
        # One DELETE for the authentication-method list (covers both
        # 'local' and 'group'), plus one for failthrough when present.
        requests = []
        if "authentication" in have and have["authentication"]:
            if "local" in have["authentication"]["data"] or "group" in have["authentication"]["data"]:
                request = self.get_authentication_method_delete_request()
                requests.append(request)
            if "fail_through" in have["authentication"]["data"]:
                request = self.get_failthrough_delete_request()
                requests.append(request)
        return requests

    def get_authentication_method_delete_request(self):
        # DELETE request for the whole authentication-method leaf-list.
        path = 'data/openconfig-system:system/aaa/authentication/config/authentication-method'
        method = DELETE
        request = {'path': path, 'method': method}
        return request

    def get_failthrough_delete_request(self):
        # DELETE request for the failthrough flag.
        path = 'data/openconfig-system:system/aaa/authentication/config/failthrough'
        method = DELETE
        request = {'path': path, 'method': method}
        return request
import to_request +from ansible.module_utils.connection import ConnectionError + +PATCH = 'patch' +POST = 'post' +DELETE = 'delete' +PUT = 'put' + +TEST_KEYS = [{'config': {'vrf_name': '', 'bgp_as': ''}}] + + +class Bgp(ConfigBase): + """ + The sonic_bgp class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp', + ] + + network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' + protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp' + log_neighbor_changes_path = 'logging-options/config/log-neighbor-state-changes' + holdtime_path = 'config/hold-time' + keepalive_path = 'config/keepalive-interval' + + def __init__(self, module): + super(Bgp, self).__init__(module) + + def get_bgp_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_facts = facts['ansible_network_resources'].get('bgp') + if not bgp_facts: + bgp_facts = [] + return bgp_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_bgp_facts = self.get_bgp_facts() + commands, requests = self.set_config(existing_bgp_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_bgp_facts = self.get_bgp_facts() + + result['before'] = existing_bgp_facts + if result['changed']: + result['after'] = changed_bgp_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_bgp_facts): + """ Collect the configuration from the args passed 
to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_bgp_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_bgp_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands 
necessary to remove the current configuration + of the provided objects + """ + is_delete_all = False + # if want is none, then delete all the bgps + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_bgp_requests(commands, have, is_delete_all) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_delete_single_bgp_request(self, vrf_name): + delete_path = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + return ({'path': delete_path, 'method': DELETE}) + + def get_delete_max_med_requests(self, vrf_name, max_med, match): + requests = [] + + match_max_med = match.get('max_med', None) + if not max_med or not match_max_med: + return requests + + generic_del_path = '%s=%s/%s/global/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + + match_max_med_on_startup = match.get('max_med', {}).get('on_startup') + if match_max_med_on_startup: + requests.append({'path': generic_del_path + "max-med/config/time", 'method': DELETE}) + requests.append({'path': generic_del_path + "max-med/config/max-med-val", 'method': DELETE}) + + return requests + + def get_delete_bestpath_requests(self, vrf_name, bestpath, match): + requests = [] + + match_bestpath = match.get('bestpath', None) + if not bestpath or not match_bestpath: + return requests + + route_selection_del_path = '%s=%s/%s/global/route-selection-options/config/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + multi_paths_del_path = '%s=%s/%s/global/use-multiple-paths/ebgp/config/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + generic_del_path = '%s=%s/%s/global/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + + if bestpath.get('compare_routerid', None) and match_bestpath.get('compare_routerid', None): + url = '%s=%s/%s/global/route-selection-options' % 
(self.network_instance_path, vrf_name, self.protocol_bgp_path) + route_selection_cfg = {} + route_selection_cfg['external-compare-router-id'] = False + payload = {'route-selection-options': {'config': route_selection_cfg}} + requests.append({'path': url, 'data': payload, 'method': PATCH}) + # requests.append({'path': route_selection_del_path + "external-compare-router-id", 'method': DELETE}) + + match_as_path = match_bestpath.get('as_path', None) + as_path = bestpath.get('as_path', None) + if as_path and match_as_path: + if as_path.get('confed', None) is not None and match_as_path.get('confed', None): + requests.append({'path': route_selection_del_path + "compare-confed-as-path", 'method': DELETE}) + if as_path.get('ignore', None) is not None and match_as_path.get('ignore', None): + requests.append({'path': route_selection_del_path + "ignore-as-path-length", 'method': DELETE}) + if as_path.get('multipath_relax', None) is not None and match_as_path.get('multipath_relax', None): + requests.append({'path': multi_paths_del_path + "allow-multiple-as", 'method': DELETE}) + if as_path.get('multipath_relax_as_set', None) is not None and match_as_path.get('multipath_relax_as_set', None): + requests.append({'path': multi_paths_del_path + "as-set", 'method': DELETE}) + + match_med = match_bestpath.get('med', None) + med = bestpath.get('med', None) + if med and match_med: + if med.get('confed', None) is not None and match_med.get('confed', None): + requests.append({'path': route_selection_del_path + "med-confed", 'method': DELETE}) + if med.get('missing_as_worst', None) is not None and match_med.get('missing_as_worst', None): + requests.append({'path': route_selection_del_path + "med-missing-as-worst", 'method': DELETE}) + if med.get('always_compare_med', None) is not None and match_med.get('always_compare_med', None): + requests.append({'path': route_selection_del_path + "always-compare-med", 'method': DELETE}) + if med.get('max_med_val', None) is not None and 
match_med.get('max_med_val', None): + requests.append({'path': generic_del_path + "max-med/config/admin-max-med-val", 'method': DELETE}) + + return requests + + def get_delete_all_bgp_requests(self, commands): + requests = [] + for cmd in commands: + requests.append(self.get_delete_single_bgp_request(cmd['vrf_name'])) + return requests + + def get_delete_specific_bgp_param_request(self, command, match): + vrf_name = command['vrf_name'] + requests = [] + + router_id = command.get('router_id', None) + timers = command.get('timers', None) + holdtime = None + keepalive = None + if timers: + holdtime = command['timers'].get('holdtime', None) + keepalive = command['timers'].get('keepalive_interval', None) + log_neighbor_changes = command.get('log_neighbor_changes', None) + bestpath = command.get('bestpath', None) + + if router_id and match.get('router_id', None): + url = '%s=%s/%s/global/config/router-id' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + requests.append({"path": url, "method": DELETE}) + + if holdtime and match['timers'].get('holdtime', None) != 180: + url = '%s=%s/%s/global/config/hold-time' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + requests.append({"path": url, "method": DELETE}) + + if keepalive and match['timers'].get('keepalive_interval', None) != 60: + url = '%s=%s/%s/global/config/keepalive-interval' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + requests.append({"path": url, "method": DELETE}) + + # Delete the log_neighbor_changes only when existing values is True. 
+ if log_neighbor_changes is not None and match.get('log_neighbor_changes', None): + del_log_neighbor_req = self.get_modify_log_change_request(vrf_name, False) + if del_log_neighbor_req: + requests.append(del_log_neighbor_req) + + bestpath_del_reqs = self.get_delete_bestpath_requests(vrf_name, bestpath, match) + if bestpath_del_reqs: + requests.extend(bestpath_del_reqs) + + max_med = command.get('max_med', None) + max_med_del_reqs = self.get_delete_max_med_requests(vrf_name, max_med, match) + if max_med_del_reqs: + requests.extend(max_med_del_reqs) + + return requests + + def get_delete_bgp_requests(self, commands, have, is_delete_all): + requests = [] + if is_delete_all: + requests = self.get_delete_all_bgp_requests(commands) + else: + for cmd in commands: + vrf_name = cmd['vrf_name'] + as_val = cmd['bgp_as'] + + match = next((cfg for cfg in have if cfg['vrf_name'] == vrf_name and cfg['bgp_as'] == as_val), None) + if not match: + continue + # if there is specific parameters to delete then delete those alone + if cmd.get('router_id', None) or cmd.get('log_neighbor_changes', None) or cmd.get('bestpath', None): + requests.extend(self.get_delete_specific_bgp_param_request(cmd, match)) + else: + # delete entire bgp + requests.append(self.get_delete_single_bgp_request(vrf_name)) + + if requests: + # reorder the requests to get default vrfs at end of the requests. 
so deletion will get success + default_vrf_reqs = [] + other_vrf_reqs = [] + for req in requests: + if '=default/' in req['path']: + default_vrf_reqs.append(req) + else: + other_vrf_reqs.append(req) + requests.clear() + requests.extend(other_vrf_reqs) + requests.extend(default_vrf_reqs) + + return requests + + def get_modify_multi_paths_req(self, vrf_name, as_path): + request = None + if not as_path: + return request + + method = PATCH + multipath_cfg = {} + + as_path_multipath_relax = as_path.get('multipath_relax', None) + as_path_multipath_relax_as_set = as_path.get('multipath_relax_as_set', None) + + if as_path_multipath_relax is not None: + multipath_cfg['allow-multiple-as'] = as_path_multipath_relax + if as_path_multipath_relax_as_set is not None: + multipath_cfg['as-set'] = as_path_multipath_relax_as_set + + payload = {"openconfig-network-instance:config": multipath_cfg} + if payload: + url = '%s=%s/%s/global/use-multiple-paths/ebgp/config' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_modify_route_selection_req(self, vrf_name, compare_routerid, as_path, med): + requests = [] + if compare_routerid is None and not as_path and not med: + return requests + + route_selection_cfg = {} + + as_path_confed = None + as_path_ignore = None + + med_confed = None + med_missing_as_worst = None + always_compare_med = None + + if compare_routerid is not None: + route_selection_cfg['external-compare-router-id'] = compare_routerid + + if as_path: + as_path_confed = as_path.get('confed', None) + as_path_ignore = as_path.get('ignore', None) + if as_path_confed is not None: + route_selection_cfg['compare-confed-as-path'] = as_path_confed + if as_path_ignore is not None: + route_selection_cfg['ignore-as-path-length'] = as_path_ignore + + if med: + med_confed = med.get('confed', None) + med_missing_as_worst = med.get('missing_as_worst', None) + always_compare_med = 
med.get('always_compare_med', None) + if med_confed is not None: + route_selection_cfg['med-confed'] = med_confed + if med_missing_as_worst is not None: + route_selection_cfg['med-missing-as-worst'] = med_missing_as_worst + if always_compare_med is not None: + route_selection_cfg['always-compare-med'] = always_compare_med + method = PATCH + payload = {'route-selection-options': {'config': route_selection_cfg}} + + if payload: + url = '%s=%s/%s/global/route-selection-options' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_modify_bestpath_requests(self, vrf_name, bestpath): + requests = [] + if not bestpath: + return requests + + compare_routerid = bestpath.get('compare_routerid', None) + as_path = bestpath.get('as_path', None) + med = bestpath.get('med', None) + + route_selection_req = self.get_modify_route_selection_req(vrf_name, compare_routerid, as_path, med) + if route_selection_req: + requests.extend(route_selection_req) + + multi_paths_req = self.get_modify_multi_paths_req(vrf_name, as_path) + if multi_paths_req: + requests.append(multi_paths_req) + + return requests + + def get_modify_max_med_requests(self, vrf_name, max_med): + request = None + method = PATCH + payload = {} + on_startup_time = max_med.get('on_startup', {}).get('timer') + on_startup_med = max_med.get('on_startup', {}).get('med_val') + + if on_startup_med is not None: + payload = { + 'max-med': { + 'config': { + 'max-med-val': on_startup_med, + 'time': on_startup_time + } + } + } + + if payload: + url = '%s=%s/%s/global/max-med' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + request = {"path": url, "method": method, "data": payload} + + return [request] + + def get_modify_log_change_request(self, vrf_name, log_neighbor_changes): + request = None + method = PATCH + payload = {} + + if log_neighbor_changes is not None: + 
payload['log-neighbor-state-changes'] = log_neighbor_changes + + if payload: + url = '%s=%s/%s/global/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.log_neighbor_changes_path) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_modify_holdtime_request(self, vrf_name, holdtime): + request = None + method = PATCH + payload = {} + + if holdtime is not None: + payload['hold-time'] = str(holdtime) + + if payload: + url = '%s=%s/%s/global/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.holdtime_path) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_modify_keepalive_request(self, vrf_name, keepalive_interval): + request = None + method = PATCH + payload = {} + + if keepalive_interval is not None: + payload['keepalive-interval'] = str(keepalive_interval) + + if payload: + url = '%s=%s/%s/global/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.keepalive_path) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_new_bgp_request(self, vrf_name, as_val): + request = None + url = None + method = PATCH + payload = {} + + cfg = {} + if as_val: + as_cfg = {'config': {'as': float(as_val)}} + global_cfg = {'global': as_cfg} + cfg = {'bgp': global_cfg} + cfg['name'] = "bgp" + cfg['identifier'] = "openconfig-policy-types:BGP" + + if cfg: + payload['openconfig-network-instance:protocol'] = [cfg] + url = '%s=%s/protocols/protocol/' % (self.network_instance_path, vrf_name) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_modify_global_config_request(self, vrf_name, router_id, as_val): + request = None + method = PATCH + payload = {} + + cfg = {} + if router_id: + cfg['router-id'] = router_id + if as_val: + cfg['as'] = float(as_val) + + if cfg: + payload['openconfig-network-instance:config'] = cfg + url = '%s=%s/%s/global/config' % 
(self.network_instance_path, vrf_name, self.protocol_bgp_path) + request = {"path": url, "method": method, "data": payload} + + return request + + def get_modify_bgp_requests(self, commands, have): + requests = [] + if not commands: + return requests + + # Create URL and payload + for conf in commands: + vrf_name = conf['vrf_name'] + as_val = None + router_id = None + log_neighbor_changes = None + bestpath = None + max_med = None + holdtime = None + keepalive_interval = None + + if 'bgp_as' in conf: + as_val = conf['bgp_as'] + if 'router_id' in conf: + router_id = conf['router_id'] + if 'log_neighbor_changes' in conf: + log_neighbor_changes = conf['log_neighbor_changes'] + if 'bestpath' in conf: + bestpath = conf['bestpath'] + if 'max_med' in conf: + max_med = conf['max_med'] + if 'timers' in conf and conf['timers']: + if 'holdtime' in conf['timers']: + holdtime = conf['timers']['holdtime'] + if 'keepalive_interval' in conf['timers']: + keepalive_interval = conf['timers']['keepalive_interval'] + + if not any(cfg for cfg in have if cfg['vrf_name'] == vrf_name and (cfg['bgp_as'] == as_val)): + new_bgp_req = self.get_new_bgp_request(vrf_name, as_val) + if new_bgp_req: + requests.append(new_bgp_req) + + global_req = self.get_modify_global_config_request(vrf_name, router_id, as_val) + if global_req: + requests.append(global_req) + + log_neighbor_changes_req = self.get_modify_log_change_request(vrf_name, log_neighbor_changes) + if log_neighbor_changes_req: + requests.append(log_neighbor_changes_req) + + if holdtime: + holdtime_req = self.get_modify_holdtime_request(vrf_name, holdtime) + if holdtime_req: + requests.append(holdtime_req) + + if keepalive_interval: + keepalive_req = self.get_modify_keepalive_request(vrf_name, keepalive_interval) + if keepalive_req: + requests.append(keepalive_req) + + bestpath_reqs = self.get_modify_bestpath_requests(vrf_name, bestpath) + if bestpath_reqs: + requests.extend(bestpath_reqs) + if max_med: + max_med_reqs = 
self.get_modify_max_med_requests(vrf_name, max_med) + if max_med_reqs: + requests.extend(max_med_reqs) + + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py new file mode 100644 index 00000000..2a5c4cfc --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py @@ -0,0 +1,848 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_bgp_af class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +try: + from urllib import quote +except ImportError: + from urllib.parse import quote + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, + search_obj_in_list, + remove_empties +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + dict_to_set, + update_states, + get_diff, + remove_empties_from_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request +from 
ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import ( + validate_bgps, +) +from ansible.module_utils.connection import ConnectionError + +PATCH = 'patch' +DELETE = 'delete' +TEST_KEYS = [ + {'config': {'vrf_name': '', 'bgp_as': ''}}, + {'afis': {'afi': '', 'safi': ''}}, + {'redistribute': {'protocol': ''}}, + {'route_advertise_list': {'advertise_afi': ''}} +] + + +class Bgp_af(ConfigBase): + """ + The sonic_bgp_af class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp_af', + ] + + network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' + protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp' + l2vpn_evpn_config_path = 'l2vpn-evpn/openconfig-bgp-evpn-ext:config' + l2vpn_evpn_route_advertise_path = 'l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise' + afi_safi_path = 'global/afi-safis/afi-safi' + table_connection_path = 'table-connections/table-connection' + + def __init__(self, module): + super(Bgp_af, self).__init__(module) + + def get_bgp_af_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_af_facts = facts['ansible_network_resources'].get('bgp_af') + if not bgp_af_facts: + bgp_af_facts = [] + return bgp_af_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_bgp_af_facts = self.get_bgp_af_facts() + commands, requests = self.set_config(existing_bgp_af_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + 
result['changed'] = True + result['commands'] = commands + + changed_bgp_af_facts = self.get_bgp_af_facts() + + result['before'] = existing_bgp_af_facts + if result['changed']: + result['after'] = changed_bgp_af_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_bgp_af_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_bgp_af_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + validate_bgps(self._module, commands, have) + requests = self.get_modify_bgp_af_requests(commands, 
have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the bgp_afs + is_delete_all = False + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_bgp_af_requests(commands, have, is_delete_all) + requests.extend(self.get_delete_route_advertise_requests(commands, have, is_delete_all)) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + return commands, requests + + def get_modify_address_family_request(self, vrf_name, conf_afi, conf_safi): + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s/global' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + afi_safi_load = {'afi-safi-name': ("openconfig-bgp-types:%s" % (afi_safi))} + afi_safis_load = {'afi-safis': {'afi-safi': [afi_safi_load]}} + pay_load = {'openconfig-network-instance:global': afi_safis_load} + + return ({"path": url, "method": PATCH, "data": pay_load}) + + def get_modify_advertise_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam): + request = None + conf_adv_pip = conf_addr_fam.get('advertise_pip', None) + conf_adv_pip_ip = conf_addr_fam.get('advertise_pip_ip', None) + conf_adv_pip_peer_ip = conf_addr_fam.get('advertise_pip_peer_ip', None) + conf_adv_svi_ip = conf_addr_fam.get('advertise_svi_ip', None) + conf_adv_all_vni = conf_addr_fam.get('advertise_all_vni', None) + conf_adv_default_gw = conf_addr_fam.get('advertise_default_gw', None) + afi_safi = ("%s_%s" % 
(conf_afi, conf_safi)).upper() + evpn_cfg = {} + + if conf_adv_pip: + evpn_cfg['advertise-pip'] = conf_adv_pip + + if conf_adv_pip_ip: + evpn_cfg['advertise-pip-ip'] = conf_adv_pip_ip + + if conf_adv_pip_peer_ip: + evpn_cfg['advertise-pip-peer-ip'] = conf_adv_pip_peer_ip + + if conf_adv_svi_ip: + evpn_cfg['advertise-svi-ip'] = conf_adv_svi_ip + + if conf_adv_all_vni: + evpn_cfg['advertise-all-vni'] = conf_adv_all_vni + + if conf_adv_default_gw: + evpn_cfg['advertise-default-gw'] = conf_adv_default_gw + + if evpn_cfg: + url = '%s=%s/%s/global' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + afi_safi_load = {'afi-safi-name': ("openconfig-bgp-types:%s" % (afi_safi))} + afi_safi_load['l2vpn-evpn'] = {'openconfig-bgp-evpn-ext:config': evpn_cfg} + afi_safis_load = {'afi-safis': {'afi-safi': [afi_safi_load]}} + pay_load = {'openconfig-network-instance:global': afi_safis_load} + request = {"path": url, "method": PATCH, "data": pay_load} + + return request + + def get_modify_route_advertise_list_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam): + request = [] + route_advertise = [] + afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper() + route_advertise_list = conf_addr_fam.get('route_advertise_list', []) + if route_advertise_list: + for rt_adv in route_advertise_list: + advertise_afi = rt_adv.get('advertise_afi', None) + route_map = rt_adv.get('route_map', None) + if advertise_afi: + advertise_afi_safi = '%s_UNICAST' % advertise_afi.upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=%s/%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path) + cfg = None + if route_map: + route_map_list = [route_map] + cfg = {'advertise-afi-safi': advertise_afi_safi, 'route-map': route_map_list} + else: + cfg = {'advertise-afi-safi': advertise_afi_safi} + route_advertise.append({'advertise-afi-safi': advertise_afi_safi, 'config': cfg}) + pay_load = 
{'openconfig-bgp-evpn-ext:route-advertise': {'route-advertise-list': route_advertise}} + request = {"path": url, "method": PATCH, "data": pay_load} + return request + + def get_modify_redistribute_requests(self, vrf_name, conf_afi, conf_safi, conf_redis_arr): + requests = [] + url = "%s=%s/table-connections" % (self.network_instance_path, vrf_name) + cfgs = [] + for conf_redis in conf_redis_arr: + conf_metric = conf_redis.get('metric', None) + if conf_metric is not None: + conf_metric = float(conf_redis['metric']) + + afi_cfg = "openconfig-types:%s" % (conf_afi.upper()) + cfg_data = {'address-family': afi_cfg} + cfg_data['dst-protocol'] = "openconfig-policy-types:BGP" + conf_protocol = conf_redis['protocol'].upper() + if conf_protocol == 'CONNECTED': + conf_protocol = "DIRECTLY_CONNECTED" + cfg_data['src-protocol'] = "openconfig-policy-types:%s" % (conf_protocol) + cfg_data['config'] = {'address-family': afi_cfg} + if conf_metric is not None: + cfg_data['config']['metric'] = conf_metric + + conf_route_map = conf_redis.get('route_map', None) + if conf_route_map: + cfg_data['config']['import-policy'] = [conf_route_map] + + cfgs.append(cfg_data) + + if cfgs: + pay_load = {'openconfig-network-instance:table-connections': {'table-connection': cfgs}} + requests.append({"path": url, "method": PATCH, "data": pay_load}) + return requests + + def get_modify_max_path_request(self, vrf_name, conf_afi, conf_safi, conf_max_path): + request = None + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '%s=%s/use-multiple-paths' % (self.afi_safi_path, afi_safi) + conf_ebgp = conf_max_path.get('ebgp', None) + conf_ibgp = conf_max_path.get('ibgp', None) + max_path_load = {} + if conf_ebgp: + max_path_load['ebgp'] = {'config': {'maximum-paths': conf_ebgp}} + if conf_ibgp: + max_path_load['ibgp'] = {'config': {'maximum-paths': conf_ibgp}} + + pay_load = {} + if max_path_load: + 
pay_load['openconfig-network-instance:use-multiple-paths'] = max_path_load + + request = {"path": url, "method": PATCH, "data": pay_load} + return request + + def get_modify_network_request(self, vrf_name, conf_afi, conf_safi, conf_network): + request = None + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '%s=%s/network-config' % (self.afi_safi_path, afi_safi) + network_payload = [] + for each in conf_network: + payload = {} + payload = {'config': {'prefix': each}, 'prefix': each} + network_payload.append(payload) + if network_payload: + new_payload = {'network-config': {'network': network_payload}} + + request = {"path": url, "method": PATCH, "data": new_payload} + return request + + def get_modify_dampening_request(self, vrf_name, conf_afi, conf_safi, conf_dampening): + request = None + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '%s=%s/route-flap-damping' % (self.afi_safi_path, afi_safi) + damp_payload = {'route-flap-damping': {'config': {'enabled': conf_dampening}}} + if damp_payload: + request = {"path": url, "method": PATCH, "data": damp_payload} + return request + + def get_modify_single_af_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam): + requests = [] + + requests.append(self.get_modify_address_family_request(vrf_name, conf_afi, conf_safi)) + if conf_afi == 'ipv4' and conf_safi == 'unicast': + conf_dampening = conf_addr_fam.get('dampening', None) + if conf_dampening: + request = self.get_modify_dampening_request(vrf_name, conf_afi, conf_safi, conf_dampening) + if request: + requests.append(request) + if conf_afi in ['ipv4', 'ipv6'] and conf_safi == 'unicast': + conf_redis_arr = conf_addr_fam.get('redistribute', []) + if conf_redis_arr: + requests.extend(self.get_modify_redistribute_requests(vrf_name, conf_afi, conf_safi, conf_redis_arr)) + 
conf_max_path = conf_addr_fam.get('max_path', None) + if conf_max_path: + request = self.get_modify_max_path_request(vrf_name, conf_afi, conf_safi, conf_max_path) + if request: + requests.append(request) + conf_network = conf_addr_fam.get('network', []) + if conf_network: + request = self.get_modify_network_request(vrf_name, conf_afi, conf_safi, conf_network) + if request: + requests.append(request) + elif conf_afi == "l2vpn" and conf_safi == 'evpn': + adv_req = self.get_modify_advertise_request(vrf_name, conf_afi, conf_safi, conf_addr_fam) + if adv_req: + requests.append(adv_req) + return requests + + def get_modify_all_af_requests(self, conf_addr_fams, vrf_name): + requests = [] + for conf_addr_fam in conf_addr_fams: + conf_afi = conf_addr_fam.get('afi', None) + conf_safi = conf_addr_fam.get('safi', None) + if conf_afi and conf_safi: + requests.extend(self.get_modify_single_af_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)) + return requests + + def get_modify_requests(self, conf, match, vrf_name): + requests = [] + payload = {} + conf_addr_fams = conf.get('address_family', None) + if conf_addr_fams: + conf_addr_fams = conf_addr_fams.get('afis', []) + + mat_addr_fams = [] + if match: + mat_addr_fams = match.get('address_family', None) + if mat_addr_fams: + mat_addr_fams = mat_addr_fams.get('afis', []) + + if conf_addr_fams and not mat_addr_fams: + requests.extend(self.get_modify_all_af_requests(conf_addr_fams, vrf_name)) + else: + for conf_addr_fam in conf_addr_fams: + conf_afi = conf_addr_fam.get('afi', None) + conf_safi = conf_addr_fam.get('safi', None) + + if conf_afi is None or conf_safi is None: + continue + + mat_addr_fam = next((e_addr_fam for e_addr_fam in mat_addr_fams if (e_addr_fam['afi'] == conf_afi and e_addr_fam['safi'] == conf_safi)), None) + + if mat_addr_fam is None: + requests.extend(self.get_modify_single_af_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)) + continue + + if conf_afi == 'ipv4' and conf_safi == 'unicast': + 
conf_dampening = conf_addr_fam.get('dampening', None) + if conf_dampening: + request = self.get_modify_dampening_request(vrf_name, conf_afi, conf_safi, conf_dampening) + if request: + requests.append(request) + + if conf_afi == "l2vpn" and conf_safi == "evpn": + adv_req = self.get_modify_advertise_request(vrf_name, conf_afi, conf_safi, conf_addr_fam) + rt_adv_req = self.get_modify_route_advertise_list_request(vrf_name, conf_afi, conf_safi, conf_addr_fam) + if adv_req: + requests.append(adv_req) + if rt_adv_req: + requests.append(rt_adv_req) + + elif conf_afi in ["ipv4", "ipv6"] and conf_safi == "unicast": + conf_redis_arr = conf_addr_fam.get('redistribute', []) + conf_max_path = conf_addr_fam.get('max_path', None) + conf_network = conf_addr_fam.get('network', []) + if not conf_redis_arr and not conf_max_path and not conf_network: + continue + + url = "%s=%s/table-connections" % (self.network_instance_path, vrf_name) + pay_loads = [] + modify_redis_arr = [] + for conf_redis in conf_redis_arr: + conf_metric = conf_redis.get('metric', None) + if conf_metric is not None: + conf_metric = float(conf_redis['metric']) + + conf_route_map = conf_redis.get('route_map', None) + + have_redis_arr = mat_addr_fam.get('redistribute', []) + have_redis = None + have_route_map = None + # Check the route_map, if existing route_map is different from required route_map, delete the existing route map + if conf_route_map and have_redis_arr: + have_redis = next((redis_cfg for redis_cfg in have_redis_arr if conf_redis['protocol'] == redis_cfg['protocol']), None) + if have_redis: + have_route_map = have_redis.get('route_map', None) + if have_route_map and have_route_map != conf_route_map: + requests.append(self.get_delete_route_map_request(vrf_name, conf_afi, have_redis, have_route_map)) + + modify_redis = {} + if conf_metric is not None: + modify_redis['metric'] = conf_metric + if conf_route_map: + modify_redis['route_map'] = conf_route_map + + if modify_redis: + modify_redis['protocol'] = 
conf_redis['protocol'] + modify_redis_arr.append(modify_redis) + + if modify_redis_arr: + requests.extend(self.get_modify_redistribute_requests(vrf_name, conf_afi, conf_safi, modify_redis_arr)) + if conf_max_path: + max_path_req = self.get_modify_max_path_request(vrf_name, conf_afi, conf_safi, conf_max_path) + if max_path_req: + requests.append(max_path_req) + + if conf_network: + network_req = self.get_modify_network_request(vrf_name, conf_afi, conf_safi, conf_network) + if network_req: + requests.append(network_req) + + return requests + + def get_modify_bgp_af_requests(self, commands, have): + requests = [] + if not commands: + return requests + + # Create URL and payload + for conf in commands: + vrf_name = conf['vrf_name'] + as_val = conf['bgp_as'] + + match = next((cfg for cfg in have if (cfg['vrf_name'] == vrf_name and (cfg['bgp_as'] == as_val))), None) + modify_reqs = self.get_modify_requests(conf, match, vrf_name) + if modify_reqs: + requests.extend(modify_reqs) + + return requests + + def get_delete_advertise_attribute_request(self, vrf_name, conf_afi, conf_safi, attr): + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=%s/%s/%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_config_path, attr) + + return ({"path": url, "method": DELETE}) + + def get_delete_route_advertise_request(self, vrf_name, conf_afi, conf_safi): + afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=%s/%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path) + + return ({'path': url, 'method': DELETE}) + + def get_delete_route_advertise_list_request(self, vrf_name, conf_afi, conf_safi, advertise_afi): + afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper() + advertise_afi_safi = '%s_UNICAST' % advertise_afi.upper() + url = '%s=%s/%s' % (self.network_instance_path, 
vrf_name, self.protocol_bgp_path) + url += '/%s=%s/%s/route-advertise-list=%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path, advertise_afi_safi) + + return ({'path': url, 'method': DELETE}) + + def get_delete_route_advertise_route_map_request(self, vrf_name, conf_afi, conf_safi, advertise_afi, route_map): + afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper() + advertise_afi_safi = '%s_UNICAST' % advertise_afi.upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=%s/%s/route-advertise-list=%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path, advertise_afi_safi) + url += '/config/route-map=%s' % route_map + + return ({'path': url, 'method': DELETE}) + + def get_delete_route_advertise_requests(self, commands, have, is_delete_all): + requests = [] + if not is_delete_all: + for cmd in commands: + vrf_name = cmd['vrf_name'] + addr_fams = cmd.get('address_family', None) + if addr_fams: + addr_fams = addr_fams.get('afis', []) + if not addr_fams: + return requests + for addr_fam in addr_fams: + afi = addr_fam.get('afi', None) + safi = addr_fam.get('safi', None) + route_advertise_list = addr_fam.get('route_advertise_list', []) + if route_advertise_list: + for rt_adv in route_advertise_list: + advertise_afi = rt_adv.get('advertise_afi', None) + route_map = rt_adv.get('route_map', None) + # Check if the commands to be deleted are configured + for conf in have: + conf_vrf_name = conf['vrf_name'] + conf_addr_fams = conf.get('address_family', None) + if conf_addr_fams: + conf_addr_fams = conf_addr_fams.get('afis', []) + for conf_addr_fam in conf_addr_fams: + conf_afi = conf_addr_fam.get('afi', None) + conf_safi = conf_addr_fam.get('safi', None) + conf_route_advertise_list = conf_addr_fam.get('route_advertise_list', []) + if conf_route_advertise_list: + for conf_rt_adv in conf_route_advertise_list: + conf_advertise_afi = conf_rt_adv.get('advertise_afi', None) + conf_route_map = 
conf_rt_adv.get('route_map', None) + # Deletion at route-advertise level + if (not advertise_afi and vrf_name == conf_vrf_name and afi == conf_afi and safi == conf_safi): + requests.append(self.get_delete_route_advertise_request(vrf_name, afi, safi)) + # Deletion at advertise-afi-safi level + if (advertise_afi and not route_map and vrf_name == conf_vrf_name and afi == conf_afi and safi == + conf_safi and advertise_afi == conf_advertise_afi): + requests.append(self.get_delete_route_advertise_list_request(vrf_name, afi, safi, advertise_afi)) + # Deletion at route-map level + if (route_map and vrf_name == conf_vrf_name and afi == conf_afi and safi == conf_safi + and advertise_afi == conf_advertise_afi and route_map == conf_route_map): + requests.append(self.get_delete_route_advertise_route_map_request(vrf_name, afi, safi, + advertise_afi, route_map)) + + return requests + + def get_delete_dampening_request(self, vrf_name, conf_afi, conf_safi): + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=%s/route-flap-damping/config/enabled' % (self.afi_safi_path, afi_safi) + + return ({"path": url, "method": DELETE}) + + def get_delete_address_family_request(self, vrf_name, conf_afi, conf_safi): + request = None + + if conf_afi != "l2vpn": + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '/%s=openconfig-bgp-types:%s' % (self.afi_safi_path, afi_safi) + request = {"path": url, "method": DELETE} + + return request + + def get_delete_single_bgp_af_request(self, conf, is_delete_all, match=None): + requests = [] + vrf_name = conf['vrf_name'] + + conf_addr_fams = conf.get('address_family', None) + if conf_addr_fams is None: + return requests + + conf_addr_fams = conf_addr_fams.get('afis', []) + + if match and not conf_addr_fams: + conf_addr_fams = match.get('address_family', None) + if 
conf_addr_fams: + conf_addr_fams = conf_addr_fams.get('afis', []) + conf_addr_fams = [{'afi': af['afi'], 'safi': af['safi']} for af in conf_addr_fams] + + if not conf_addr_fams: + return requests + + for conf_addr_fam in conf_addr_fams: + conf_afi = conf_addr_fam.get('afi', None) + conf_safi = conf_addr_fam.get('safi', None) + if not conf_afi or not conf_safi: + continue + conf_redis_arr = conf_addr_fam.get('redistribute', []) + conf_adv_pip = conf_addr_fam.get('advertise_pip', None) + conf_adv_pip_ip = conf_addr_fam.get('advertise_pip_ip', None) + conf_adv_pip_peer_ip = conf_addr_fam.get('advertise_pip_peer_ip', None) + conf_adv_svi_ip = conf_addr_fam.get('advertise_svi_ip', None) + conf_adv_all_vni = conf_addr_fam.get('advertise_all_vni', None) + conf_adv_default_gw = conf_addr_fam.get('advertise_default_gw', None) + conf_max_path = conf_addr_fam.get('max_path', None) + conf_dampening = conf_addr_fam.get('dampening', None) + conf_network = conf_addr_fam.get('network', []) + if is_delete_all: + if conf_adv_pip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip')) + if conf_adv_pip_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip')) + if conf_adv_pip_peer_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip')) + if conf_adv_svi_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-svi-ip')) + if conf_adv_all_vni: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni')) + if conf_dampening: + requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi)) + if conf_network: + requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, conf_network, is_delete_all, None)) + if conf_adv_default_gw: + 
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw')) + if conf_redis_arr: + requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, conf_redis_arr, is_delete_all, None)) + if conf_max_path: + requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, conf_max_path, is_delete_all, None)) + addr_family_del_req = self.get_delete_address_family_request(vrf_name, conf_afi, conf_safi) + if addr_family_del_req: + requests.append(addr_family_del_req) + elif match: + match_addr_fams = match.get('address_family', None) + if match_addr_fams: + match_addr_fams = match_addr_fams.get('afis', []) + if not match_addr_fams: + continue + for match_addr_fam in match_addr_fams: + mat_afi = match_addr_fam.get('afi', None) + mat_safi = match_addr_fam.get('safi', None) + if mat_afi and mat_safi and mat_afi == conf_afi and mat_safi == conf_safi: + mat_advt_pip = match_addr_fam.get('advertise_pip', None) + mat_advt_pip_ip = match_addr_fam.get('advertise_pip_ip', None) + mat_advt_pip_peer_ip = match_addr_fam.get('advertise_pip_peer_ip', None) + mat_advt_svi_ip = match_addr_fam.get('advertise_svi_ip', None) + mat_advt_all_vni = match_addr_fam.get('advertise_all_vni', None) + mat_redis_arr = match_addr_fam.get('redistribute', []) + mat_advt_defaut_gw = match_addr_fam.get('advertise_default_gw', None) + mat_max_path = match_addr_fam.get('max_path', None) + mat_dampening = match_addr_fam.get('dampening', None) + mat_network = match_addr_fam.get('network', []) + + if (conf_adv_pip is None and conf_adv_pip_ip is None and conf_adv_pip_peer_ip is None and conf_adv_svi_ip is None + and conf_adv_all_vni is None and not conf_redis_arr and conf_adv_default_gw is None + and not conf_max_path and conf_dampening is None and not conf_network): + if mat_advt_pip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip')) + if mat_advt_pip_ip: + 
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip')) + if mat_advt_pip_peer_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip')) + if mat_advt_svi_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-svi-ip')) + if mat_advt_all_vni is not None: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni')) + if mat_dampening is not None: + requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi)) + if mat_advt_defaut_gw: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw')) + if mat_redis_arr: + requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, mat_redis_arr, False, mat_redis_arr)) + if mat_max_path: + requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, mat_max_path, is_delete_all, mat_max_path)) + if mat_network: + requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, mat_network, False, mat_network)) + addr_family_del_req = self.get_delete_address_family_request(vrf_name, conf_afi, conf_safi) + if addr_family_del_req: + requests.append(addr_family_del_req) + else: + if conf_adv_pip and mat_advt_pip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip')) + if conf_adv_pip_ip and mat_advt_pip_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip')) + if conf_adv_pip_peer_ip and mat_advt_pip_peer_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip')) + if conf_adv_svi_ip and mat_advt_svi_ip: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, 
conf_safi, 'advertise-svi-ip')) + if conf_adv_all_vni and mat_advt_all_vni: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni')) + if conf_dampening and mat_dampening: + requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi)) + if conf_adv_default_gw and mat_advt_defaut_gw: + requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw')) + if conf_redis_arr and mat_redis_arr: + requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, conf_redis_arr, False, mat_redis_arr)) + if conf_max_path and mat_max_path: + requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, conf_max_path, is_delete_all, mat_max_path)) + if conf_network and mat_network: + requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, conf_network, False, mat_network)) + break + + return requests + + def get_delete_network_request(self, vrf_name, conf_afi, conf_safi, conf_network, is_delete_all, mat_network): + requests = [] + afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper() + url = '%s=%s/%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '%s=%s/network-config/network=' % (self.afi_safi_path, afi_safi) + mat_list = [] + for conf in conf_network: + if mat_network: + mat_prefix = next((pre for pre in mat_network if pre == conf), None) + if mat_prefix: + mat_list.append(mat_prefix) + if not is_delete_all and mat_list: + for each in mat_list: + tmp = each.replace('/', '%2f') + requests.append({'path': url + tmp, 'method': DELETE}) + elif is_delete_all: + for each in conf_network: + tmp = each.replace('/', '%2f') + requests.append({'path': url + tmp, 'method': DELETE}) + return requests + + def get_delete_max_path_requests(self, vrf_name, conf_afi, conf_safi, conf_max_path, is_delete_all, mat_max_path): + requests = [] + afi_safi = ("%s_%s" % (conf_afi, 
conf_safi)).upper() + url = '%s=%s/%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + url += '%s=%s/use-multiple-paths/' % (self.afi_safi_path, afi_safi) + + conf_ebgp = conf_max_path.get('ebgp', None) + conf_ibgp = conf_max_path.get('ibgp', None) + mat_ebgp = None + mat_ibgp = None + if mat_max_path: + mat_ebgp = mat_max_path.get('ebgp', None) + mat_ibgp = mat_max_path.get('ibgp', None) + + if (conf_ebgp and mat_ebgp) or is_delete_all: + requests.append({'path': url + 'ebgp', 'method': DELETE}) + if (conf_ibgp and mat_ibgp) or is_delete_all: + requests.append({'path': url + 'ibgp', 'method': DELETE}) + + return requests + + def get_delete_route_map_request(self, vrf_name, conf_afi, conf_redis, conf_route_map): + addr_family = "openconfig-types:%s" % (conf_afi.upper()) + conf_protocol = conf_redis['protocol'].upper() + if conf_protocol == 'CONNECTED': + conf_protocol = "DIRECTLY_CONNECTED" + src_protocol = "openconfig-policy-types:%s" % (conf_protocol) + dst_protocol = "openconfig-policy-types:BGP" + url = '%s=%s/%s=' % (self.network_instance_path, vrf_name, self.table_connection_path) + url += '%s,%s,%s/config/import-policy=%s' % (src_protocol, dst_protocol, addr_family, conf_route_map) + return ({'path': url, 'method': DELETE}) + + def get_delete_redistribute_requests(self, vrf_name, conf_afi, conf_safi, conf_redis_arr, is_delete_all, mat_redis_arr): + requests = [] + for conf_redis in conf_redis_arr: + addr_family = "openconfig-types:%s" % (conf_afi.upper()) + conf_protocol = conf_redis['protocol'].upper() + + ext_metric_flag = False + ext_route_flag = False + mat_redis = None + mat_metric = None + mat_route_map = None + if not is_delete_all: + mat_redis = next((redis_cfg for redis_cfg in mat_redis_arr if redis_cfg['protocol'].upper() == conf_protocol), None) + if mat_redis: + mat_metric = mat_redis.get('metric', None) + mat_route_map = mat_redis.get('route_map', None) + if mat_metric: + ext_metric_flag = True + if mat_route_map: + 
ext_route_flag = True + + if conf_protocol == 'CONNECTED': + conf_protocol = "DIRECTLY_CONNECTED" + + src_protocol = "openconfig-policy-types:%s" % (conf_protocol) + dst_protocol = "openconfig-policy-types:BGP" + + conf_route_map = conf_redis.get('route_map', None) + conf_metric = conf_redis.get('metric', None) + if conf_metric is not None: + conf_metric = float(conf_redis['metric']) + + url = '%s=%s/%s=' % (self.network_instance_path, vrf_name, self.table_connection_path) + + new_metric_flag = conf_metric is not None + new_route_flag = conf_route_map is not None + is_delete_protocol = False + if is_delete_all: + is_delete_protocol = True + else: + is_delete_protocol = (new_metric_flag == ext_metric_flag) and (new_route_flag == ext_route_flag) + + if is_delete_protocol: + url += '%s,%s,%s' % (src_protocol, dst_protocol, addr_family) + requests.append({'path': url, 'method': DELETE}) + continue + + if new_metric_flag and ext_metric_flag: + url += '%s,%s,%s/config/metric' % (src_protocol, dst_protocol, addr_family) + requests.append({'path': url, 'method': DELETE}) + + if new_route_flag and ext_route_flag: + url += '%s,%s,%s/config/import-policy=%s' % (src_protocol, dst_protocol, addr_family, conf_route_map) + requests.append({'path': url, 'method': DELETE}) + + return requests + + def get_delete_bgp_af_requests(self, commands, have, is_delete_all): + requests = [] + for cmd in commands: + vrf_name = cmd['vrf_name'] + as_val = cmd['bgp_as'] + match_cfg = None + if not is_delete_all: + match_cfg = next((have_cfg for have_cfg in have if have_cfg['vrf_name'] == vrf_name and have_cfg['bgp_as'] == as_val), None) + requests.extend(self.get_delete_single_bgp_af_request(cmd, is_delete_all, match_cfg)) + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py new file mode 
100644 index 00000000..dc2b023b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py @@ -0,0 +1,304 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_bgp_as_paths class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +try: + from urllib.parse import urlencode +except Exception: + from urllib import urlencode + + +class Bgp_as_paths(ConfigBase): + """ + The sonic_bgp_as_paths class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp_as_paths', + ] + + def __init__(self, module): + super(Bgp_as_paths, self).__init__(module) + + def get_bgp_as_paths_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current 
configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_as_paths_facts = facts['ansible_network_resources'].get('bgp_as_paths') + if not bgp_as_paths_facts: + return [] + return bgp_as_paths_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + commands = list() + + existing_bgp_as_paths_facts = self.get_bgp_as_paths_facts() + commands, requests = self.set_config(existing_bgp_as_paths_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_bgp_as_paths_facts = self.get_bgp_as_paths_facts() + + result['before'] = existing_bgp_as_paths_facts + if result['changed']: + result['after'] = changed_bgp_as_paths_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_bgp_as_paths_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_bgp_as_paths_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = 
self._module.params['state'] + for i in want: + if i.get('members'): + temp = [] + for j in i['members']: + temp.append(j.replace('\\\\', '\\')) + i['members'] = temp + diff = get_diff(want, have) + for i in want: + if i.get('members'): + temp = [] + for j in i['members']: + temp.append(j.replace('\\', '\\\\')) + i['members'] = temp + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + @staticmethod + def _state_replaced(**kwargs): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + @staticmethod + def _state_overridden(**kwargs): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_as_path_list_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # To Delete a single member + # 
data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=xyz/config/as-path-set-member=11 + # This will delete the as path and its all members + # data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=xyz + # This will delete ALL as path completely + # data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets + + is_delete_all = False + # if want is none, then delete ALL + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_as_path_requests(commands, have, is_delete_all) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_new_add_request(self, conf): + request = None + members = conf.get('members', None) + permit = conf.get('permit', None) + permit_str = "" + if permit: + permit_str = "PERMIT" + else: + permit_str = "DENY" + if members: + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets" + method = "PATCH" + cfg = {'as-path-set-name': conf['name'], 'as-path-set-member': members, 'openconfig-bgp-policy-ext:action': permit_str} + as_path_set = {'as-path-set-name': conf['name'], 'config': cfg} + payload = {'openconfig-bgp-policy:as-path-sets': {'as-path-set': [as_path_set]}} + request = {"path": url, "method": method, "data": payload} + return request + + def get_delete_all_as_path_requests(self, commands): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets" + method = "DELETE" + requests = [] + if commands: + request = {"path": url, "method": method} + requests.append(request) + return requests + + def get_delete_single_as_path_member_requests(self, name, members): + url = 
"data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:" + url = url + "bgp-defined-sets/as-path-sets/as-path-set={name}/config/{members_param}" + method = "DELETE" + members_params = {'as-path-set-member': ','.join(members)} + members_str = urlencode(members_params) + request = {"path": url.format(name=name, members_param=members_str), "method": method} + return request + + def get_delete_single_as_path_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set={}" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_single_as_path_action_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set={}" + url = url + "/openconfig-bgp-policy-ext:action" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_as_path_requests(self, commands, have, is_delete_all): + requests = [] + if is_delete_all: + requests = self.get_delete_all_as_path_requests(commands) + else: + for cmd in commands: + name = cmd['name'] + members = cmd['members'] + permit = cmd['permit'] + if members: + diff_members = [] + for item in have: + if item['name'] == name: + for member_want in cmd['members']: + if item['members']: + if str(member_want) in item['members']: + diff_members.append(member_want) + if diff_members: + requests.append(self.get_delete_single_as_path_member_requests(name, diff_members)) + + elif permit: + for item in have: + if item['name'] == name: + requests.append(self.get_delete_single_as_path_action_requests(name)) + else: + for item in have: + if item['name'] == name: + requests.append(self.get_delete_single_as_path_requests(name)) + + return requests + + def get_modify_as_path_list_requests(self, commands, have): + requests = [] + if not 
commands: + return requests + + for conf in commands: + new_req = self.get_new_add_request(conf) + if new_req: + requests.append(new_req) + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py new file mode 100644 index 00000000..670fb26d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py @@ -0,0 +1,368 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_bgp_communities class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) +from 
ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError +import json +from ansible.module_utils._text import to_native +import traceback +try: + import jinja2 + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + +try: + from urllib.parse import urlencode +except Exception: + from urllib import urlencode + + +class Bgp_communities(ConfigBase): + """ + The sonic_bgp_communities class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp_communities', + ] + + def __init__(self, module): + super(Bgp_communities, self).__init__(module) + + def get_bgp_communities_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_communities_facts = facts['ansible_network_resources'].get('bgp_communities') + if not bgp_communities_facts: + return [] + return bgp_communities_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + commands = list() + + existing_bgp_communities_facts = self.get_bgp_communities_facts() + commands, requests = self.set_config(existing_bgp_communities_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_bgp_communities_facts = 
self.get_bgp_communities_facts() + + result['before'] = existing_bgp_communities_facts + if result['changed']: + result['after'] = changed_bgp_communities_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_bgp_communities_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_bgp_communities_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + diff = get_diff(want, have) + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('comm: want: ' + str(want) + '\n') + # fp.write('comm: have: ' + str(have) + '\n') + # fp.write('comm: diff: ' + str(diff) + '\n') + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + @staticmethod + def _state_replaced(**kwargs): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + @staticmethod + def 
_state_overridden(**kwargs): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_bgp_community_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # Delete a community + # https://100.94.81.19/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=extest + # Delete all members but not community + # https://100.94.81.19/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=extest/config/community-member + # Dete a memeber from the expanded community + # https://100.94.81.19/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=extest/config/community-member=REGEX%3A100.100 + # Delete ALL Bgp_communities and its members + # https://100.94.81.19/restconf/data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets + is_delete_all = False + # if want is none, then delete ALL + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_bgp_communities(commands, have, is_delete_all) + + if 
commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_delete_single_bgp_community_member_requests(self, name, type, members): + requests = [] + for member in members: + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:" + url = url + "bgp-defined-sets/community-sets/community-set={name}/config/{members_param}" + method = "DELETE" + memberstr = member + if type == 'expanded': + memberstr = 'REGEX:' + member + members_params = {'community-member': memberstr} + members_str = urlencode(members_params) + request = {"path": url.format(name=name, members_param=members_str), "method": method} + requests.append(request) + return requests + + def get_delete_all_members_bgp_community_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:" + url = url + "bgp-defined-sets/community-sets/community-set={}/config/community-member" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_single_bgp_community_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set={}" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_all_bgp_communities(self, commands): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets" + method = "DELETE" + requests = [] + if commands: + request = {"path": url, "method": method} + requests.append(request) + return requests + + def get_delete_bgp_communities(self, commands, have, is_delete_all): + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('bgp_commmunities: delete requests ************** \n') + requests = [] + if is_delete_all: + requests = 
self.get_delete_all_bgp_communities(commands) + else: + for cmd in commands: + name = cmd['name'] + type = cmd['type'] + members = cmd['members'] + if members: + if members['regex']: + diff_members = [] + for item in have: + if item['name'] == name and item['members']: + for member_want in members['regex']: + if str(member_want) in item['members']['regex']: + diff_members.append(member_want) + if diff_members: + requests.extend(self.get_delete_single_bgp_community_member_requests(name, type, diff_members)) + else: + for item in have: + if item['name'] == name: + if item['members']: + requests.append(self.get_delete_all_members_bgp_community_requests(name)) + else: + for item in have: + if item['name'] == name: + requests.append(self.get_delete_single_bgp_community_requests(name)) + + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('bgp_commmunities: delete requests' + str(requests) + '\n') + return requests + + def get_new_add_request(self, conf): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets" + method = "PATCH" + # members = conf['members'] + # members_str = ', '.join(members) + # members_list = list() + # for member in members.split(','): + # members_list.append(str(member)) + + if 'match' not in conf: + conf['match'] = "ANY" + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('bgp_communities: conf' + str(conf) + '\n') + if 'local_as' in conf and conf['local_as']: + conf['members']['regex'].append("NO_EXPORT_SUBCONFED") + if 'no_peer' in conf and conf['no_peer']: + conf['members']['regex'].append("NOPEER") + if 'no_export' in conf and conf['no_export']: + conf['members']['regex'].append("NO_EXPORT") + if 'no_advertise' in conf and conf['no_advertise']: + conf['members']['regex'].append("NO_ADVERTISE") + input_data = {'name': conf['name'], 'members_list': conf['members']['regex'], 'match': conf['match']} + if conf['type'] == 'expanded': + input_data['regex'] = 
"REGEX:" + else: + input_data['regex'] = "" + if conf['permit']: + input_data['permit'] = "PERMIT" + else: + input_data['permit'] = "DENY" + payload_template = """ + { + "openconfig-bgp-policy:community-sets": { + "community-set": [ + { + "community-set-name": "{{name}}", + "config": { + "community-set-name": "{{name}}", + "community-member": [ + {% for member in members_list %}"{{regex}}{{member}}"{%- if not loop.last -%},{% endif %}{%endfor%} + ], + "openconfig-bgp-policy-ext:action": "{{permit}}", + "match-set-options": "{{match}}" + } + } + ] + } + }""" + env = jinja2.Environment(autoescape=False) + t = env.from_string(payload_template) + intended_payload = t.render(input_data) + ret_payload = json.loads(intended_payload) + request = {"path": url, "method": method, "data": ret_payload} + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('bgp_communities: request' + str(request) + '\n') + return request + + def get_modify_bgp_community_requests(self, commands, have): + requests = [] + if not commands: + return requests + + for conf in commands: + for item in have: + if item['name'] == conf['name']: + if 'type' not in conf: + conf['type'] = item['type'] + if 'permit' not in conf: + conf['permit'] = item['permit'] + if 'match' not in conf: + conf['match'] = item['match'] + if 'members' not in conf: + conf['members'] = item['members'] + new_req = self.get_new_add_request(conf) + if new_req: + requests.append(new_req) + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py new file mode 100644 index 00000000..751f88e4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py @@ -0,0 +1,371 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_bgp_ext_communities class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +import json +from ansible.module_utils._text import to_native +from ansible.module_utils.connection import ConnectionError +import traceback +try: + import jinja2 + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + +try: + from urllib.parse import urlencode +except Exception: + from urllib import urlencode + + 
+class Bgp_ext_communities(ConfigBase): + """ + The sonic_bgp_ext_communities class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp_ext_communities', + ] + + def __init__(self, module): + super(Bgp_ext_communities, self).__init__(module) + + def get_bgp_ext_communities_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_ext_communities_facts = facts['ansible_network_resources'].get('bgp_ext_communities') + if not bgp_ext_communities_facts: + return [] + return bgp_ext_communities_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + commands = list() + + existing_bgp_ext_communities_facts = self.get_bgp_ext_communities_facts() + commands, requests = self.set_config(existing_bgp_ext_communities_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_bgp_ext_communities_facts = self.get_bgp_ext_communities_facts() + + result['before'] = existing_bgp_ext_communities_facts + if result['changed']: + result['after'] = changed_bgp_ext_communities_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_bgp_ext_communities_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] 
+ have = existing_bgp_ext_communities_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + new_want = self.validate_type(want) + diff = get_diff(new_want, have) + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + @staticmethod + def _state_replaced(**kwargs): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + @staticmethod + def _state_overridden(**kwargs): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + return commands + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_bgp_ext_community_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ 
The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + is_delete_all = False + # if want is none, then delete ALL + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_bgp_ext_communities(commands, have, is_delete_all) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_delete_single_bgp_ext_community_member_requests(self, name, type, members): + requests = [] + for member in members: + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:" + url = url + "bgp-defined-sets/ext-community-sets/ext-community-set={name}/config/{members_param}" + method = "DELETE" + members_params = {'ext-community-member': member} + members_str = urlencode(members_params) + request = {"path": url.format(name=name, members_param=members_str), "method": method} + requests.append(request) + return requests + + def get_delete_all_members_bgp_ext_community_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:" + url = url + "bgp-defined-sets/ext-community-sets/ext-community-set={}/config/ext-community-member" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_single_bgp_ext_community_requests(self, name): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set={}" + method = "DELETE" + request = {"path": url.format(name), "method": method} + return request + + def get_delete_all_bgp_ext_communities(self, commands): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets" + method = "DELETE" + requests = [] + if 
commands: + request = {"path": url, "method": method} + requests.append(request) + return requests + + def get_delete_bgp_ext_communities(self, commands, have, is_delete_all): + requests = [] + if is_delete_all: + requests = self.get_delete_all_bgp_ext_communities(commands) + else: + for cmd in commands: + name = cmd['name'] + type = cmd['type'] + members = cmd['members'] + if members: + if members['regex'] or members['route_origin'] or members['route_target']: + diff_members = [] + for item in have: + if item['name'] == name and item['members']: + if members['regex']: + for member_want in members['regex']: + if str(member_want) in item['members']['regex']: + diff_members.append('REGEX:' + str(member_want)) + if members['route_origin']: + for member_want in members['route_origin']: + if str(member_want) in item['members']['route_origin']: + diff_members.append("route-origin:" + str(member_want)) + if members['route_target']: + for member_want in members['route_target']: + if str(member_want) in item['members']['route_target']: + diff_members.append("route-target:" + str(member_want)) + if diff_members: + requests.extend(self.get_delete_single_bgp_ext_community_member_requests(name, type, diff_members)) + else: + for item in have: + if item['name'] == name: + if item['members']: + requests.append(self.get_delete_all_members_bgp_ext_community_requests(name)) + else: + for item in have: + if item['name'] == name: + requests.append(self.get_delete_single_bgp_ext_community_requests(name)) + + return requests + + def get_new_add_request(self, conf): + + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets" + method = "PATCH" + members = conf.get('members', None) + if 'match' not in conf: + conf['match'] = "ANY" + else: + conf['match'] = conf['match'].upper() + input_data = {'name': conf['name'], 'match': conf['match']} + + input_data['members_list'] = list() + if members: + regex = 
members.get('regex', None) + if regex: + input_data['members_list'].extend(["REGEX:" + cfg for cfg in regex]) + else: + route_target = members.get('route_target', None) + if route_target: + input_data['members_list'].extend(["route-target:" + cfg for cfg in route_target]) + route_origin = members.get('route_origin', None) + if route_origin: + input_data['members_list'].extend(["route-origin:" + cfg for cfg in route_origin]) + + if conf['type'] == 'expanded': + input_data['regex'] = "REGEX:" + else: + input_data['regex'] = "" + if conf['permit']: + input_data['permit'] = "PERMIT" + else: + input_data['permit'] = "DENY" + payload_template = """ + { + "openconfig-bgp-policy:ext-community-sets": { + "ext-community-set": [ + { + "ext-community-set-name": "{{name}}", + "config": { + "ext-community-set-name": "{{name}}", + "ext-community-member": [ + {% for member in members_list %}"{{member}}"{%- if not loop.last -%},{% endif %}{%endfor%} + ], + "openconfig-bgp-policy-ext:action": "{{permit}}", + "match-set-options": "{{match}}" + } + } + ] + } + }""" + env = jinja2.Environment(autoescape=False) + t = env.from_string(payload_template) + intended_payload = t.render(input_data) + ret_payload = json.loads(intended_payload) + request = {"path": url, "method": method, "data": ret_payload} + return request + + def get_modify_bgp_ext_community_requests(self, commands, have): + requests = [] + if not commands: + return requests + + for conf in commands: + for item in have: + if item['name'] == conf['name']: + if 'type' not in conf: + conf['type'] = item['type'] + if 'permit' not in conf: + conf['permit'] = item['permit'] + if 'match' not in conf: + conf['match'] = item['match'] + if 'members' not in conf: + conf['members'] = item['members'] + break + new_req = self.get_new_add_request(conf) + if new_req: + requests.append(new_req) + return requests + + def validate_type(self, want): + new_want = [] + if want: + for conf in want: + cfg = conf.copy() + cfg['type'] = 'standard' + 
members = conf.get('members', None) + if members and members.get('regex', None): + cfg['type'] = 'expanded' + + new_want.append(cfg) + return new_want diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py new file mode 100644 index 00000000..31bbec78 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py @@ -0,0 +1,1100 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_bgp_neighbors class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import ( + validate_bgps, + normalize_neighbors_interface_name, + get_ip_afi_cfg_payload, + get_prefix_limit_payload +) +from 
ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request +from ansible.module_utils.connection import ConnectionError + +PATCH = 'patch' +DELETE = 'delete' + +TEST_KEYS = [ + {'config': {'vrf_name': '', 'bgp_as': ''}}, + {'neighbors': {'neighbor': ''}}, + {'peer_group': {'name': ''}}, + {'afis': {'afi': '', 'safi': ''}}, +] + + +class Bgp_neighbors(ConfigBase): + """ + The sonic_bgp_neighbors class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'bgp_neighbors', + ] + + network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' + protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp' + neighbor_path = 'neighbors/neighbor' + + def __init__(self, module): + super(Bgp_neighbors, self).__init__(module) + + def get_bgp_neighbors_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + bgp_facts = facts['ansible_network_resources'].get('bgp_neighbors') + if not bgp_facts: + bgp_facts = [] + return bgp_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_bgp_facts = self.get_bgp_neighbors_facts() + commands, requests = self.set_config(existing_bgp_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_bgp_facts = self.get_bgp_neighbors_facts() + + result['before'] = existing_bgp_facts + if result['changed']: + result['after'] = changed_bgp_facts + + result['warnings'] = 
warnings + return result + + def set_config(self, existing_bgp_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + normalize_neighbors_interface_name(want, self._module) + have = existing_bgp_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = [] + requests = [] + commands = diff + validate_bgps(self._module, commands, have) + requests = self.get_modify_bgp_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param have: the current 
configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + is_delete_all = False + if not want: + is_delete_all = True + if is_delete_all: + commands = have + new_have = have + else: + new_have = self.remove_default_entries(have) + d_diff = get_diff(want, new_have, TEST_KEYS, is_skeleton=True) + delete_diff = get_diff(want, d_diff, TEST_KEYS, is_skeleton=True) + commands = delete_diff + requests = self.get_delete_bgp_neighbor_requests(commands, new_have, want, is_delete_all) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + return commands, requests + + def remove_default_entries(self, data): + new_data = [] + if not data: + return new_data + for conf in data: + new_conf = {} + as_val = conf['bgp_as'] + vrf_name = conf['vrf_name'] + new_conf['bgp_as'] = as_val + new_conf['vrf_name'] = vrf_name + peergroup = conf.get('peer_group', None) + new_peergroups = [] + if peergroup is not None: + for pg in peergroup: + new_pg = {} + pg_val = pg.get('name', None) + new_pg['name'] = pg_val + remote_as = pg.get('remote_as', None) + new_remote = {} + if remote_as: + peer_as = remote_as.get('peer_as', None) + peer_type = remote_as.get('peer_type', None) + if peer_as is not None: + new_remote['peer_as'] = peer_as + if peer_type is not None: + new_remote['peer_type'] = peer_type + if new_remote: + new_pg['remote_as'] = new_remote + timers = pg.get('timers', None) + new_timers = {} + if timers: + keepalive = timers.get('keepalive', None) + holdtime = timers.get('holdtime', None) + connect_retry = timers.get('connect_retry', None) + if keepalive is not None and keepalive != 60: + new_timers['keepalive'] = keepalive + if holdtime is not None and holdtime != 180: + new_timers['holdtime'] = holdtime + if connect_retry is not None and connect_retry != 30: + new_timers['connect_retry'] = connect_retry + if new_timers: + 
new_pg['timers'] = new_timers + advertisement_interval = pg.get('advertisement_interval', None) + if advertisement_interval is not None and advertisement_interval != 30: + new_pg['advertisement_interval'] = advertisement_interval + bfd = pg.get('bfd', None) + if bfd is not None: + new_pg['bfd'] = bfd + capability = pg.get('capability', None) + if capability is not None: + new_pg['capability'] = capability + afi = [] + address_family = pg.get('address_family', None) + if address_family: + if address_family.get('afis', None): + for each in address_family['afis']: + if each: + tmp = {} + if each.get('afi', None) is not None: + tmp['afi'] = each['afi'] + if each.get('safi', None) is not None: + tmp['safi'] = each['safi'] + if each.get('activate', None) is not None and each['activate'] is not False: + tmp['activate'] = each['activate'] + if each.get('allowas_in', None) is not None: + tmp['allowas_in'] = each['allowas_in'] + if each.get('ip_afi', None) is not None: + tmp['ip_afi'] = each['ip_afi'] + if each.get('prefix_limit', None) is not None: + tmp['prefix_limit'] = each['prefix_limit'] + if each.get('prefix_list_in', None) is not None: + tmp['prefix_list_in'] = each['prefix_list_in'] + if each.get('prefix_list_out', None) is not None: + tmp['prefix_list_out'] = each['prefix_list_out'] + afi.append(tmp) + if afi and len(afi) > 0: + afis = {} + afis.update({'afis': afi}) + new_pg['address_family'] = afis + if new_pg: + new_peergroups.append(new_pg) + if new_peergroups: + new_conf['peer_group'] = new_peergroups + neighbors = conf.get('neighbors', None) + new_neighbors = [] + if neighbors is not None: + for neighbor in neighbors: + new_neighbor = {} + neighbor_val = neighbor.get('neighbor', None) + new_neighbor['neighbor'] = neighbor_val + remote_as = neighbor.get('remote_as', None) + new_remote = {} + if remote_as: + peer_as = remote_as.get('peer_as', None) + peer_type = remote_as.get('peer_type', None) + if peer_as is not None: + new_remote['peer_as'] = peer_as + if 
def build_bgp_peer_groups_payload(self, cmd, have, bgp_as, vrf_name):
    """Build the OpenConfig 'peer-groups' PATCH payload for the requested
    peer-group settings.

    :param cmd: list of peer-group option dicts to be configured
    :param have: current on-device configuration facts
    :param bgp_as: AS number of the BGP instance being modified
    :param vrf_name: VRF of the BGP instance being modified
    :returns: tuple of (payload dict, list of prerequisite DELETE requests);
        the DELETE requests clear conflicting remote-as / allowas-in
        settings that must be removed before the PATCH can be applied
    """
    requests = []
    bgp_peer_group_list = []
    for peer_group in cmd:
        if peer_group:
            # Per-group accumulators for the nested OpenConfig containers.
            bgp_peer_group = {}
            peer_group_cfg = {}
            tmp_bfd = {}
            tmp_ebgp = {}
            tmp_timers = {}
            tmp_capability = {}
            tmp_remote = {}
            tmp_transport = {}
            afi = []
            if peer_group.get('name', None) is not None:
                peer_group_cfg.update({'peer-group-name': peer_group['name']})
                bgp_peer_group.update({'peer-group-name': peer_group['name']})
            # BFD options live under 'enable-bfd/config'.
            if peer_group.get('bfd', None) is not None:
                if peer_group['bfd'].get('enabled', None) is not None:
                    tmp_bfd.update({'enabled': peer_group['bfd']['enabled']})
                if peer_group['bfd'].get('check_failure', None) is not None:
                    tmp_bfd.update({'check-control-plane-failure': peer_group['bfd']['check_failure']})
                if peer_group['bfd'].get('profile', None) is not None:
                    tmp_bfd.update({'bfd-profile': peer_group['bfd']['profile']})
            # Authentication is configured only when both the password and
            # the 'encrypted' flag are supplied together.
            if peer_group.get('auth_pwd', None) is not None:
                if (peer_group['auth_pwd'].get('pwd', None) is not None and
                        peer_group['auth_pwd'].get('encrypted', None) is not None):
                    bgp_peer_group.update({'auth-password': {'config': {'password': peer_group['auth_pwd']['pwd'],
                                                                        'encrypted': peer_group['auth_pwd']['encrypted']}}})
            if peer_group.get('ebgp_multihop', None) is not None:
                if peer_group['ebgp_multihop'].get('enabled', None) is not None:
                    tmp_ebgp.update({'enabled': peer_group['ebgp_multihop']['enabled']})
                if peer_group['ebgp_multihop'].get('multihop_ttl', None) is not None:
                    tmp_ebgp.update({'multihop-ttl': peer_group['ebgp_multihop']['multihop_ttl']})
            if peer_group.get('timers', None) is not None:
                if peer_group['timers'].get('holdtime', None) is not None:
                    tmp_timers.update({'hold-time': peer_group['timers']['holdtime']})
                if peer_group['timers'].get('keepalive', None) is not None:
                    tmp_timers.update({'keepalive-interval': peer_group['timers']['keepalive']})
                if peer_group['timers'].get('connect_retry', None) is not None:
                    tmp_timers.update({'connect-retry': peer_group['timers']['connect_retry']})
            if peer_group.get('capability', None) is not None:
                if peer_group['capability'].get('dynamic', None) is not None:
                    tmp_capability.update({'capability-dynamic': peer_group['capability']['dynamic']})
                if peer_group['capability'].get('extended_nexthop', None) is not None:
                    tmp_capability.update({'capability-extended-nexthop': peer_group['capability']['extended_nexthop']})
            # Scalar options mapped straight into the 'config' container.
            if peer_group.get('pg_description', None) is not None:
                peer_group_cfg.update({'description': peer_group['pg_description']})
            if peer_group.get('disable_connected_check', None) is not None:
                peer_group_cfg.update({'disable-ebgp-connected-route-check': peer_group['disable_connected_check']})
            if peer_group.get('dont_negotiate_capability', None) is not None:
                peer_group_cfg.update({'dont-negotiate-capability': peer_group['dont_negotiate_capability']})
            if peer_group.get('enforce_first_as', None) is not None:
                peer_group_cfg.update({'enforce-first-as': peer_group['enforce_first_as']})
            if peer_group.get('enforce_multihop', None) is not None:
                peer_group_cfg.update({'enforce-multihop': peer_group['enforce_multihop']})
            if peer_group.get('override_capability', None) is not None:
                peer_group_cfg.update({'override-capability': peer_group['override_capability']})
            if peer_group.get('shutdown_msg', None) is not None:
                peer_group_cfg.update({'shutdown-message': peer_group['shutdown_msg']})
            if peer_group.get('solo', None) is not None:
                peer_group_cfg.update({'solo-peer': peer_group['solo']})
            if peer_group.get('strict_capability_match', None) is not None:
                peer_group_cfg.update({'strict-capability-match': peer_group['strict_capability_match']})
            if peer_group.get('ttl_security', None) is not None:
                peer_group_cfg.update({'ttl-security-hops': peer_group['ttl_security']})
            if peer_group.get('local_as', None) is not None:
                if peer_group['local_as'].get('as', None) is not None:
                    peer_group_cfg.update({'local-as': peer_group['local_as']['as']})
                if peer_group['local_as'].get('no_prepend', None) is not None:
                    peer_group_cfg.update({'local-as-no-prepend': peer_group['local_as']['no_prepend']})
                if peer_group['local_as'].get('replace_as', None) is not None:
                    peer_group_cfg.update({'local-as-replace-as': peer_group['local_as']['replace_as']})
            if peer_group.get('local_address', None) is not None:
                tmp_transport.update({'local-address': peer_group['local_address']})
            if peer_group.get('passive', None) is not None:
                tmp_transport.update({'passive-mode': peer_group['passive']})
            if peer_group.get('advertisement_interval', None) is not None:
                tmp_timers.update({'minimum-advertisement-interval': peer_group['advertisement_interval']})
            if peer_group.get('remote_as', None) is not None:
                # peer-as and peer-type are alternatives on the device: when
                # switching from one form to the other, the currently
                # configured form is deleted first via a prerequisite request.
                have_nei = self.find_pg(have, bgp_as, vrf_name, peer_group)
                if peer_group['remote_as'].get('peer_as', None) is not None:
                    if have_nei:
                        if have_nei.get("remote_as", None) is not None:
                            if have_nei["remote_as"].get("peer_type", None) is not None:
                                del_nei = {}
                                del_nei.update({'name': have_nei['name']})
                                del_nei.update({'remote_as': have_nei['remote_as']})
                                requests.extend(self.delete_specific_peergroup_param_request(vrf_name, del_nei))
                    tmp_remote.update({'peer-as': peer_group['remote_as']['peer_as']})
                if peer_group['remote_as'].get('peer_type', None) is not None:
                    if have_nei:
                        if have_nei.get("remote_as", None) is not None:
                            if have_nei["remote_as"].get("peer_as", None) is not None:
                                del_nei = {}
                                del_nei.update({'name': have_nei['name']})
                                del_nei.update({'remote_as': have_nei['remote_as']})
                                requests.extend(self.delete_specific_peergroup_param_request(vrf_name, del_nei))
                    tmp_remote.update({'peer-type': peer_group['remote_as']['peer_type'].upper()})
            if peer_group.get('address_family', None) is not None:
                if peer_group['address_family'].get('afis', None) is not None:
                    for each in peer_group['address_family']['afis']:
                        samp = {}
                        afi_safi_cfg = {}
                        pfx_lmt_cfg = {}
                        pfx_lst_cfg = {}
                        ip_dict = {}
                        if each.get('afi', None) is not None and each.get('safi', None) is not None:
                            # e.g. 'ipv4' + 'unicast' -> 'IPV4_UNICAST'
                            afi_safi = each['afi'].upper() + "_" + each['safi'].upper()
                            if afi_safi is not None:
                                afi_safi_name = 'openconfig-bgp-types:' + afi_safi
                                if afi_safi_name is not None:
                                    samp.update({'afi-safi-name': afi_safi_name})
                                    samp.update({'config': {'afi-safi-name': afi_safi_name}})
                        # NOTE(review): 'afi_safi' is only bound when both afi and
                        # safi are supplied; later comparisons rely on pfx_lmt_cfg /
                        # ip_dict being falsy to short-circuit — confirm callers
                        # always supply afi/safi together with these options.
                        if each.get('prefix_limit', None) is not None:
                            pfx_lmt_cfg = get_prefix_limit_payload(each['prefix_limit'])
                        if pfx_lmt_cfg and afi_safi == 'L2VPN_EVPN':
                            samp.update({'l2vpn-evpn': {'prefix-limit': {'config': pfx_lmt_cfg}}})
                        else:
                            if each.get('ip_afi', None) is not None:
                                afi_safi_cfg = get_ip_afi_cfg_payload(each['ip_afi'])
                                if afi_safi_cfg:
                                    ip_dict.update({'config': afi_safi_cfg})
                            if pfx_lmt_cfg:
                                ip_dict.update({'prefix-limit': {'config': pfx_lmt_cfg}})
                            if ip_dict and afi_safi == 'IPV4_UNICAST':
                                samp.update({'ipv4-unicast': ip_dict})
                            elif ip_dict and afi_safi == 'IPV6_UNICAST':
                                samp.update({'ipv6-unicast': ip_dict})
                        if each.get('activate', None) is not None:
                            enabled = each['activate']
                            if enabled is not None:
                                # NOTE(review): this replaces the 'config' dict set
                                # above (containing afi-safi-name) rather than
                                # merging into it — confirm intended payload shape.
                                samp.update({'config': {'enabled': enabled}})
                        if each.get('allowas_in', None) is not None:
                            have_pg_af = self.find_af(have, bgp_as, vrf_name, peer_group, each['afi'], each['safi'])
                            if each['allowas_in'].get('origin', None) is not None:
                                # 'origin' and 'value' are exclusive; a configured
                                # 'value' is deleted before patching 'origin'.
                                if have_pg_af:
                                    if have_pg_af.get('allowas_in', None) is not None:
                                        if have_pg_af['allowas_in'].get('value', None) is not None:
                                            del_nei = {}
                                            del_nei.update({'name': peer_group['name']})
                                            afis_list = []
                                            temp_cfg = {'afi': each['afi'], 'safi': each['safi']}
                                            temp_cfg['allowas_in'] = {'value': have_pg_af['allowas_in']['value']}
                                            afis_list.append(temp_cfg)
                                            del_nei.update({'address_family': {'afis': afis_list}})
                                            requests.extend(self.delete_specific_peergroup_param_request(vrf_name, del_nei))
                                origin = each['allowas_in']['origin']
                                samp.update({'allow-own-as': {'config': {'origin': origin, "enabled": bool("true")}}})
                            if each['allowas_in'].get('value', None) is not None:
                                # Mirror case: delete a configured 'origin' before
                                # patching 'value'.
                                if have_pg_af:
                                    if have_pg_af.get('allowas_in', None) is not None:
                                        if have_pg_af['allowas_in'].get('origin', None) is not None:
                                            del_nei = {}
                                            del_nei.update({'name': peer_group['name']})
                                            afis_list = []
                                            temp_cfg = {'afi': each['afi'], 'safi': each['safi']}
                                            temp_cfg['allowas_in'] = {'origin': have_pg_af['allowas_in']['origin']}
                                            afis_list.append(temp_cfg)
                                            del_nei.update({'address_family': {'afis': afis_list}})
                                            requests.extend(self.delete_specific_peergroup_param_request(vrf_name, del_nei))
                                as_count = each['allowas_in']['value']
                                samp.update({'allow-own-as': {'config': {'as-count': as_count, "enabled": bool("true")}}})
                        if each.get('prefix_list_in', None) is not None:
                            prefix_list_in = each['prefix_list_in']
                            if prefix_list_in is not None:
                                pfx_lst_cfg.update({'import-policy': prefix_list_in})
                        if each.get('prefix_list_out', None) is not None:
                            prefix_list_out = each['prefix_list_out']
                            if prefix_list_out is not None:
                                pfx_lst_cfg.update({'export-policy': prefix_list_out})
                        if pfx_lst_cfg:
                            samp.update({'prefix-list': {'config': pfx_lst_cfg}})
                        if samp:
                            afi.append(samp)
            # Assemble only the containers that collected any content.
            if tmp_bfd:
                bgp_peer_group.update({'enable-bfd': {'config': tmp_bfd}})
            if tmp_ebgp:
                bgp_peer_group.update({'ebgp-multihop': {'config': tmp_ebgp}})
            if tmp_timers:
                bgp_peer_group.update({'timers': {'config': tmp_timers}})
            if tmp_transport:
                bgp_peer_group.update({'transport': {'config': tmp_transport}})
            if afi and len(afi) > 0:
                bgp_peer_group.update({'afi-safis': {'afi-safi': afi}})
            if tmp_capability:
                peer_group_cfg.update(tmp_capability)
            if tmp_remote:
                peer_group_cfg.update(tmp_remote)
            if peer_group_cfg:
                bgp_peer_group.update({'config': peer_group_cfg})
            if bgp_peer_group:
                bgp_peer_group_list.append(bgp_peer_group)
    payload = {'openconfig-network-instance:peer-groups': {'peer-group': bgp_peer_group_list}}
    return payload, requests
def find_pg(self, have, bgp_as, vrf_name, peergroup):
    """Return the 'have' peer-group entry matching *peergroup* by name
    within the (bgp_as, vrf_name) instance, or None when absent.
    """
    for instance in have:
        if instance['bgp_as'] == bgp_as and instance['vrf_name'] == vrf_name:
            groups = instance.get("peer_group", None)
            if groups is None:
                return None
            for group in groups:
                if group["name"] == peergroup['name']:
                    return group
            return None
    return None

def find_af(self, have, bgp_as, vrf_name, peergroup, afi, safi):
    """Return the address-family entry (matching *afi*/*safi*) of the
    'have' peer group located via find_pg, or None when absent.
    """
    matched_pg = self.find_pg(have, bgp_as, vrf_name, peergroup)
    if not matched_pg:
        return None
    if matched_pg['address_family'].get('afis', None) is None:
        return None
    for af_entry in matched_pg['address_family']['afis']:
        if af_entry['afi'] == afi and af_entry['safi'] == safi:
            return af_entry
    return None

def find_nei(self, have, bgp_as, vrf_name, neighbor):
    """Return the 'have' neighbor entry matching *neighbor* by address
    within the (bgp_as, vrf_name) instance, or None when absent.
    """
    for instance in have:
        if instance['bgp_as'] == bgp_as and instance['vrf_name'] == vrf_name:
            neighbors = instance.get("neighbors", None)
            if neighbors is None:
                return None
            for candidate in neighbors:
                if candidate["neighbor"] == neighbor['neighbor']:
                    return candidate
            return None
    return None
def build_bgp_neighbors_payload(self, cmd, have, bgp_as, vrf_name):
    """Build the OpenConfig 'neighbors' PATCH payload for the requested
    neighbor settings.

    :param cmd: list of neighbor option dicts to be configured
    :param have: current on-device configuration facts
    :param bgp_as: AS number of the BGP instance being modified
    :param vrf_name: VRF of the BGP instance being modified
    :returns: tuple of (payload dict, list of prerequisite DELETE requests);
        the DELETE requests clear a conflicting remote-as form that must be
        removed before the PATCH can be applied
    """
    bgp_neighbor_list = []
    requests = []
    for neighbor in cmd:
        if neighbor:
            # Per-neighbor accumulators for the nested OpenConfig containers.
            bgp_neighbor = {}
            neighbor_cfg = {}
            tmp_bfd = {}
            tmp_ebgp = {}
            tmp_timers = {}
            tmp_capability = {}
            tmp_remote = {}
            tmp_transport = {}
            # BFD options live under 'enable-bfd/config'.
            if neighbor.get('bfd', None) is not None:
                if neighbor['bfd'].get('enabled', None) is not None:
                    tmp_bfd.update({'enabled': neighbor['bfd']['enabled']})
                if neighbor['bfd'].get('check_failure', None) is not None:
                    tmp_bfd.update({'check-control-plane-failure': neighbor['bfd']['check_failure']})
                if neighbor['bfd'].get('profile', None) is not None:
                    tmp_bfd.update({'bfd-profile': neighbor['bfd']['profile']})
            # Authentication is configured only when both the password and
            # the 'encrypted' flag are supplied together.
            if neighbor.get('auth_pwd', None) is not None:
                if (neighbor['auth_pwd'].get('pwd', None) is not None and
                        neighbor['auth_pwd'].get('encrypted', None) is not None):
                    bgp_neighbor.update({'auth-password': {'config': {'password': neighbor['auth_pwd']['pwd'],
                                                                      'encrypted': neighbor['auth_pwd']['encrypted']}}})
            if neighbor.get('ebgp_multihop', None) is not None:
                if neighbor['ebgp_multihop'].get('enabled', None) is not None:
                    tmp_ebgp.update({'enabled': neighbor['ebgp_multihop']['enabled']})
                if neighbor['ebgp_multihop'].get('multihop_ttl', None) is not None:
                    tmp_ebgp.update({'multihop-ttl': neighbor['ebgp_multihop']['multihop_ttl']})
            if neighbor.get('timers', None) is not None:
                if neighbor['timers'].get('holdtime', None) is not None:
                    tmp_timers.update({'hold-time': neighbor['timers']['holdtime']})
                if neighbor['timers'].get('keepalive', None) is not None:
                    tmp_timers.update({'keepalive-interval': neighbor['timers']['keepalive']})
                if neighbor['timers'].get('connect_retry', None) is not None:
                    tmp_timers.update({'connect-retry': neighbor['timers']['connect_retry']})
            if neighbor.get('capability', None) is not None:
                if neighbor['capability'].get('dynamic', None) is not None:
                    tmp_capability.update({'capability-dynamic': neighbor['capability']['dynamic']})
                if neighbor['capability'].get('extended_nexthop', None) is not None:
                    tmp_capability.update({'capability-extended-nexthop': neighbor['capability']['extended_nexthop']})
            if neighbor.get('advertisement_interval', None) is not None:
                tmp_timers.update({'minimum-advertisement-interval': neighbor['advertisement_interval']})
            if neighbor.get('neighbor', None) is not None:
                bgp_neighbor.update({'neighbor-address': neighbor['neighbor']})
                neighbor_cfg.update({'neighbor-address': neighbor['neighbor']})
            # Scalar options mapped straight into the 'config' container.
            if neighbor.get('peer_group', None) is not None:
                neighbor_cfg.update({'peer-group': neighbor['peer_group']})
            if neighbor.get('nbr_description', None) is not None:
                neighbor_cfg.update({'description': neighbor['nbr_description']})
            if neighbor.get('disable_connected_check', None) is not None:
                neighbor_cfg.update({'disable-ebgp-connected-route-check': neighbor['disable_connected_check']})
            if neighbor.get('dont_negotiate_capability', None) is not None:
                neighbor_cfg.update({'dont-negotiate-capability': neighbor['dont_negotiate_capability']})
            if neighbor.get('enforce_first_as', None) is not None:
                neighbor_cfg.update({'enforce-first-as': neighbor['enforce_first_as']})
            if neighbor.get('enforce_multihop', None) is not None:
                neighbor_cfg.update({'enforce-multihop': neighbor['enforce_multihop']})
            if neighbor.get('override_capability', None) is not None:
                neighbor_cfg.update({'override-capability': neighbor['override_capability']})
            if neighbor.get('port', None) is not None:
                neighbor_cfg.update({'peer-port': neighbor['port']})
            if neighbor.get('shutdown_msg', None) is not None:
                neighbor_cfg.update({'shutdown-message': neighbor['shutdown_msg']})
            if neighbor.get('solo', None) is not None:
                neighbor_cfg.update({'solo-peer': neighbor['solo']})
            if neighbor.get('strict_capability_match', None) is not None:
                neighbor_cfg.update({'strict-capability-match': neighbor['strict_capability_match']})
            if neighbor.get('ttl_security', None) is not None:
                neighbor_cfg.update({'ttl-security-hops': neighbor['ttl_security']})
            if neighbor.get('v6only', None) is not None:
                neighbor_cfg.update({'openconfig-bgp-ext:v6only': neighbor['v6only']})
            if neighbor.get('local_as', None) is not None:
                if neighbor['local_as'].get('as', None) is not None:
                    neighbor_cfg.update({'local-as': neighbor['local_as']['as']})
                if neighbor['local_as'].get('no_prepend', None) is not None:
                    neighbor_cfg.update({'local-as-no-prepend': neighbor['local_as']['no_prepend']})
                if neighbor['local_as'].get('replace_as', None) is not None:
                    neighbor_cfg.update({'local-as-replace-as': neighbor['local_as']['replace_as']})
            if neighbor.get('local_address', None) is not None:
                tmp_transport.update({'local-address': neighbor['local_address']})
            if neighbor.get('passive', None) is not None:
                tmp_transport.update({'passive-mode': neighbor['passive']})
            if neighbor.get('remote_as', None) is not None:
                # peer-as and peer-type are alternatives on the device: when
                # switching from one form to the other, the currently
                # configured form is deleted first via a prerequisite request.
                have_nei = self.find_nei(have, bgp_as, vrf_name, neighbor)
                if neighbor['remote_as'].get('peer_as', None) is not None:
                    if have_nei:
                        if have_nei.get("remote_as", None) is not None:
                            if have_nei["remote_as"].get("peer_type", None) is not None:
                                del_nei = {}
                                del_nei.update({'neighbor': have_nei['neighbor']})
                                del_nei.update({'remote_as': have_nei['remote_as']})
                                requests.extend(self.delete_specific_param_request(vrf_name, del_nei))
                    tmp_remote.update({'peer-as': neighbor['remote_as']['peer_as']})
                if neighbor['remote_as'].get('peer_type', None) is not None:
                    if have_nei:
                        if have_nei.get("remote_as", None) is not None:
                            if have_nei["remote_as"].get("peer_as", None) is not None:
                                del_nei = {}
                                del_nei.update({'neighbor': have_nei['neighbor']})
                                del_nei.update({'remote_as': have_nei['remote_as']})
                                requests.extend(self.delete_specific_param_request(vrf_name, del_nei))
                    tmp_remote.update({'peer-type': neighbor['remote_as']['peer_type'].upper()})
            # Assemble only the containers that collected any content.
            if tmp_bfd:
                bgp_neighbor.update({'enable-bfd': {'config': tmp_bfd}})
            if tmp_ebgp:
                bgp_neighbor.update({'ebgp-multihop': {'config': tmp_ebgp}})
            if tmp_timers:
                bgp_neighbor.update({'timers': {'config': tmp_timers}})
            if tmp_transport:
                bgp_neighbor.update({'transport': {'config': tmp_transport}})
            if tmp_capability:
                neighbor_cfg.update(tmp_capability)
            if tmp_remote:
                neighbor_cfg.update(tmp_remote)
            if neighbor_cfg:
                bgp_neighbor.update({'config': neighbor_cfg})
            if bgp_neighbor:
                bgp_neighbor_list.append(bgp_neighbor)
    payload = {'openconfig-network-instance:neighbors': {'neighbor': bgp_neighbor_list}}
    return payload, requests
have_nei: + if have_nei.get("remote_as", None) is not None: + if have_nei["remote_as"].get("peer_as", None) is not None: + del_nei = {} + del_nei.update({'neighbor': have_nei['neighbor']}) + del_nei.update({'remote_as': have_nei['remote_as']}) + requests.extend(self.delete_specific_param_request(vrf_name, del_nei)) + tmp_remote.update({'peer-type': neighbor['remote_as']['peer_type'].upper()}) + if tmp_bfd: + bgp_neighbor.update({'enable-bfd': {'config': tmp_bfd}}) + if tmp_ebgp: + bgp_neighbor.update({'ebgp-multihop': {'config': tmp_ebgp}}) + if tmp_timers: + bgp_neighbor.update({'timers': {'config': tmp_timers}}) + if tmp_transport: + bgp_neighbor.update({'transport': {'config': tmp_transport}}) + if tmp_capability: + neighbor_cfg.update(tmp_capability) + if tmp_remote: + neighbor_cfg.update(tmp_remote) + if neighbor_cfg: + bgp_neighbor.update({'config': neighbor_cfg}) + if bgp_neighbor: + bgp_neighbor_list.append(bgp_neighbor) + payload = {'openconfig-network-instance:neighbors': {'neighbor': bgp_neighbor_list}} + return payload, requests + + def get_modify_bgp_requests(self, commands, have): + requests = [] + if not commands: + return requests + + for cmd in commands: + edit_path = '%s=%s/%s' % (self.network_instance_path, cmd['vrf_name'], self.protocol_bgp_path) + if 'peer_group' in cmd and cmd['peer_group']: + edit_peer_groups_payload, edit_requests = self.build_bgp_peer_groups_payload(cmd['peer_group'], have, cmd['bgp_as'], cmd['vrf_name']) + edit_peer_groups_path = edit_path + '/peer-groups' + if edit_requests: + requests.extend(edit_requests) + requests.append({'path': edit_peer_groups_path, 'method': PATCH, 'data': edit_peer_groups_payload}) + if 'neighbors' in cmd and cmd['neighbors']: + edit_neighbors_payload, edit_requests = self.build_bgp_neighbors_payload(cmd['neighbors'], have, cmd['bgp_as'], cmd['vrf_name']) + edit_neighbors_path = edit_path + '/neighbors' + if edit_requests: + requests.extend(edit_requests) + requests.append({'path': 
edit_neighbors_path, 'method': PATCH, 'data': edit_neighbors_payload}) + return requests + + def get_delete_specific_bgp_peergroup_param_request(self, vrf_name, cmd, want_match): + requests = [] + want_peer_group = want_match.get('peer_group', None) + for each in cmd['peer_group']: + if each: + name = each.get('name', None) + remote_as = each.get('remote_as', None) + timers = each.get('timers', None) + advertisement_interval = each.get('advertisement_interval', None) + bfd = each.get('bfd', None) + capability = each.get('capability', None) + address_family = each.get('address_family', None) + if name and not remote_as and not timers and not advertisement_interval and not bfd and not capability and not address_family: + want_pg_match = None + if want_peer_group: + want_pg_match = next((cfg for cfg in want_peer_group if cfg['name'] == name), None) + if want_pg_match: + keys = ['remote_as', 'timers', 'advertisement_interval', 'bfd', 'capability', 'address_family'] + if not any(want_pg_match.get(key, None) for key in keys): + requests.append(self.get_delete_vrf_specific_peergroup_request(vrf_name, name)) + else: + requests.extend(self.delete_specific_peergroup_param_request(vrf_name, each)) + return requests + + def delete_specific_peergroup_param_request(self, vrf_name, cmd): + requests = [] + delete_static_path = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path) + delete_static_path = delete_static_path + '/peer-groups/peer-group=%s' % (cmd['name']) + if cmd.get('remote_as', None) is not None: + if cmd['remote_as'].get('peer_as', None) is not None: + delete_path = delete_static_path + '/config/peer-as' + requests.append({'path': delete_path, 'method': DELETE}) + elif cmd['remote_as'].get('peer_type', None) is not None: + delete_path = delete_static_path + '/config/peer-type' + requests.append({'path': delete_path, 'method': DELETE}) + if cmd.get('advertisement_interval', None) is not None: + delete_path = delete_static_path + 
def delete_specific_peergroup_param_request(self, vrf_name, cmd):
    """Build DELETE requests for the individual peer-group attributes in *cmd*.

    :param vrf_name: VRF of the BGP instance
    :param cmd: peer-group dict; 'name' selects the group, every other
        present key selects an attribute (or sub-attribute) to delete
    :rtype: list
    :returns: REST DELETE request dicts, one per selected attribute
    """
    requests = []
    delete_static_path = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
    delete_static_path = delete_static_path + '/peer-groups/peer-group=%s' % (cmd['name'])
    if cmd.get('remote_as', None) is not None:
        # peer-as and peer-type are alternatives; only one is deleted.
        if cmd['remote_as'].get('peer_as', None) is not None:
            delete_path = delete_static_path + '/config/peer-as'
            requests.append({'path': delete_path, 'method': DELETE})
        elif cmd['remote_as'].get('peer_type', None) is not None:
            delete_path = delete_static_path + '/config/peer-type'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('advertisement_interval', None) is not None:
        delete_path = delete_static_path + '/timers/config/minimum-advertisement-interval'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('timers', None) is not None:
        if cmd['timers'].get('holdtime', None) is not None:
            delete_path = delete_static_path + '/timers/config/hold-time'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['timers'].get('keepalive', None) is not None:
            delete_path = delete_static_path + '/timers/config/keepalive-interval'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['timers'].get('connect_retry', None) is not None:
            delete_path = delete_static_path + '/timers/config/connect-retry'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('capability', None) is not None:
        if cmd['capability'].get('dynamic', None) is not None:
            delete_path = delete_static_path + '/config/capability-dynamic'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['capability'].get('extended_nexthop', None) is not None:
            delete_path = delete_static_path + '/config/capability-extended-nexthop'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('pg_description', None) is not None:
        delete_path = delete_static_path + '/config/description'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('disable_connected_check', None) is not None:
        delete_path = delete_static_path + '/config/disable-ebgp-connected-route-check'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('dont_negotiate_capability', None) is not None:
        delete_path = delete_static_path + '/config/dont-negotiate-capability'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('enforce_first_as', None) is not None:
        delete_path = delete_static_path + '/config/enforce-first-as'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('enforce_multihop', None) is not None:
        delete_path = delete_static_path + '/config/enforce-multihop'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('override_capability', None) is not None:
        delete_path = delete_static_path + '/config/override-capability'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('shutdown_msg', None) is not None:
        delete_path = delete_static_path + '/config/shutdown-message'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('solo', None) is not None:
        delete_path = delete_static_path + '/config/solo-peer'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('strict_capability_match', None) is not None:
        delete_path = delete_static_path + '/config/strict-capability-match'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('ttl_security', None) is not None:
        delete_path = delete_static_path + '/config/ttl-security-hops'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('local_as', None) is not None:
        if cmd['local_as'].get('as', None) is not None:
            delete_path = delete_static_path + '/config/local-as'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['local_as'].get('no_prepend', None) is not None:
            delete_path = delete_static_path + '/config/local-as-no-prepend'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['local_as'].get('replace_as', None) is not None:
            delete_path = delete_static_path + '/config/local-as-replace-as'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('local_address', None) is not None:
        delete_path = delete_static_path + '/transport/config/local-address'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('passive', None) is not None:
        delete_path = delete_static_path + '/transport/config/passive-mode'
        requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('bfd', None) is not None:
        if cmd['bfd'].get('enabled', None) is not None:
            delete_path = delete_static_path + '/enable-bfd/config/enabled'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['bfd'].get('check_failure', None) is not None:
            delete_path = delete_static_path + '/enable-bfd/config/check-control-plane-failure'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['bfd'].get('profile', None) is not None:
            delete_path = delete_static_path + '/enable-bfd/config/bfd-profile'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('auth_pwd', None) is not None:
        if cmd['auth_pwd'].get('pwd', None) is not None:
            delete_path = delete_static_path + '/auth-password/config/password'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['auth_pwd'].get('encrypted', None) is not None:
            delete_path = delete_static_path + '/auth-password/config/encrypted'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('ebgp_multihop', None) is not None:
        if cmd['ebgp_multihop'].get('enabled', None) is not None:
            delete_path = delete_static_path + '/ebgp-multihop/config/enabled'
            requests.append({'path': delete_path, 'method': DELETE})
        if cmd['ebgp_multihop'].get('multihop_ttl', None) is not None:
            # Bug fix: the REST leaf is hyphenated ('multihop-ttl'), matching
            # the name this file PATCHes in build_bgp_peer_groups_payload;
            # the previous underscored 'multihop_ttl' path could not match
            # the model, so the TTL was never deleted.
            delete_path = delete_static_path + '/ebgp-multihop/config/multihop-ttl'
            requests.append({'path': delete_path, 'method': DELETE})
    if cmd.get('address_family', None) is not None:
        if cmd['address_family'].get('afis', None) is None:
            # No specific AFs listed: delete the whole afi-safi list.
            delete_path = delete_static_path + '/afi-safis/afi-safi'
            requests.append({'path': delete_path, 'method': DELETE})
        else:
            for each in cmd['address_family']['afis']:
                afi = each.get('afi', None)
                safi = each.get('safi', None)
                activate = each.get('activate', None)
                allowas_in = each.get('allowas_in', None)
                ip_afi = each.get('ip_afi', None)
                prefix_limit = each.get('prefix_limit', None)
                prefix_list_in = each.get('prefix_list_in', None)
                prefix_list_out = each.get('prefix_list_out', None)
                # NOTE(review): assumes afi and safi are always strings here
                # (None would raise AttributeError) — confirm with argspec.
                afi_safi = afi.upper() + '_' + safi.upper()
                afi_safi_name = 'openconfig-bgp-types:' + afi_safi
                if (afi and safi and not activate and not allowas_in and not ip_afi and not prefix_limit and not prefix_list_in
                        and not prefix_list_out):
                    # Bare afi/safi: delete the whole address family entry.
                    delete_path = delete_static_path + '/afi-safis/afi-safi=%s' % (afi_safi_name)
                    requests.append({'path': delete_path, 'method': DELETE})
                else:
                    if activate:
                        delete_path = delete_static_path + '/afi-safis/afi-safi=%s/config/enabled' % (afi_safi_name)
                        requests.append({'path': delete_path, 'method': DELETE})
                    if allowas_in:
                        if allowas_in.get('origin', None):
                            delete_path = delete_static_path + '/afi-safis/afi-safi=%s/allow-own-as/config/origin' % (afi_safi_name)
                            requests.append({'path': delete_path, 'method': DELETE})
                        if allowas_in.get('value', None):
                            delete_path = delete_static_path + '/afi-safis/afi-safi=%s/allow-own-as/config/as-count' % (afi_safi_name)
                            requests.append({'path': delete_path, 'method': DELETE})
                    if prefix_list_in:
                        delete_path = delete_static_path + '/afi-safis/afi-safi=%s/prefix-list/config/import-policy' % (afi_safi_name)
                        requests.append({'path': delete_path, 'method': DELETE})
                    if prefix_list_out:
                        delete_path = delete_static_path + '/afi-safis/afi-safi=%s/prefix-list/config/export-policy' % (afi_safi_name)
                        requests.append({'path': delete_path, 'method': DELETE})
                    # ip_afi/prefix_limit live under an AF-specific container.
                    if afi_safi == 'IPV4_UNICAST':
                        if ip_afi:
                            requests.extend(self.delete_ip_afi_requests(ip_afi, afi_safi_name, 'ipv4-unicast', delete_static_path))
                        if prefix_limit:
                            requests.extend(self.delete_prefix_limit_requests(prefix_limit, afi_safi_name, 'ipv4-unicast', delete_static_path))
                    elif afi_safi == 'IPV6_UNICAST':
                        if ip_afi:
                            requests.extend(self.delete_ip_afi_requests(ip_afi, afi_safi_name, 'ipv6-unicast', delete_static_path))
                        if prefix_limit:
                            requests.extend(self.delete_prefix_limit_requests(prefix_limit, afi_safi_name, 'ipv6-unicast', delete_static_path))
                    elif afi_safi == 'L2VPN_EVPN':
                        if prefix_limit:
                            requests.extend(self.delete_prefix_limit_requests(prefix_limit, afi_safi_name, 'l2vpn-evpn', delete_static_path))

    return requests
def delete_ip_afi_requests(self, ip_afi, afi_safi_name, afi_safi, delete_static_path):
    """Build DELETE requests for the 'ip_afi' options of one neighbor AF.

    :param ip_afi: dict of requested ip_afi option deletions
    :param afi_safi_name: openconfig AFI-SAFI identity (e.g. IPV4_UNICAST)
    :param afi_safi: REST container name (e.g. 'ipv4-unicast')
    :param delete_static_path: URL prefix of the neighbor being modified
    :returns: list of {'path': ..., 'method': DELETE} request dicts
    """
    requests = []
    config_path = '%s/afi-safis/afi-safi=%s/%s/config' % (delete_static_path, afi_safi_name, afi_safi)
    if ip_afi.get('default_policy_name'):
        requests.append({'path': config_path + '/default-policy-name', 'method': DELETE})
    if ip_afi.get('send_default_route'):
        # Bug fix: REST leaf names are hyphenated ('send-default-route');
        # the previous underscore form addressed a non-existent node.
        requests.append({'path': config_path + '/send-default-route', 'method': DELETE})

    return requests


def delete_prefix_limit_requests(self, prefix_limit, afi_safi_name, afi_safi, delete_static_path):
    """Build DELETE requests for the 'prefix_limit' options of one neighbor AF.

    Covers max-prefixes, prevent-teardown, warning threshold and restart timer.
    """
    requests = []
    config_path = '%s/afi-safis/afi-safi=%s/%s/prefix-limit/config' % (delete_static_path, afi_safi_name, afi_safi)
    # playbook option key -> REST leaf name (emitted in this order)
    leaf_map = (
        ('max_prefixes', 'max-prefixes'),
        ('prevent_teardown', 'prevent-teardown'),
        ('warning_threshold', 'warning-threshold-pct'),
        ('restart_timer', 'restart-timer'),
    )
    for option, leaf in leaf_map:
        if prefix_limit.get(option):
            requests.append({'path': '%s/%s' % (config_path, leaf), 'method': DELETE})

    return requests


def get_delete_specific_bgp_param_request(self, vrf_name, cmd, want_match):
    """Build DELETE requests for the neighbor attributes named in 'cmd'.

    A neighbor entry that names no attributes — and whose matching 'want'
    entry names none either — is deleted wholesale; otherwise only the
    named attributes are deleted.
    """
    requests = []
    # Robustness fix: want_match may be None when no matching BGP instance
    # exists in 'want'; the original unconditional .get() raised
    # AttributeError in that case.
    want_neighbors = want_match.get('neighbors') if want_match else None
    attr_keys = ('remote_as', 'peer_group', 'timers', 'advertisement_interval', 'bfd', 'capability')
    for each in cmd['neighbors']:
        if not each:
            continue
        neighbor = each.get('neighbor')
        if neighbor and not any(each.get(key) for key in attr_keys):
            want_nei_match = None
            if want_neighbors:
                # Bug fix: supply a default so an absent neighbor no longer
                # raises StopIteration out of the generator.
                want_nei_match = next((cfg for cfg in want_neighbors if cfg['neighbor'] == neighbor), None)
            if want_nei_match and not any(want_nei_match.get(key) for key in attr_keys):
                requests.append(self.delete_neighbor_whole_request(vrf_name, neighbor))
        else:
            requests.extend(self.delete_specific_param_request(vrf_name, each))
    return requests


def delete_neighbor_whole_request(self, vrf_name, neighbor):
    """Return a request deleting the entire neighbor entry from the VRF."""
    url = '%s=%s/%s/%s=%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.neighbor_path, neighbor)
    return {'path': url, 'method': DELETE}


def delete_specific_param_request(self, vrf_name, cmd):
    """Build one DELETE request per individual neighbor attribute in 'cmd'.

    Requests are emitted in the same order as the original implementation
    so the resulting REST transaction is unchanged apart from the two leaf
    name fixes noted below.
    """
    requests = []
    base = '%s=%s/%s/neighbors/neighbor=%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, cmd['neighbor'])

    def add(suffix):
        # All deletions share the same neighbor URL prefix.
        requests.append({'path': base + suffix, 'method': DELETE})

    remote_as = cmd.get('remote_as')
    if remote_as is not None:
        # peer-as and peer-type are mutually exclusive forms of remote_as.
        if remote_as.get('peer_as') is not None:
            add('/config/peer-as')
        elif remote_as.get('peer_type') is not None:
            add('/config/peer-type')

    # Flat attributes: playbook option key -> REST path suffix.
    flat_leaves = (
        ('peer_group', '/config/peer-group'),
        ('nbr_description', '/config/description'),
        ('disable_connected_check', '/config/disable-ebgp-connected-route-check'),
        ('dont_negotiate_capability', '/config/dont-negotiate-capability'),
        ('enforce_first_as', '/config/enforce-first-as'),
        ('enforce_multihop', '/config/enforce-multihop'),
        ('override_capability', '/config/override-capability'),
        ('port', '/config/peer-port'),
        ('shutdown_msg', '/config/shutdown-message'),
        ('solo', '/config/solo-peer'),
        ('strict_capability_match', '/config/strict-capability-match'),
        ('ttl_security', '/config/ttl-security-hops'),
        ('v6only', '/config/openconfig-bgp-ext:v6only'),
    )
    for option, suffix in flat_leaves:
        if cmd.get(option) is not None:
            add(suffix)

    local_as = cmd.get('local_as')
    if local_as is not None:
        # Unlike remote_as, the local_as sub-options are independent.
        if local_as.get('as') is not None:
            add('/config/local-as')
        if local_as.get('no_prepend') is not None:
            add('/config/local-as-no-prepend')
        if local_as.get('replace_as') is not None:
            add('/config/local-as-replace-as')

    for option, suffix in (
            ('local_address', '/transport/config/local-address'),
            ('passive', '/transport/config/passive-mode'),
            ('advertisement_interval', '/timers/config/minimum-advertisement-interval')):
        if cmd.get(option) is not None:
            add(suffix)

    # Nested option containers: parent key -> (child key, REST path) pairs.
    nested_leaves = (
        ('timers', (('holdtime', '/timers/config/hold-time'),
                    ('keepalive', '/timers/config/keepalive-interval'),
                    ('connect_retry', '/timers/config/connect-retry'))),
        ('capability', (('dynamic', '/config/capability-dynamic'),
                        ('extended_nexthop', '/config/capability-extended-nexthop'))),
        ('bfd', (('enabled', '/enable-bfd/config/enabled'),
                 ('check_failure', '/enable-bfd/config/check-control-plane-failure'),
                 ('profile', '/enable-bfd/config/bfd-profile'))),
        ('auth_pwd', (('pwd', '/auth-password/config/password'),
                      ('encrypted', '/auth-password/config/encrypted'))),
        # Bug fix: the multihop TTL leaf is hyphenated ('multihop-ttl');
        # the underscore form addressed a non-existent node.
        ('ebgp_multihop', (('enabled', '/ebgp-multihop/config/enabled'),
                           ('multihop_ttl', '/ebgp-multihop/config/multihop-ttl'))),
    )
    for parent, children in nested_leaves:
        value = cmd.get(parent)
        if value is not None:
            for child, suffix in children:
                if value.get(child) is not None:
                    add(suffix)

    return requests


def get_delete_vrf_specific_neighbor_request(self, vrf_name, have):
    """Return whole-neighbor DELETE requests for every neighbor in 'have'."""
    return [self.delete_neighbor_whole_request(vrf_name, each['neighbor'])
            for each in have if each.get('neighbor')]


def get_delete_vrf_specific_peergroup_request(self, vrf_name, peergroup_name):
    """Return a request deleting the named peer-group from the VRF."""
    delete_neighbor_path = '%s=%s/%s/peer-groups/peer-group=%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, peergroup_name)
    return {'path': delete_neighbor_path, 'method': DELETE}


def get_delete_all_bgp_neighbor_requests(self, commands):
    """Return requests deleting every neighbor and peer-group in 'commands'."""
    requests = []
    for cmd in commands:
        if cmd.get('neighbors'):
            requests.extend(self.get_delete_vrf_specific_neighbor_request(cmd['vrf_name'], cmd['neighbors']))
        for each in cmd.get('peer_group') or []:
            requests.append(self.get_delete_vrf_specific_peergroup_request(cmd['vrf_name'], each['name']))
    return requests


def get_delete_bgp_neighbor_requests(self, commands, have, want, is_delete_all):
    """Top-level builder of all neighbor/peer-group deletion requests.

    When neither the command nor the corresponding 'want' entry names any
    neighbor or peer-group, everything currently configured ('have') under
    that VRF/AS is deleted; otherwise only the named items are processed.
    """
    requests = []
    if is_delete_all:
        return self.get_delete_all_bgp_neighbor_requests(commands)

    for cmd in commands:
        vrf_name = cmd['vrf_name']
        as_val = cmd['bgp_as']
        neighbors = cmd.get('neighbors')
        peer_group = cmd.get('peer_group')
        want_match = next((cfg for cfg in want if vrf_name == cfg['vrf_name'] and as_val == cfg['bgp_as']), None)
        # Robustness fix: want_match can legitimately be None; the original
        # unconditional .get() calls raised AttributeError in that case.
        want_neighbors = want_match.get('neighbors') if want_match else None
        want_peer_group = want_match.get('peer_group') if want_match else None
        if neighbors is None and peer_group is None and want_neighbors is None and want_peer_group is None:
            # Nothing specific requested: delete everything configured for
            # this VRF/AS pair.
            for each in have:
                if vrf_name == each['vrf_name'] and as_val == each['bgp_as']:
                    if each.get('neighbors'):
                        new_neighbors = [{'neighbor': i['neighbor']} for i in each['neighbors']]
                        requests.extend(self.get_delete_vrf_specific_neighbor_request(vrf_name, new_neighbors))
                    if each.get('peer_group'):
                        for pg in each['peer_group']:
                            requests.append(self.get_delete_vrf_specific_peergroup_request(vrf_name, pg['name']))
                    break
        else:
            if neighbors:
                requests.extend(self.get_delete_specific_bgp_param_request(vrf_name, cmd, want_match))
            if peer_group:
                requests.extend(self.get_delete_specific_bgp_peergroup_param_request(vrf_name, cmd, want_match))
    return requests
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    update_states,
    get_diff,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
    validate_bgps,
    normalize_neighbors_interface_name,
    get_ip_afi_cfg_payload,
    get_prefix_limit_payload
)
from ansible.module_utils.connection import ConnectionError

PATCH = 'patch'
DELETE = 'delete'
# Keys used by get_diff() to pair 'want' and 'have' list entries.
TEST_KEYS = [
    {'config': {'vrf_name': '', 'bgp_as': ''}},
    {'neighbors': {'neighbor': ''}},
    {'address_family': {'afi': '', 'safi': ''}},
    {'route_map': {'name': '', 'direction': ''}},
]


class Bgp_neighbors_af(ConfigBase):
    """
    The sonic_bgp_neighbors_af class.

    Compares the current configuration (as a dict) to the provided
    configuration and generates the REST requests needed to bring the
    device to the desired end state.
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'bgp_neighbors_af',
    ]

    # Static URL fragments for the openconfig-network-instance BGP tree.
    network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance'
    protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp'
    neighbor_path = 'neighbors/neighbor'
    afi_safi_path = 'afi-safis/afi-safi'
    activate_path = "/config/enabled"
    ref_client_path = "/config/route-reflector-client"
    serv_client_path = "/config/route-server-client"
    allowas_origin_path = "/allow-own-as/config/origin"
    allowas_value_path = "/allow-own-as/config/as-count"
    allowas_enabled_path = "/allow-own-as/config/enabled"
    prefix_list_in_path = "/prefix-list/config/import-policy"
    prefix_list_out_path = "/prefix-list/config/export-policy"
    # The '%s' placeholder below is the AF container name, e.g. 'ipv4-unicast'.
    def_policy_name_path = "/%s/config/default-policy-name"
    send_def_route_path = "/%s/config/send-default-route"
    max_prefixes_path = "/%s/prefix-limit/config/max-prefixes"
    prv_teardown_path = "/%s/prefix-limit/config/prevent-teardown"
    restart_timer_path = "/%s/prefix-limit/config/restart-timer"
    wrn_threshold_path = "/%s/prefix-limit/config/warning-threshold-pct"

    def __init__(self, module):
        super(Bgp_neighbors_af, self).__init__(module)

    def get_bgp_neighbors_af_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        bgp_neighbors_af_facts = facts['ansible_network_resources'].get('bgp_neighbors_af')
        if not bgp_neighbors_af_facts:
            bgp_neighbors_af_facts = []
        return bgp_neighbors_af_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        existing_bgp_neighbors_af_facts = self.get_bgp_neighbors_af_facts()
        commands, requests = self.set_config(existing_bgp_neighbors_af_facts)
        if commands and len(requests) > 0:
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
        result['commands'] = commands

        changed_bgp_neighbors_af_facts = self.get_bgp_neighbors_af_facts()

        result['before'] = existing_bgp_neighbors_af_facts
        if result['changed']:
            result['after'] = changed_bgp_neighbors_af_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_bgp_neighbors_af_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        normalize_neighbors_interface_name(want, self._module)
        have = existing_bgp_neighbors_af_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        requests = []
        state = self._module.params['state']

        diff = get_diff(want, have, TEST_KEYS)

        if state == 'overridden':
            commands, requests = self._state_overridden(want, have, diff)
        elif state == 'deleted':
            commands, requests = self._state_deleted(want, have, diff)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff)
        elif state == 'replaced':
            commands, requests = self._state_replaced(want, have, diff)
        return commands, requests

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :param want: the additive configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = diff
        validate_bgps(self._module, want, have)
        requests = self.get_modify_bgp_neighbors_af_requests(commands, have)
        if commands and len(requests) > 0:
            commands = update_states(commands, "merged")
        else:
            commands = []

        return commands, requests

    def _state_deleted(self, want, have, diff):
        """ The command generator when state is deleted

        :param want: the objects from which the configuration should be removed
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        # If want is empty, delete all bgp_neighbors_af configuration.
        is_delete_all = False
        if not want:
            commands = have
            is_delete_all = True
        else:
            commands = want

        requests = self.get_delete_bgp_neighbors_af_requests(commands, have, is_delete_all)

        if commands and len(requests) > 0:
            commands = update_states(commands, "deleted")
        else:
            commands = []

        return commands, requests

    def set_val(self, cfg, var, src_key, des_key):
        """Copy var[src_key] into cfg[des_key] when the value is not None."""
        value = var.get(src_key, None)
        if value is not None:
            cfg[des_key] = value

    def get_allowas_in(self, match, conf_neighbor_val, conf_afi, conf_safi):
        """Return the currently configured 'allowas_in' dict for the given
        neighbor/AF from 'match' (the 'have' entry), or None."""
        mat_allowas_in = None
        if match:
            mat_neighbors = match.get('neighbors', None)
            if mat_neighbors:
                mat_neighbor = next((nei for nei in mat_neighbors if nei['neighbor'] == conf_neighbor_val), None)
                if mat_neighbor:
                    mat_nei_addr_fams = mat_neighbor.get('address_family', [])
                    if mat_nei_addr_fams:
                        mat_nei_addr_fam = next((af for af in mat_nei_addr_fams if (af['afi'] == conf_afi and af['safi'] == conf_safi)), None)
                        if mat_nei_addr_fam:
                            mat_allowas_in = mat_nei_addr_fam.get('allowas_in', None)
        return mat_allowas_in

    def get_single_neighbors_af_modify_request(self, match, vrf_name, conf_neighbor_val, conf_neighbor):
        """Build the PATCH request (plus any pre-deletes needed when the
        allowas-in form switches between 'origin' and 'value') for one
        neighbor's address families."""
        requests = []
        conf_nei_addr_fams = conf_neighbor.get('address_family', [])
        url = '%s=%s/%s/%s=%s/afi-safis' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.neighbor_path, conf_neighbor_val)
        afi_safis = []
        if not conf_nei_addr_fams:
            return requests

        for conf_nei_addr_fam in conf_nei_addr_fams:
            afi_safi = {}
            conf_afi = conf_nei_addr_fam.get('afi', None)
            conf_safi = conf_nei_addr_fam.get('safi', None)
            afi_safi_val = ("%s_%s" % (conf_afi, conf_safi)).upper()
            del_url = '%s=%s/%s/%s=%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.neighbor_path, conf_neighbor_val)
            del_url += '%s=openconfig-bgp-types:%s' % (self.afi_safi_path, afi_safi_val)

            afi_safi_cfg = {}
            if conf_afi and conf_safi:
                afi_safi_name = ("%s_%s" % (conf_afi, conf_safi)).upper()
                afi_safi['afi-safi-name'] = afi_safi_name
                afi_safi_cfg['afi-safi-name'] = afi_safi_name

            self.set_val(afi_safi_cfg, conf_nei_addr_fam, 'activate', 'enabled')
            self.set_val(afi_safi_cfg, conf_nei_addr_fam, 'route_reflector_client', 'route-reflector-client')
            self.set_val(afi_safi_cfg, conf_nei_addr_fam, 'route_server_client', 'route-server-client')

            if afi_safi_cfg:
                afi_safi['config'] = afi_safi_cfg

            policy_cfg = {}
            conf_route_map = conf_nei_addr_fam.get('route_map', None)
            if conf_route_map:
                for route in conf_route_map:
                    # 'in' maps to import-policy, everything else to export.
                    policy_key = "import-policy" if "in" == route['direction'] else "export-policy"
                    route_name = route['name']
                    policy_cfg[policy_key] = [route_name]
            if policy_cfg:
                afi_safi['apply-policy'] = {'config': policy_cfg}

            pfx_lst_cfg = {}
            conf_prefix_list_in = conf_nei_addr_fam.get('prefix_list_in', None)
            conf_prefix_list_out = conf_nei_addr_fam.get('prefix_list_out', None)
            if conf_prefix_list_in:
                pfx_lst_cfg['import-policy'] = conf_prefix_list_in
            if conf_prefix_list_out:
                pfx_lst_cfg['export-policy'] = conf_prefix_list_out
            if pfx_lst_cfg:
                afi_safi['prefix-list'] = {'config': pfx_lst_cfg}

            ip_dict = {}
            ip_afi_cfg = {}
            pfx_lmt_cfg = {}
            conf_ip_afi = conf_nei_addr_fam.get('ip_afi')
            conf_prefix_limit = conf_nei_addr_fam.get('prefix_limit')
            if conf_prefix_limit:
                pfx_lmt_cfg = get_prefix_limit_payload(conf_prefix_limit)
            if pfx_lmt_cfg and afi_safi_val == 'L2VPN_EVPN':
                # L2VPN EVPN carries the prefix limit directly.
                afi_safi['l2vpn-evpn'] = {'prefix-limit': {'config': pfx_lmt_cfg}}
            else:
                if conf_ip_afi:
                    ip_afi_cfg = get_ip_afi_cfg_payload(conf_ip_afi)
                    if ip_afi_cfg:
                        ip_dict['config'] = ip_afi_cfg
                if pfx_lmt_cfg:
                    ip_dict['prefix-limit'] = {'config': pfx_lmt_cfg}
                if ip_dict and afi_safi_val == 'IPV4_UNICAST':
                    afi_safi['ipv4-unicast'] = ip_dict
                elif ip_dict and afi_safi_val == 'IPV6_UNICAST':
                    afi_safi['ipv6-unicast'] = ip_dict

            allowas_in_cfg = {}
            conf_allowas_in = conf_nei_addr_fam.get('allowas_in', None)
            if conf_allowas_in:
                mat_allowas_in = self.get_allowas_in(match, conf_neighbor_val, conf_afi, conf_safi)
                origin = conf_allowas_in.get('origin', None)
                if origin is not None:
                    # Switching to 'origin' form: remove any configured count.
                    if mat_allowas_in:
                        mat_value = mat_allowas_in.get('value', None)
                        if mat_value:
                            self.append_delete_request(requests, mat_value, mat_allowas_in, 'value', del_url, self.allowas_value_path)
                    allowas_in_cfg['origin'] = origin
                else:
                    value = conf_allowas_in.get('value', None)
                    if value is not None:
                        # Switching to 'value' form: remove any configured origin.
                        if mat_allowas_in:
                            mat_origin = mat_allowas_in.get('origin', None)
                            if mat_origin:
                                self.append_delete_request(requests, mat_origin, mat_allowas_in, 'origin', del_url, self.allowas_origin_path)
                        allowas_in_cfg['as-count'] = value
                if allowas_in_cfg:
                    allowas_in_cfg['enabled'] = True
                    afi_safi['allow-own-as'] = {'config': allowas_in_cfg}

            if afi_safi:
                afi_safis.append(afi_safi)

        if afi_safis:
            payload = {"openconfig-network-instance:afi-safis": {"afi-safi": afi_safis}}
            requests.append({'path': url, 'method': PATCH, 'data': payload})

        return requests

    def get_delete_neighbor_af_routemaps_requests(self, vrf_name, conf_neighbor_val, afi, safi, routes):
        """Build DELETE requests for the given route-map bindings of one
        neighbor address family."""
        requests = []
        for route in routes:
            afi_safi_name = ("%s_%s" % (afi, safi)).upper()
            policy_type = "import-policy" if "in" == route['direction'] else "export-policy"
            url = '%s=%s/%s/%s=%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.neighbor_path, conf_neighbor_val)
            url += ('%s=%s/apply-policy/config/%s' % (self.afi_safi_path, afi_safi_name, policy_type))
            requests.append({'path': url, 'method': DELETE})
        return requests

    def get_all_neighbors_af_modify_requests(self, match, conf_neighbors, vrf_name):
        """Build modify requests for every neighbor entry in 'conf_neighbors'."""
        requests = []
        for conf_neighbor in conf_neighbors:
            conf_neighbor_val = conf_neighbor.get('neighbor', None)
            if conf_neighbor_val:
                requests.extend(self.get_single_neighbors_af_modify_request(match, vrf_name, conf_neighbor_val, conf_neighbor))
        return requests

    def get_modify_requests(self, conf, match, vrf_name):
        """Build the requests for one BGP instance: first delete any
        route-map bindings being replaced, then PATCH the new config."""
        requests = []
        conf_neighbors = conf.get('neighbors', [])
        mat_neighbors = []
        if match and match.get('neighbors', None):
            mat_neighbors = match.get('neighbors')

        if conf_neighbors:
            for conf_neighbor in conf_neighbors:
                conf_neighbor_val = conf_neighbor.get('neighbor', None)
                if conf_neighbor_val is None:
                    continue

                mat_neighbor = next((e_neighbor for e_neighbor in mat_neighbors if e_neighbor['neighbor'] == conf_neighbor_val), None)
                if mat_neighbor is None:
                    continue

                conf_nei_addr_fams = conf_neighbor.get('address_family', None)
                mat_nei_addr_fams = mat_neighbor.get('address_family', None)
                if conf_nei_addr_fams is None or mat_nei_addr_fams is None:
                    continue

                for conf_nei_addr_fam in conf_nei_addr_fams:
                    afi = conf_nei_addr_fam.get('afi', None)
                    safi = conf_nei_addr_fam.get('safi', None)
                    if afi is None or safi is None:
                        continue

                    mat_nei_addr_fam = next((addr_fam for addr_fam in mat_nei_addr_fams if (addr_fam['afi'] == afi and addr_fam['safi'] == safi)), None)
                    if mat_nei_addr_fam is None:
                        continue

                    conf_route_map = conf_nei_addr_fam.get('route_map', None)
                    mat_route_map = mat_nei_addr_fam.get('route_map', None)
                    if conf_route_map is None or mat_route_map is None:
                        continue

                    # A route-map with the same direction is being replaced;
                    # it must be deleted before the PATCH re-adds it.
                    del_routes = []
                    for route in conf_route_map:
                        exist_route = next((e_route for e_route in mat_route_map if e_route['direction'] == route['direction']), None)
                        if exist_route:
                            del_routes.append(exist_route)
                    if del_routes:
                        requests.extend(self.get_delete_neighbor_af_routemaps_requests(vrf_name, conf_neighbor_val, afi, safi, del_routes))

        requests.extend(self.get_all_neighbors_af_modify_requests(match, conf_neighbors, vrf_name))
        return requests

    def get_modify_bgp_neighbors_af_requests(self, commands, have):
        """Build all modify requests for the given command list."""
        requests = []
        if not commands:
            return requests

        for conf in commands:
            vrf_name = conf['vrf_name']
            as_val = conf['bgp_as']

            match = next((cfg for cfg in have if (cfg['vrf_name'] == vrf_name and (cfg['bgp_as'] == as_val))), None)
            modify_reqs = self.get_modify_requests(conf, match, vrf_name)
            if modify_reqs:
                requests.extend(modify_reqs)

        return requests

    def append_delete_request(self, requests, cur_var, mat_var, key, url, path):
        """Append a DELETE request for 'path' when the option is both
        requested (cur_var) and currently configured (mat_var[key]).

        :returns: True when a request was appended, else False
        """
        ret_value = False
        if cur_var is not None and mat_var.get(key, None):
            requests.append({'path': url + path, 'method': DELETE})
            ret_value = True
        return ret_value

    def delete_ip_afi_requests(self, conf_ip_afi, mat_ip_afi, conf_afi_safi_val, url):
        """Build DELETE requests for the configured 'ip_afi' options."""
        requests = []
        default_policy_name = conf_ip_afi.get('default_policy_name', None)
        send_default_route = conf_ip_afi.get('send_default_route', None)
        if default_policy_name:
            self.append_delete_request(requests, default_policy_name, mat_ip_afi, 'default_policy_name', url, self.def_policy_name_path % (conf_afi_safi_val))
        if send_default_route:
            self.append_delete_request(requests, send_default_route, mat_ip_afi, 'send_default_route', url, self.send_def_route_path % (conf_afi_safi_val))

        return requests

    def delete_prefix_limit_requests(self, conf_prefix_limit, mat_prefix_limit, conf_afi_safi_val, url):
        """Build DELETE requests for the configured 'prefix_limit' options."""
        requests = []
        max_prefixes = conf_prefix_limit.get('max_prefixes', None)
        prevent_teardown = conf_prefix_limit.get('prevent_teardown', None)
        restart_timer = conf_prefix_limit.get('restart_timer', None)
        warning_threshold = conf_prefix_limit.get('warning_threshold', None)
        if max_prefixes:
            self.append_delete_request(requests, max_prefixes, mat_prefix_limit, 'max_prefixes', url, self.max_prefixes_path % (conf_afi_safi_val))
        if prevent_teardown:
            self.append_delete_request(requests, prevent_teardown, mat_prefix_limit, 'prevent_teardown', url, self.prv_teardown_path % (conf_afi_safi_val))
        if restart_timer:
            self.append_delete_request(requests, restart_timer, mat_prefix_limit, 'restart_timer', url, self.restart_timer_path % (conf_afi_safi_val))
        if warning_threshold:
            self.append_delete_request(requests, warning_threshold, mat_prefix_limit, 'warning_threshold', url, self.wrn_threshold_path % (conf_afi_safi_val))

        return requests

    def process_delete_specific_params(self, vrf_name, conf_neighbor_val, conf_nei_addr_fam, conf_afi, conf_safi, matched_nei_addr_fams, url):
        """Build deletions for one neighbor address family.

        If the command names no individual attribute, the whole AF entry is
        deleted; otherwise only the named attributes that are actually
        configured are deleted.
        """
        requests = []
        # AF container name used in option paths, e.g. 'ipv4-unicast'.
        conf_afi_safi_val = ("%s-%s" % (conf_afi, conf_safi))

        mat_nei_addr_fam = None
        if matched_nei_addr_fams:
            mat_nei_addr_fam = next((e_af for e_af in matched_nei_addr_fams if (e_af['afi'] == conf_afi and e_af['safi'] == conf_safi)), None)

        if mat_nei_addr_fam:
            conf_allowas_in = conf_nei_addr_fam.get('allowas_in', None)
            conf_activate = conf_nei_addr_fam.get('activate', None)
            conf_route_map = conf_nei_addr_fam.get('route_map', None)
            conf_route_reflector_client = conf_nei_addr_fam.get('route_reflector_client', None)
            conf_route_server_client = conf_nei_addr_fam.get('route_server_client', None)
            conf_prefix_list_in = conf_nei_addr_fam.get('prefix_list_in', None)
            conf_prefix_list_out = conf_nei_addr_fam.get('prefix_list_out', None)
            conf_ip_afi = conf_nei_addr_fam.get('ip_afi', None)
            conf_prefix_limit = conf_nei_addr_fam.get('prefix_limit', None)

            var_list = [conf_allowas_in, conf_activate, conf_route_map, conf_route_reflector_client, conf_route_server_client,
                        conf_prefix_list_in, conf_prefix_list_out, conf_ip_afi, conf_prefix_limit]
            if all(var is None for var in var_list):
                # No individual attribute named: delete the entire AF entry.
                requests.append({'path': url, 'method': DELETE})
            else:
                mat_route_map = mat_nei_addr_fam.get('route_map', None)
                if conf_route_map and mat_route_map:
                    del_routes = []
                    for route in conf_route_map:
                        if any(e_route for e_route in mat_route_map if route['direction'] == e_route['direction']):
                            del_routes.append(route)
                    if del_routes:
                        requests.extend(self.get_delete_neighbor_af_routemaps_requests(vrf_name, conf_neighbor_val, conf_afi, conf_safi, del_routes))

                self.append_delete_request(requests, conf_activate, mat_nei_addr_fam, 'activate', url, self.activate_path)
                self.append_delete_request(requests, conf_route_reflector_client, mat_nei_addr_fam, 'route_reflector_client', url, self.ref_client_path)
                self.append_delete_request(requests, conf_route_server_client, mat_nei_addr_fam, 'route_server_client', url, self.serv_client_path)
                self.append_delete_request(requests, conf_prefix_list_in, mat_nei_addr_fam, 'prefix_list_in', url, self.prefix_list_in_path)
                self.append_delete_request(requests, conf_prefix_list_out, mat_nei_addr_fam, 'prefix_list_out', url, self.prefix_list_out_path)

                mat_allowas_in = mat_nei_addr_fam.get('allowas_in', None)
                if conf_allowas_in is not None and mat_allowas_in:
                    origin = conf_allowas_in.get('origin', None)
                    if origin is not None:
                        # Deleting either allowas form also clears 'enabled'.
                        if self.append_delete_request(requests, origin, mat_allowas_in, 'origin', url, self.allowas_origin_path):
                            self.append_delete_request(requests, True, {'enabled': True}, 'enabled', url, self.allowas_enabled_path)
                    else:
                        value = conf_allowas_in.get('value', None)
                        if value is not None:
                            if self.append_delete_request(requests, value, mat_allowas_in, 'value', url, self.allowas_value_path):
                                self.append_delete_request(requests, True, {'enabled': True}, 'enabled', url, self.allowas_enabled_path)

                mat_ip_afi = mat_nei_addr_fam.get('ip_afi', None)
                mat_prefix_limit = mat_nei_addr_fam.get('prefix_limit', None)
                if conf_ip_afi and mat_ip_afi:
                    requests.extend(self.delete_ip_afi_requests(conf_ip_afi, mat_ip_afi, conf_afi_safi_val, url))
                if conf_prefix_limit and mat_prefix_limit:
                    requests.extend(self.delete_prefix_limit_requests(conf_prefix_limit, mat_prefix_limit, conf_afi_safi_val, url))

        return requests

    def process_neighbor_delete_address_families(self, vrf_name, conf_nei_addr_fams, matched_nei_addr_fams, neighbor_val, is_delete_all):
        """Build deletion requests for each address family of one neighbor."""
        requests = []

        for conf_nei_addr_fam in conf_nei_addr_fams:
            conf_afi = conf_nei_addr_fam.get('afi', None)
            conf_safi = conf_nei_addr_fam.get('safi', None)
            if not conf_afi or not conf_safi:
                continue
            afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper()
            url = '%s=%s/%s/%s=%s/' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.neighbor_path, neighbor_val)
            url += '%s=openconfig-bgp-types:%s' % (self.afi_safi_path, afi_safi)
            if is_delete_all:
                requests.append({'path': url, 'method': DELETE})
            else:
                requests.extend(self.process_delete_specific_params(vrf_name, neighbor_val, conf_nei_addr_fam, conf_afi, conf_safi, matched_nei_addr_fams, url))

        return requests

    def get_delete_single_bgp_neighbors_af_request(self, conf, is_delete_all, match=None):
        """Build deletion requests for one BGP instance's neighbor AFs.

        When the command names no neighbors (or a neighbor names no AFs),
        the corresponding entries from 'match' (the 'have' data) are used.
        """
        requests = []
        vrf_name = conf['vrf_name']
        conf_neighbors = conf.get('neighbors', [])

        if match and not conf_neighbors:
            conf_neighbors = match.get('neighbors', [])
            if conf_neighbors:
                conf_neighbors = [{'neighbor': nei['neighbor']} for nei in conf_neighbors]

        if not conf_neighbors:
            return requests
        mat_neighbors = None
        if match:
            mat_neighbors = match.get('neighbors', [])

        for conf_neighbor in conf_neighbors:
            conf_neighbor_val = conf_neighbor.get('neighbor', None)
            if not conf_neighbor_val:
                continue

            mat_neighbor = None
            if mat_neighbors:
                mat_neighbor = next((e_nei for e_nei in mat_neighbors if e_nei['neighbor'] == conf_neighbor_val), None)

            conf_nei_addr_fams = conf_neighbor.get('address_family', None)
            if mat_neighbor and not conf_nei_addr_fams:
                conf_nei_addr_fams = mat_neighbor.get('address_family', None)
                if conf_nei_addr_fams:
                    conf_nei_addr_fams = [{'afi': af['afi'], 'safi': af['safi']} for af in conf_nei_addr_fams]

            if not conf_nei_addr_fams:
                continue

            mat_nei_addr_fams = None
            if mat_neighbor:
                mat_nei_addr_fams = mat_neighbor.get('address_family', None)

            requests.extend(self.process_neighbor_delete_address_families(vrf_name, conf_nei_addr_fams, mat_nei_addr_fams, conf_neighbor_val, is_delete_all))

        return requests

    def get_delete_bgp_neighbors_af_requests(self, commands, have, is_delete_all):
        """Build all deletion requests for the given command list."""
        requests = []
        for cmd in commands:
            vrf_name = cmd['vrf_name']
            as_val = cmd['bgp_as']
            match = None
            if not is_delete_all:
                match = next((have_cfg for have_cfg in have if have_cfg['vrf_name'] == vrf_name and have_cfg['bgp_as'] == as_val), None)
            requests.extend(self.get_delete_single_bgp_neighbors_af_request(cmd, is_delete_all, match))
        return requests
build_interfaces_create_request, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_diff, + update_states, + normalize_interface_name +) +from ansible.module_utils._text import to_native +from ansible.module_utils.connection import ConnectionError +import traceback + +LIB_IMP_ERR = None +ERR_MSG = None +try: + import requests + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + +PATCH = 'patch' +DELETE = 'delete' + + +class Interfaces(ConfigBase): + """ + The sonic_interfaces class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'interfaces', + ] + + params = ('description', 'mtu', 'enabled') + delete_flag = False + + def __init__(self, module): + super(Interfaces, self).__init__(module) + + def get_interfaces_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + interfaces_facts = facts['ansible_network_resources'].get('interfaces') + if not interfaces_facts: + return [] + + return interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + + existing_interfaces_facts = self.get_interfaces_facts() + commands, requests = self.set_config(existing_interfaces_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_interfaces_facts = self.get_interfaces_facts() + + result['before'] = existing_interfaces_facts + if 
result['changed']: + result['after'] = changed_interfaces_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + normalize_interface_name(want, self._module) + have = existing_interfaces_facts + + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + # diff method works on dict, so creating temp dict + diff = get_diff(want, have) + # removing the dict in case diff found + + if state == 'overridden': + have = [each_intf for each_intf in have if each_intf['name'].startswith('Ethernet')] + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + + return commands, requests + + def _state_replaced(self, want, have, diff): + """ The command generator when state is replaced + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :param interface_type: interface type + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = 
self.filter_comands_to_change(diff, have) + requests = self.get_delete_interface_requests(commands, have) + requests.extend(self.get_modify_interface_requests(commands, have)) + if commands and len(requests) > 0: + commands = update_states(commands, "replaced") + else: + commands = [] + + return commands, requests + + def _state_overridden(self, want, have, diff): + """ The command generator when state is overridden + + :param want: the desired configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + commands_del = self.filter_comands_to_change(want, have) + requests = self.get_delete_interface_requests(commands_del, have) + del_req_count = len(requests) + if commands_del and del_req_count > 0: + commands_del = update_states(commands_del, "deleted") + commands.extend(commands_del) + + commands_over = diff + requests.extend(self.get_modify_interface_requests(commands_over, have)) + if commands_over and len(requests) > del_req_count: + commands_over = update_states(commands_over, "overridden") + commands.extend(commands_over) + + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_interface_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: 
the current configuration as a dictionary + :param interface_type: interface type + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the interfaces + if not want: + commands = have + else: + commands = want + + requests = self.get_delete_interface_requests(commands, have) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def filter_comands_to_delete(self, configs, have): + commands = [] + + for conf in configs: + if self.is_this_delete_required(conf, have): + temp_conf = dict() + temp_conf['name'] = conf['name'] + temp_conf['description'] = '' + temp_conf['mtu'] = 9100 + temp_conf['enabled'] = True + commands.append(temp_conf) + return commands + + def filter_comands_to_change(self, configs, have): + commands = [] + if configs: + for conf in configs: + if self.is_this_change_required(conf, have): + commands.append(conf) + return commands + + def get_modify_interface_requests(self, configs, have): + self.delete_flag = False + commands = self.filter_comands_to_change(configs, have) + + return self.get_interface_requests(commands, have) + + def get_delete_interface_requests(self, configs, have): + self.delete_flag = True + commands = self.filter_comands_to_delete(configs, have) + + return self.get_interface_requests(commands, have) + + def get_interface_requests(self, configs, have): + requests = [] + if not configs: + return requests + + # Create URL and payload + for conf in configs: + name = conf["name"] + if self.delete_flag and name.startswith('Loopback'): + method = DELETE + url = 'data/openconfig-interfaces:interfaces/interface=%s' % quote(name, safe='') + request = {"path": url, "method": method} + else: + # Create Loopback in case not availble in have + if name.startswith('Loopback'): + have_conf = next((cfg for cfg in have if cfg['name'] == name), None) + if 
not have_conf: + loopback_create_request = build_interfaces_create_request(name) + requests.append(loopback_create_request) + method = PATCH + url = 'data/openconfig-interfaces:interfaces/interface=%s/config' % quote(name, safe='') + payload = self.build_create_payload(conf) + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def is_this_delete_required(self, conf, have): + if conf['name'] == "eth0": + return False + intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None) + if intf: + if (intf['name'].startswith('Loopback') or not ((intf.get('description') is None or intf.get('description') == '') and + (intf.get('enabled') is None or intf.get('enabled') is True) and (intf.get('mtu') is None or intf.get('mtu') == 9100))): + return True + return False + + def is_this_change_required(self, conf, have): + if conf['name'] == "eth0": + return False + ret_flag = False + intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None) + if intf: + # Check all parameter if any one is differen from existing + for param in self.params: + if conf.get(param) is not None and conf.get(param) != intf.get(param): + ret_flag = True + break + # if given interface is not present + else: + ret_flag = True + + return ret_flag + + def build_create_payload(self, conf): + temp_conf = dict() + temp_conf['name'] = conf['name'] + + if not temp_conf['name'].startswith('Loopback'): + if conf.get('enabled') is not None: + if conf.get('enabled'): + temp_conf['enabled'] = True + else: + temp_conf['enabled'] = False + if conf.get('description') is not None: + temp_conf['description'] = conf['description'] + if conf.get('mtu') is not None: + temp_conf['mtu'] = conf['mtu'] + + payload = {'openconfig-interfaces:config': temp_conf} + return payload diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py new file mode 100644 index 00000000..fccba770 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py @@ -0,0 +1,414 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_l2_interfaces class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_diff, + update_states, + normalize_interface_name +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import ( + Facts +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils._text import to_native +from ansible.module_utils.connection import ConnectionError +import traceback + +LIB_IMP_ERR = None +ERR_MSG = None +try: + import requests + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + +PATCH = 'patch' +intf_key = 'openconfig-if-ethernet:ethernet' +port_chnl_key = 'openconfig-if-aggregate:aggregation' + +TEST_KEYS = [ + {'allowed_vlans': 
{'vlan': ''}}, +] + + +class L2_interfaces(ConfigBase): + """ + The sonic_l2_interfaces class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'l2_interfaces', + ] + + def __init__(self, module): + super(L2_interfaces, self).__init__(module) + + def get_l2_interfaces_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + l2_interfaces_facts = facts['ansible_network_resources'].get('l2_interfaces') + if not l2_interfaces_facts: + return [] + return l2_interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + + existing_l2_interfaces_facts = self.get_l2_interfaces_facts() + commands, requests = self.set_config(existing_l2_interfaces_facts) + + if commands: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_l2_interfaces_facts = self.get_l2_interfaces_facts() + + result['before'] = existing_l2_interfaces_facts + if result['changed']: + result['after'] = changed_l2_interfaces_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_l2_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + normalize_interface_name(want, self._module) + have = existing_l2_interfaces_facts + + for intf in have: + if not 
intf.get('access'): + intf.update({'access': None}) + if not intf.get('trunk'): + intf.update({'trunk': None}) + + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + + return commands, requests + + def _state_replaced(self, want, have, diff): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + + requests = [] + commands = diff + + if commands: + requests_del = self.get_delete_all_switchport_requests(commands) + if requests_del: + requests.extend(requests_del) + + requests_rep = self.get_create_l2_interface_request(commands) + if len(requests_del) or len(requests_rep): + requests.extend(requests_rep) + commands = update_states(commands, "replaced") + else: + commands = [] + + return commands, requests + + def _state_overridden(self, want, have, diff): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + + commands_del = get_diff(have, want, TEST_KEYS) + requests_del = 
self.get_delete_all_switchport_requests(commands_del) + if len(requests_del): + requests.extend(requests_del) + commands_del = update_states(commands_del, "deleted") + commands.extend(commands_del) + + commands_over = diff + requests_over = self.get_create_l2_interface_request(commands_over) + if requests_over: + requests.extend(requests_over) + commands_over = update_states(commands_over, "overridden") + commands.extend(commands_over) + + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration at position-0 + Requests necessary to merge to the current configuration + at position-1 + """ + commands = diff + requests = self.get_create_l2_interface_request(commands) + if commands and len(requests): + commands = update_states(commands, "merged") + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + + # if want is none, then delete all the vlan links + if not want or len(have) == 0: + commands = have + requests = self.get_delete_all_switchport_requests(commands) + else: + commands = want + requests = self.get_delete_specifig_switchport_requests(want, have) + if len(requests) == 0: + commands = [] + + if commands: + commands = update_states(commands, "deleted") + + return commands, requests + + def get_trunk_delete_switchport_request(self, config, match_config): + method = "DELETE" + name = config['name'] + requests = [] + match_trunk = match_config.get('trunk') + if match_trunk: + conf_allowed_vlans = config['trunk'].get('allowed_vlans', []) + if conf_allowed_vlans: + for each_allowed_vlan in conf_allowed_vlans: + if each_allowed_vlan in match_trunk.get('allowed_vlans'): + vlan_id = 
each_allowed_vlan['vlan'] + key = intf_key + if name.startswith('PortChannel'): + key = port_chnl_key + url = "data/openconfig-interfaces:interfaces/interface={0}/{1}/".format(name, key) + url += "openconfig-vlan:switched-vlan/config/trunk-vlans={0}".format(vlan_id) + request = {"path": url, "method": method} + requests.append(request) + return requests + + def get_access_delete_switchport_request(self, config, match_config): + method = "DELETE" + request = None + name = config['name'] + match_access = match_config.get('access') + if match_access and match_access.get('vlan') == config['access'].get('vlan'): + key = intf_key + if name.startswith('PortChannel'): + key = port_chnl_key + url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config/access-vlan" + request = {"path": url.format(name, key), "method": method} + return request + + def get_delete_all_switchport_requests(self, configs): + requests = [] + if not configs: + return requests + # Create URL and payload + url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config" + method = "DELETE" + for intf in configs: + name = intf.get("name") + key = intf_key + if name.startswith('PortChannel'): + key = port_chnl_key + request = {"path": url.format(name, key), + "method": method, + } + requests.append(request) + + return requests + + def get_delete_specifig_switchport_requests(self, configs, have): + requests = [] + if not configs: + return requests + + for conf in configs: + name = conf['name'] + + matched = next((cnf for cnf in have if cnf['name'] == name), None) + if matched: + keys = conf.keys() + + # if both access and trunk not mention in delete + if not ('access' in keys) and not ('trunk' in keys): + requests.extend(self.get_delete_all_switchport_requests([conf])) + else: + # if access or trnuk is mentioned with value + if conf.get('access') or conf.get('trunk'): + # if access is mentioned with value + if conf.get('access'): + 
vlan = conf.get('access').get('vlan') + if vlan: + request = self.get_access_delete_switchport_request(conf, matched) + if request: + requests.append(request) + else: + if matched.get('access') and matched.get('access').get('vlan'): + conf['access']['vlan'] = matched.get('access').get('vlan') + request = self.get_access_delete_switchport_request(conf, matched) + if request: + requests.append(request) + + # if trunk is mentioned with value + if conf.get('trunk'): + allowed_vlans = conf['trunk'].get('allowed_vlans') + if allowed_vlans: + requests.extend(self.get_trunk_delete_switchport_request(conf, matched)) + # allowed vlans mentinoed without value + else: + if matched.get('trunk') and matched.get('trunk').get('allowed_vlans'): + conf['trunk']['allowed_vlans'] = matched.get('trunk') and matched.get('trunk').get('allowed_vlans').copy() + requests.extend(self.get_trunk_delete_switchport_request(conf, matched)) + # check for access or trunk is mentioned without value + else: + # access mentioned wothout value + if ('access' in keys) and conf.get('access', None) is None: + # get the existing values and delete it + if matched.get('access'): + conf['access'] = matched.get('access').copy() + request = self.get_access_delete_switchport_request(conf, matched) + if request: + requests.append(request) + # trunk mentioned wothout value + if ('trunk' in keys) and conf.get('trunk', None) is None: + # get the existing values and delete it + if matched.get('trunk'): + conf['trunk'] = matched.get('trunk').copy() + requests.extend(self.get_trunk_delete_switchport_request(conf, matched)) + + return requests + + def get_create_l2_interface_request(self, configs): + requests = [] + if not configs: + return requests + # Create URL and payload + url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config" + method = "PATCH" + for conf in configs: + name = conf.get('name') + if name == "eth0": + continue + key = intf_key + if 
name.startswith('PortChannel'): + key = port_chnl_key + payload = self.build_create_payload(conf) + request = {"path": url.format(name, key), + "method": method, + "data": payload + } + requests.append(request) + return requests + + def build_create_payload(self, conf): + payload_url = '{"openconfig-vlan:config":{ ' + access_payload = '' + trunk_payload = '' + if conf.get('access'): + access_vlan_id = conf['access']['vlan'] + access_payload = '"access-vlan": {0}'.format(access_vlan_id) + if conf.get('trunk'): + trunk_payload = '"trunk-vlans": [' + cnt = 0 + for each_allowed_vlan in conf['trunk']['allowed_vlans']: + if cnt > 0: + trunk_payload += ',' + trunk_payload += str(each_allowed_vlan['vlan']) + cnt = cnt + 1 + trunk_payload += ']' + + if access_payload != '': + payload_url += access_payload + if trunk_payload != '': + if access_payload != '': + payload_url += ',' + payload_url += trunk_payload + + payload_url += '}}' + + ret_payload = json.loads(payload_url) + return ret_payload diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py new file mode 100644 index 00000000..d1b73525 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py @@ -0,0 +1,515 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_l3_interfaces class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_diff, + update_states, + normalize_interface_name, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils._text import to_native +from ansible.module_utils.connection import ConnectionError + +TEST_KEYS = [ + {"addresses": {"address": "", "secondary": ""}} +] + +DELETE = "DELETE" +PATCH = "PATCH" + + +class L3_interfaces(ConfigBase): + """ + The sonic_l3_interfaces class + """ + + gather_subset = [ + '!all', + '!min' + ] + + gather_network_resources = [ + 'l3_interfaces', + ] + + def __init__(self, module): + super(L3_interfaces, self).__init__(module) + + def get_l3_interfaces_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + l3_interfaces_facts = facts['ansible_network_resources'].get('l3_interfaces') + if not l3_interfaces_facts: + return [] + return 
l3_interfaces_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + + existing_l3_interfaces_facts = self.get_l3_interfaces_facts() + commands, requests = self.set_config(existing_l3_interfaces_facts) + if commands: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_l3_interfaces_facts = self.get_l3_interfaces_facts() + + result['before'] = existing_l3_interfaces_facts + if result['changed']: + result['after'] = changed_l3_interfaces_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_l3_interfaces_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + normalize_interface_name(want, self._module) + have = existing_l3_interfaces_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + diff = get_diff(want, have, TEST_KEYS) + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = 
self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + ret_commands = commands + return ret_commands, requests + + def _state_replaced(self, want, have, diff): + """ The command generator when state is replaced + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + ret_requests = list() + commands = list() + l3_interfaces_to_delete = get_diff(have, want, TEST_KEYS) + obj = self.get_object(l3_interfaces_to_delete, want) + diff = get_diff(obj, want, TEST_KEYS) + if diff: + delete_l3_interfaces_requests = self.get_delete_all_requests(want) + ret_requests.extend(delete_l3_interfaces_requests) + commands.extend(update_states(want, "deleted")) + l3_interfaces_to_create_requests = self.get_create_l3_interfaces_requests(want, have, want) + ret_requests.extend(l3_interfaces_to_create_requests) + commands.extend(update_states(want, "merged")) + return commands, ret_requests + + def _state_overridden(self, want, have, diff): + """ The command generator when state is overridden + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + ret_requests = list() + commands = list() + interfaces_to_delete = get_diff(have, want, TEST_KEYS) + if interfaces_to_delete: + delete_interfaces_requests = self.get_delete_l3_interfaces_requests(want, have) + ret_requests.extend(delete_interfaces_requests) + commands.extend(update_states(interfaces_to_delete, "deleted")) + + if diff: + interfaces_to_create_requests = self.get_create_l3_interfaces_requests(diff, have, want) + ret_requests.extend(interfaces_to_create_requests) + commands.extend(update_states(diff, "merged")) + + return commands, ret_requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided 
into + the current configuration + """ + self.validate_primary_ips(want) + commands = diff + requests = self.get_create_l3_interfaces_requests(commands, have, want) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + commands = list() + if not want: + commands = have + requests = self.get_delete_all_completely_requests(commands) + else: + commands = want + requests = self.get_delete_l3_interfaces_requests(commands, have) + if len(requests) == 0: + commands = [] + if commands: + commands = update_states(commands, "deleted") + return commands, requests + + def get_object(self, have, want): + objects = list() + names = [i.get('name', None) for i in want] + for obj in have: + if 'name' in obj and obj['name'] in names: + objects.append(obj.copy()) + return objects + + def get_address(self, ip_str, have_obj): + to_return = list() + for i in have_obj: + if i.get(ip_str) and i[ip_str].get('addresses'): + for ip in i[ip_str]['addresses']: + to_return.append(ip['address']) + return to_return + + def get_delete_l3_interfaces_requests(self, want, have): + requests = [] + ipv4_addrs_url_all = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4/addresses' + ipv6_addrs_url_all = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/addresses' + ipv4_anycast_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4' + ipv4_anycast_url += '/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway={anycast_ip}' + ipv4_addr_url = 
'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4/addresses/address={address}' + ipv6_addr_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/addresses/address={address}' + ipv6_enabled_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/config/enabled' + + for each_l3 in want: + l3 = each_l3.copy() + name = l3.pop('name') + sub_intf = self.get_sub_interface_name(name) + have_obj = next((e_cfg for e_cfg in have if e_cfg['name'] == name), None) + if not have_obj: + continue + have_ipv4_addrs = list() + have_ipv4_anycast_addrs = list() + have_ipv6_addrs = list() + have_ipv6_enabled = None + + if have_obj.get('ipv4'): + if 'addresses' in have_obj['ipv4']: + have_ipv4_addrs = have_obj['ipv4']['addresses'] + if 'anycast_addresses' in have_obj['ipv4']: + have_ipv4_anycast_addrs = have_obj['ipv4']['anycast_addresses'] + + have_ipv6_addrs = self.get_address('ipv6', [have_obj]) + if have_obj.get('ipv6') and 'enabled' in have_obj['ipv6']: + have_ipv6_enabled = have_obj['ipv6']['enabled'] + + ipv4 = l3.get('ipv4', None) + ipv6 = l3.get('ipv6', None) + + ipv4_addrs = None + ipv6_addrs = None + + is_del_ipv4 = None + is_del_ipv6 = None + if name and ipv4 is None and ipv6 is None: + is_del_ipv4 = True + is_del_ipv6 = True + elif ipv4 and ipv4.get('addresses') and not ipv4.get('anycast_addresses'): + is_del_ipv4 = True + elif ipv6 and not ipv6.get('addresses') and ipv6.get('enabled') is None: + is_del_ipv6 = True + + if is_del_ipv4: + if have_ipv4_addrs and len(have_ipv4_addrs) != 0: + ipv4_addrs_delete_request = {"path": ipv4_addrs_url_all.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(ipv4_addrs_delete_request) + if have_ipv4_anycast_addrs and len(have_ipv4_anycast_addrs) != 0: + for ip in have_ipv4_anycast_addrs: + ip = ip.replace('/', '%2f') + anycast_delete_request = {"path": 
ipv4_anycast_url.format(intf_name=name, sub_intf_name=sub_intf, anycast_ip=ip), "method": DELETE} + requests.append(anycast_delete_request) + else: + ipv4_addrs = [] + ipv4_anycast_addrs = [] + if l3.get('ipv4'): + if l3['ipv4'].get('addresses'): + ipv4_addrs = l3['ipv4']['addresses'] + if l3['ipv4'].get('anycast_addresses'): + ipv4_anycast_addrs = l3['ipv4']['anycast_addresses'] + + # Store the primary ip at end of the list. So primary ip will be deleted after the secondary ips + ipv4_del_reqs = [] + for ip in ipv4_addrs: + match_ip = next((addr for addr in have_ipv4_addrs if addr['address'] == ip['address']), None) + if match_ip: + addr = ip['address'].split('/')[0] + del_url = ipv4_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr) + if match_ip['secondary']: + del_url += '/config/secondary' + ipv4_del_reqs.insert(0, {"path": del_url, "method": DELETE}) + else: + ipv4_del_reqs.append({"path": del_url, "method": DELETE}) + if ipv4_del_reqs: + requests.extend(ipv4_del_reqs) + + for ip in ipv4_anycast_addrs: + if have_ipv4_addrs and ip in have_ipv4_addrs: + ip = ip.replace('/', '%2f') + anycast_delete_request = {"path": ipv4_anycast_url.format(intf_name=name, sub_intf_name=sub_intf, anycast_ip=ip), "method": DELETE} + requests.append(anycast_delete_request) + + if is_del_ipv6: + if have_ipv6_addrs and len(have_ipv6_addrs) != 0: + ipv6_addrs_delete_request = {"path": ipv6_addrs_url_all.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(ipv6_addrs_delete_request) + + if have_ipv6_enabled: + ipv6_enabled_delete_request = {"path": ipv6_enabled_url.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(ipv6_enabled_delete_request) + else: + ipv6_addrs = [] + ipv6_enabled = None + if l3.get('ipv6'): + if l3['ipv6'].get('addresses'): + ipv6_addrs = l3['ipv6']['addresses'] + if 'enabled' in l3['ipv6']: + ipv6_enabled = l3['ipv6']['enabled'] + + for ip in ipv6_addrs: + if have_ipv6_addrs and 
ip['address'] in have_ipv6_addrs: + addr = ip['address'].split('/')[0] + request = {"path": ipv6_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr), "method": DELETE} + requests.append(request) + + if have_ipv6_enabled and ipv6_enabled is not None: + request = {"path": ipv6_enabled_url.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(request) + return requests + + def get_delete_all_completely_requests(self, configs): + delete_requests = list() + for l3 in configs: + if l3['ipv4'] or l3['ipv6']: + delete_requests.append(l3) + return self.get_delete_all_requests(delete_requests) + + def get_delete_all_requests(self, configs): + requests = [] + ipv4_addrs_url_all = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4/addresses' + ipv4_anycast_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4' + ipv4_anycast_url += '/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway={anycast_ip}' + ipv6_addrs_url_all = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/addresses' + ipv6_enabled_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/config/enabled' + for l3 in configs: + name = l3.get('name') + ipv4_addrs = [] + ipv4_anycast = [] + if l3.get('ipv4'): + if l3['ipv4'].get('addresses'): + ipv4_addrs = l3['ipv4']['addresses'] + if l3['ipv4'].get('anycast_addresses', None): + ipv4_anycast = l3['ipv4']['anycast_addresses'] + + ipv6_addrs = [] + ipv6_enabled = None + if l3.get('ipv6'): + if l3['ipv6'].get('addresses'): + ipv6_addrs = l3['ipv6']['addresses'] + if 'enabled' in l3['ipv6']: + ipv6_enabled = l3['ipv6']['enabled'] + + sub_intf = self.get_sub_interface_name(name) + + if ipv4_addrs: + ipv4_addrs_delete_request = {"path": ipv4_addrs_url_all.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} 
+ requests.append(ipv4_addrs_delete_request) + if ipv4_anycast: + for ip in ipv4_anycast: + ip = ip.replace('/', '%2f') + anycast_delete_request = {"path": ipv4_anycast_url.format(intf_name=name, sub_intf_name=sub_intf, anycast_ip=ip), "method": DELETE} + requests.append(anycast_delete_request) + if ipv6_addrs: + ipv6_addrs_delete_request = {"path": ipv6_addrs_url_all.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(ipv6_addrs_delete_request) + if ipv6_enabled: + ipv6_enabled_delete_request = {"path": ipv6_enabled_url.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE} + requests.append(ipv6_enabled_delete_request) + return requests + + def get_create_l3_interfaces_requests(self, configs, have, want): + requests = [] + if not configs: + return requests + + ipv4_addrs_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4/addresses' + ipv4_anycast_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv4/' + ipv4_anycast_url += 'openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway' + ipv6_addrs_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/addresses' + ipv6_enabled_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/config' + + for l3 in configs: + l3_interface_name = l3.get('name') + if l3_interface_name == "eth0": + continue + + sub_intf = self.get_sub_interface_name(l3_interface_name) + + ipv4_addrs = [] + ipv4_anycast = [] + if l3.get('ipv4'): + if l3['ipv4'].get('addresses'): + ipv4_addrs = l3['ipv4']['addresses'] + if l3['ipv4'].get('anycast_addresses'): + ipv4_anycast = l3['ipv4']['anycast_addresses'] + + ipv6_addrs = [] + ipv6_enabled = None + if l3.get('ipv6'): + if l3['ipv6'].get('addresses'): + ipv6_addrs = l3['ipv6']['addresses'] + if 'enabled' in l3['ipv6']: + ipv6_enabled = 
l3['ipv6']['enabled'] + + if ipv4_addrs: + ipv4_addrs_pri_payload = [] + ipv4_addrs_sec_payload = [] + for item in ipv4_addrs: + ipv4_addr_mask = item['address'].split('/') + ipv4 = ipv4_addr_mask[0] + ipv4_mask = ipv4_addr_mask[1] + ipv4_secondary = item['secondary'] + if ipv4_secondary: + ipv4_addrs_sec_payload.append(self.build_create_addr_payload(ipv4, ipv4_mask, ipv4_secondary)) + else: + ipv4_addrs_pri_payload.append(self.build_create_addr_payload(ipv4, ipv4_mask, ipv4_secondary)) + if ipv4_addrs_pri_payload: + payload = self.build_create_payload(ipv4_addrs_pri_payload) + ipv4_addrs_req = {"path": ipv4_addrs_url.format(intf_name=l3_interface_name, sub_intf_name=sub_intf), "method": PATCH, "data": payload} + requests.append(ipv4_addrs_req) + if ipv4_addrs_sec_payload: + payload = self.build_create_payload(ipv4_addrs_sec_payload) + ipv4_addrs_req = {"path": ipv4_addrs_url.format(intf_name=l3_interface_name, sub_intf_name=sub_intf), "method": PATCH, "data": payload} + requests.append(ipv4_addrs_req) + + if ipv4_anycast: + anycast_payload = {'openconfig-interfaces-ext:static-anycast-gateway': ipv4_anycast} + anycast_url = ipv4_anycast_url.format(intf_name=l3_interface_name, sub_intf_name=sub_intf) + requests.append({'path': anycast_url, 'method': PATCH, 'data': anycast_payload}) + + if ipv6_addrs: + ipv6_addrs_payload = [] + for item in ipv6_addrs: + ipv6_addr_mask = item['address'].split('/') + ipv6 = ipv6_addr_mask[0] + ipv6_mask = ipv6_addr_mask[1] + ipv6_addrs_payload.append(self.build_create_addr_payload(ipv6, ipv6_mask)) + if ipv6_addrs_payload: + payload = self.build_create_payload(ipv6_addrs_payload) + ipv6_addrs_req = {"path": ipv6_addrs_url.format(intf_name=l3_interface_name, sub_intf_name=sub_intf), "method": PATCH, "data": payload} + requests.append(ipv6_addrs_req) + + if ipv6_enabled is not None: + payload = self.build_update_ipv6_enabled(ipv6_enabled) + ipv6_enabled_req = {"path": ipv6_enabled_url.format(intf_name=l3_interface_name, 
sub_intf_name=sub_intf), "method": PATCH, "data": payload} + requests.append(ipv6_enabled_req) + + return requests + + def validate_primary_ips(self, want): + error_intf = {} + for l3 in want: + l3_interface_name = l3.get('name') + + ipv4_addrs = [] + if l3.get('ipv4') and l3['ipv4'].get('addresses'): + ipv4_addrs = l3['ipv4']['addresses'] + + if ipv4_addrs: + ipv4_pri_addrs = [addr['address'] for addr in ipv4_addrs if not addr['secondary']] + if len(ipv4_pri_addrs) > 1: + error_intf[l3_interface_name] = ipv4_pri_addrs + + if error_intf: + err = "Multiple ipv4 primary ips found! " + str(error_intf) + self._module.fail_json(msg=str(err), code=300) + + def build_create_payload(self, addrs_payload): + payload = {'openconfig-if-ip:addresses': {'address': addrs_payload}} + return payload + + def build_create_addr_payload(self, ip, mask, secondary=None): + cfg = {'ip': ip, 'prefix-length': float(mask)} + if secondary: + cfg['secondary'] = secondary + addr_payload = {'ip': ip, 'openconfig-if-ip:config': cfg} + return addr_payload + + def get_sub_interface_name(self, name): + sub_intf = "subinterfaces/subinterface=0" + if name.startswith("Vlan"): + sub_intf = "openconfig-vlan:routed-vlan" + return sub_intf + + def build_update_ipv6_enabled(self, ipv6_enabled): + payload = {'config': {'enabled': ipv6_enabled}} + return payload diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py new file mode 100644 index 00000000..541de2c4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py @@ -0,0 +1,421 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_lag_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

try:
    from urllib import quote
except ImportError:
    from urllib.parse import quote

import json

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list,
    search_obj_in_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    update_states,
    get_diff,
    normalize_interface_name,
    remove_empties_from_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
import traceback

# jinja2 is an optional dependency; record any import failure so callers can
# report it instead of crashing at import time.
LIB_IMP_ERR = None
ERR_MSG = None
try:
    import jinja2
    HAS_LIB = True
except Exception as e:
    HAS_LIB = False
    ERR_MSG = to_native(e)
    LIB_IMP_ERR = traceback.format_exc()


PUT = 'put'
PATCH = 'patch'
DELETE = 'delete'
TEST_KEYS = [
    {'interfaces': {'member': ''}},
]


class Lag_interfaces(ConfigBase):
    """
    The sonic_lag_interfaces class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lag_interfaces',
    ]

    params = ('name', 'members')

    def __init__(self, module):
        super(Lag_interfaces, self).__init__(module)

    def get_lag_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        lag_interfaces_facts = facts['ansible_network_resources'].get('lag_interfaces')
        if not lag_interfaces_facts:
            return []
        return lag_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        commands = list()
        existing_lag_interfaces_facts = self.get_lag_interfaces_facts()
        commands, requests = self.set_config(existing_lag_interfaces_facts)
        if commands:
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
            result['commands'] = commands

        changed_lag_interfaces_facts = self.get_lag_interfaces_facts()

        result['before'] = existing_lag_interfaces_facts
        if result['changed']:
            result['after'] = changed_lag_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lag_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        normalize_interface_name(want, self._module)
        have = existing_lag_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        diff = get_diff(want, have, TEST_KEYS)
        if diff:
            diff_members, diff_portchannels = self.diff_list_for_member_creation(diff)
        else:
            diff_members = []
            diff_portchannels = []

        state = self._module.params['state']
        if state in ('overridden', 'merged', 'replaced') and not want:
            self._module.fail_json(msg='value of config parameter must not be empty for state {0}'.format(state))

        if state == 'overridden':
            commands, requests = self._state_overridden(want, have, diff_members, diff_portchannels)
        elif state == 'deleted':
            commands, requests = self._state_deleted(want, have, diff)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff_members, diff_portchannels)
        elif state == 'replaced':
            commands, requests = self._state_replaced(want, have, diff_members, diff_portchannels)

        return commands, requests

    def _state_replaced(self, want, have, diff_members, diff_portchannels):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = list()
        commands = list()
        delete_list = get_diff(have, want, TEST_KEYS)
        delete_members, delete_portchannels = self.diff_list_for_member_creation(delete_list)
        replaced_list = list()

        # Only members of portchannels that are being replaced are deleted.
        for i in want:
            list_obj = search_obj_in_list(i['name'], delete_members, "name")
            if list_obj:
                replaced_list.append(list_obj)
        requests = self.get_delete_lag_interfaces_requests(replaced_list)
        if requests:
            commands.extend(update_states(replaced_list, "replaced"))
        replaced_commands, replaced_requests = self.template_for_lag_creation(have, diff_members, diff_portchannels, "replaced")
        if replaced_requests:
            commands.extend(replaced_commands)
            requests.extend(replaced_requests)

        return commands, requests

    def _state_overridden(self, want, have, diff_members, diff_portchannels):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = list()
        commands = list()
        delete_list = get_diff(have, want, TEST_KEYS)
        delete_members, delete_portchannels = self.diff_list_for_member_creation(delete_list)
        replaced_list = list()
        for i in want:
            list_obj = search_obj_in_list(i['name'], delete_members, "name")
            if list_obj:
                replaced_list.append(list_obj)
        requests = self.get_delete_lag_interfaces_requests(replaced_list)
        commands.extend(update_states(replaced_list, "overridden"))
        delete_members = get_diff(delete_members, replaced_list, TEST_KEYS)
        commands_overridden, requests_overridden = self.template_for_lag_deletion(have, delete_members, delete_portchannels, "overridden")
        requests.extend(requests_overridden)
        commands.extend(commands_overridden)
        override_commands, override_requests = self.template_for_lag_creation(have, diff_members, diff_portchannels, "overridden")
        commands.extend(override_commands)
        requests.extend(override_requests)
        return commands, requests

    def _state_merged(self, want, have, diff_members, diff_portchannels):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        return self.template_for_lag_creation(have, diff_members, diff_portchannels, "merged")

    def _state_deleted(self, want, have, diff):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = list()
        requests = list()
        portchannel_requests = list()
        # If want is empty, delete all the lag interfaces and all portchannels.
        if not want:
            requests = self.get_delete_all_lag_interfaces_requests()
            portchannel_requests = self.get_delete_all_portchannel_requests()
            requests.extend(portchannel_requests)
            # Bug fix: use the lowercase "deleted" marker for consistency with
            # every other state reported by this module family (was "Deleted").
            commands.extend(update_states(have, "deleted"))
        else:  # delete specific lag interfaces and specific portchannels
            commands = get_diff(want, diff, TEST_KEYS)
            commands = remove_empties_from_list(commands)
            want_members, want_portchannels = self.diff_list_for_member_creation(commands)
            commands, requests = self.template_for_lag_deletion(have, want_members, want_portchannels, "deleted")
        return commands, requests

    def diff_list_for_member_creation(self, diff):
        """Split a diff list into entries carrying members and entries that
        only name a portchannel."""
        diff_members = [x for x in diff if "members" in x.keys()]
        diff_portchannels = [x for x in diff if ("name" in x.keys() and "members" not in x.keys())]
        return diff_members, diff_portchannels

    def template_for_lag_creation(self, have, diff_members, diff_portchannels, state_name):
        """Build commands/requests creating portchannels and attaching members."""
        commands = list()
        requests = list()
        if diff_members:
            commands_portchannels, requests = self.call_create_port_channel(diff_members, have)
            if commands_portchannels:
                po_list = [{'name': x['name']} for x in commands_portchannels if x['name']]
            else:
                po_list = []
            if po_list:
                commands.extend(update_states(po_list, state_name))
            diff_members_remove_none = [x for x in diff_members if x["members"]]
            if diff_members_remove_none:
                request = self.create_lag_interfaces_requests(diff_members_remove_none)
                # NOTE(review): when no member requests result, this discards
                # any portchannel-creation requests gathered above; preserved
                # as-is to keep behavior identical -- confirm intent upstream.
                if request:
                    requests.extend(request)
                else:
                    requests = request
            commands.extend(update_states(diff_members, state_name))
        if diff_portchannels:
            portchannels, po_requests = self.call_create_port_channel(diff_portchannels, have)
            requests.extend(po_requests)
            commands.extend(update_states(portchannels, state_name))
        return commands, requests

    def template_for_lag_deletion(self, have, delete_members, delete_portchannels, state_name):
        """Build commands/requests detaching members and deleting portchannels."""
        commands = list()
        requests = list()
        portchannel_requests = list()
        if delete_members:
            delete_members_remove_none = [x for x in delete_members if x["members"]]
            requests = self.get_delete_lag_interfaces_requests(delete_members_remove_none)
            # Entries with an explicitly empty 'members' dict request removal
            # of every member currently on the portchannel.
            delete_all_members = [x for x in delete_members if "members" in x.keys() and not x["members"]]
            delete_all_list = list()
            if delete_all_members:
                for i in delete_all_members:
                    list_obj = search_obj_in_list(i['name'], have, "name")
                    if list_obj['members']:
                        delete_all_list.append(list_obj)
            if delete_all_list:
                deleteall_requests = self.get_delete_lag_interfaces_requests(delete_all_list)
            else:
                deleteall_requests = []
            if requests and deleteall_requests:
                requests.extend(deleteall_requests)
            elif deleteall_requests:
                requests = deleteall_requests
            if requests:
                commands.extend(update_states(delete_members, state_name))
        if delete_portchannels:
            portchannel_requests = self.get_delete_portchannel_requests(delete_portchannels)
            commands.extend(update_states(delete_portchannels, state_name))
        if requests:
            requests.extend(portchannel_requests)
        else:
            requests = portchannel_requests
        return commands, requests

    def create_lag_interfaces_requests(self, commands):
        """Build PATCH requests assigning each member interface its
        aggregate-id."""
        requests = []
        for i in commands:
            if i.get('members') and i['members'].get('interfaces'):
                interfaces = i['members']['interfaces']
            else:
                continue
            for each in interfaces:
                edit_payload = self.build_create_payload_member(i['name'])
                template = 'data/openconfig-interfaces:interfaces/interface=%s/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id'
                edit_path = template % quote(each['member'], safe='')
                requests.append({'path': edit_path, 'method': PATCH, 'data': edit_payload})
        return requests

    def build_create_payload_member(self, name):
        """Build the aggregate-id payload for members of portchannel 'name'.

        The payload carries only the numeric suffix of "PortChannel<n>".
        Built directly as a dict instead of rendering a jinja2 string
        template through json.loads -- identical payload, no fragile string
        templating.
        """
        po_id = name.split("PortChannel", 1)[1]
        return {"openconfig-if-aggregate:aggregate-id": po_id}

    def build_create_payload_portchannel(self, name, mode):
        """Build the interface-creation payload for portchannel 'name'.

        :param mode: "static" adds an explicit STATIC lag-type; any other
                     value (including None) omits the aggregation config.
        """
        interface = {"name": name, "config": {"name": name}}
        if mode == "static":
            interface["openconfig-if-aggregation:aggregation"] = {"config": {"lag-type": mode.upper()}}
        return {"openconfig-interfaces:interfaces": {"interface": [interface]}}

    def create_port_channel(self, cmd):
        """Build PATCH requests creating each portchannel in 'cmd'."""
        requests = []
        path = 'data/openconfig-interfaces:interfaces'
        for i in cmd:
            payload = self.build_create_payload_portchannel(i['name'], i.get('mode', None))
            requests.append({'path': path, 'method': PATCH, 'data': payload})
        return requests

    def call_create_port_channel(self, commands, have):
        """Filter out portchannels that already exist, then build creation
        requests for the rest."""
        commands_list = list()
        for c in commands:
            if not any(d['name'] == c['name'] for d in have):
                commands_list.append(c)
        requests = self.create_port_channel(commands_list)
        return commands_list, requests

    def get_delete_all_lag_interfaces_requests(self):
        """Build the request removing every portchannel member binding."""
        requests = []
        delete_all_lag_url = 'data/sonic-portchannel:sonic-portchannel/PORTCHANNEL_MEMBER/PORTCHANNEL_MEMBER_LIST'
        requests.append({"path": delete_all_lag_url, "method": DELETE})
        return requests

    def get_delete_all_portchannel_requests(self):
        """Build the request removing every portchannel."""
        requests = []
        delete_all_lag_url = 'data/sonic-portchannel:sonic-portchannel/PORTCHANNEL/PORTCHANNEL_LIST'
        requests.append({"path": delete_all_lag_url, "method": DELETE})
        return requests

    def get_delete_lag_interfaces_requests(self, commands):
        """Build DELETE requests detaching the listed member interfaces."""
        requests = []
        url = 'data/openconfig-interfaces:interfaces/interface={}/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id'
        for c in commands:
            if c.get('members') and c['members'].get('interfaces'):
                interfaces = c['members']['interfaces']
            else:
                continue

            for each in interfaces:
                ifname = each["member"]
                requests.append({"path": url.format(ifname), "method": DELETE})

        return requests

    def get_delete_portchannel_requests(self, commands):
        """Build DELETE requests removing the listed portchannels."""
        requests = []
        url = 'data/openconfig-interfaces:interfaces/interface={}'
        for c in commands:
            name = c["name"]
            requests.append({"path": url.format(name), "method": DELETE})

        return requests
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_mclag class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    update_states,
    get_diff,
    get_normalize_interface_name,
    normalize_interface_name
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils.connection import ConnectionError

PATCH = 'patch'
DELETE = 'delete'

TEST_KEYS = [
    {'config': {'domain_id': ''}},
    {'vlans': {'vlan': ''}},
    {'portchannels': {'lag': ''}},
]


class Mclag(ConfigBase):
    """
    The sonic_mclag class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'mclag',
    ]

    def __init__(self, module):
        super(Mclag, self).__init__(module)

    def get_mclag_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        mclag_facts = facts['ansible_network_resources'].get('mclag')
        if not mclag_facts:
            return []
        return mclag_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        commands = list()

        existing_mclag_facts = self.get_mclag_facts()
        commands, requests = self.set_config(existing_mclag_facts)
        if commands and len(requests) > 0:
            if not self._module.check_mode:
                self.edit_config(requests)
            result['changed'] = True
        result['commands'] = commands

        changed_mclag_facts = self.get_mclag_facts()

        result['before'] = existing_mclag_facts
        if result['changed']:
            result['after'] = changed_mclag_facts

        result['warnings'] = warnings
        return result

    def edit_config(self, requests):
        """Send the requests to the device, converting connection errors to a
        module failure."""
        try:
            # Fix: the original bound the response to an unused local.
            edit_config(self._module, to_request(self._module, requests))
        except ConnectionError as exc:
            self._module.fail_json(msg=str(exc), code=exc.code)

    def set_config(self, existing_mclag_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        if want:
            # Normalize all interface names so diffs compare like with like.
            peer_link = want.get("peer_link", None)
            if peer_link:
                want['peer_link'] = get_normalize_interface_name(want['peer_link'], self._module)
            unique_ip = want.get('unique_ip', None)
            if unique_ip:
                vlans_list = unique_ip['vlans']
                if vlans_list:
                    normalize_interface_name(vlans_list, self._module, 'vlan')
            members = want.get('members', None)
            if members:
                portchannels_list = members['portchannels']
                if portchannels_list:
                    normalize_interface_name(portchannels_list, self._module, 'lag')
        have = existing_mclag_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        # Fix: initialize so an unexpected state cannot raise NameError
        # (this module only supports 'merged' and 'deleted').
        commands = []
        state = self._module.params['state']
        if state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            diff = get_diff(want, have, TEST_KEYS)
            commands = self._state_merged(want, have, diff)
        return commands

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        requests = []
        commands = []
        if diff:
            requests = self.get_create_mclag_request(want, diff)
            if len(requests) > 0:
                commands = update_states(diff, "merged")
        return commands, requests

    def _state_deleted(self, want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        requests = []
        if not want:
            if have:
                requests = self.get_delete_all_mclag_domain_request()
                if len(requests) > 0:
                    commands = update_states(have, "deleted")
        else:
            # Strip attributes still at their defaults so they are not
            # reported as deleted, then delete only what both 'want' and the
            # device agree on.
            new_have = self.remove_default_entries(have)
            d_diff = get_diff(want, new_have, TEST_KEYS, is_skeleton=True)
            diff_want = get_diff(want, d_diff, TEST_KEYS, is_skeleton=True)
            if diff_want:
                requests = self.get_delete_mclag_attribute_request(want, diff_want)
                if len(requests) > 0:
                    commands = update_states(diff_want, "deleted")
        return commands, requests

    def remove_default_entries(self, data):
        """Return a copy of 'data' without None values and without attributes
        equal to their device defaults (keepalive=1, session_timeout=30)."""
        new_data = {}
        if not data:
            return new_data
        default_val_dict = {
            'keepalive': 1,
            'session_timeout': 30,
        }
        for key, val in data.items():
            if not (val is None or (key in default_val_dict and val == default_val_dict[key])):
                new_data[key] = val

        return new_data

    def get_delete_mclag_attribute_request(self, want, command):
        """Build DELETE requests for each mclag attribute present in 'command'."""
        requests = []
        url_common = 'data/openconfig-mclag:mclag/mclag-domains/mclag-domain=%s/config' % (want["domain_id"])
        method = DELETE
        if command.get('source_address') is not None:
            requests.append({'path': url_common + '/source-address', 'method': method})
        if command.get('peer_address') is not None:
            requests.append({'path': url_common + '/peer-address', 'method': method})
        if command.get('peer_link') is not None:
            requests.append({'path': url_common + '/peer-link', 'method': method})
        if command.get('keepalive') is not None:
            requests.append({'path': url_common + '/keepalive-interval', 'method': method})
        if command.get('session_timeout') is not None:
            requests.append({'path': url_common + '/session-timeout', 'method': method})
        if command.get('system_mac') is not None:
            requests.append({'path': url_common + '/mclag-system-mac', 'method': method})
        if 'unique_ip' in command and command['unique_ip'] is not None:
            if command['unique_ip']['vlans'] is None:
                # No vlan list given: remove unique-ip from all vlan interfaces.
                requests.append({'path': 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface', 'method': method})
            else:
                for each in command['unique_ip']['vlans']:
                    if each:
                        unique_ip_url = 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=%s' % (each['vlan'])
                        requests.append({'path': unique_ip_url, 'method': method})
        if 'members' in command and command['members'] is not None:
            if command['members']['portchannels'] is None:
                # No portchannel list given: detach all member interfaces.
                requests.append({'path': 'data/openconfig-mclag:mclag/interfaces/interface', 'method': method})
            else:
                for each in command['members']['portchannels']:
                    if each:
                        portchannel_url = 'data/openconfig-mclag:mclag/interfaces/interface=%s' % (each['lag'])
                        requests.append({'path': portchannel_url, 'method': method})
        return requests

    def get_delete_all_mclag_domain_request(self):
        """Build the request deleting every mclag domain."""
        requests = []
        path = 'data/openconfig-mclag:mclag/mclag-domains'
        requests.append({'path': path, 'method': DELETE})
        return requests

    def get_create_mclag_request(self, want, commands):
        """Build PATCH requests creating/updating the mclag domain, the
        unique-ip vlan list, and the portchannel member list."""
        requests = []
        path = 'data/openconfig-mclag:mclag/mclag-domains/mclag-domain'
        method = PATCH
        payload = self.build_create_payload(want, commands)
        if payload:
            requests.append({'path': path, 'method': method, 'data': payload})
        if 'unique_ip' in commands and commands['unique_ip'] is not None:
            if commands['unique_ip']['vlans']:
                unique_ip_path = 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface'
                unique_ip_payload = self.build_create_unique_ip_payload(commands['unique_ip']['vlans'])
                requests.append({'path': unique_ip_path, 'method': PATCH, 'data': unique_ip_payload})
        if 'members' in commands and commands['members'] is not None:
            if commands['members']['portchannels']:
                portchannel_path = 'data/openconfig-mclag:mclag/interfaces/interface'
                portchannel_payload = self.build_create_portchannel_payload(want, commands['members']['portchannels'])
                requests.append({'path': portchannel_path, 'method': PATCH, 'data': portchannel_payload})
        return requests

    def build_create_payload(self, want, commands):
        """Build the mclag-domain config payload from the changed attributes
        in 'commands'; returns {} when nothing changed."""
        temp = {}
        if commands.get('session_timeout') is not None:
            temp['session-timeout'] = commands['session_timeout']
        if commands.get('keepalive') is not None:
            temp['keepalive-interval'] = commands['keepalive']
        if commands.get('source_address') is not None:
            temp['source-address'] = commands['source_address']
        if commands.get('peer_address') is not None:
            temp['peer-address'] = commands['peer_address']
        if commands.get('peer_link') is not None:
            temp['peer-link'] = str(commands['peer_link'])
        if commands.get('system_mac') is not None:
            temp['openconfig-mclag:mclag-system-mac'] = str(commands['system_mac'])
        if temp:
            mclag_dict = {"domain-id": want["domain_id"], "config": temp}
            payload = {"openconfig-mclag:mclag-domain": [mclag_dict]}
        else:
            payload = {}
        return payload

    def build_create_unique_ip_payload(self, commands):
        """Build the vlan-interface payload enabling unique-ip on each vlan."""
        payload = {"openconfig-mclag:vlan-interface": []}
        for each in commands:
            payload['openconfig-mclag:vlan-interface'].append({"name": each['vlan'], "config": {"name": each['vlan'], "unique-ip-enable": "ENABLE"}})
        return payload

    def build_create_portchannel_payload(self, want, commands):
        """Build the interface payload binding each portchannel to the domain."""
        payload = {"openconfig-mclag:interface": []}
        for each in commands:
            payload['openconfig-mclag:interface'].append({"name": each['lag'], "config": {"name": each['lag'], "mclag-domain-id": want['domain_id']}})
        return payload
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_ntp class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_diff, + update_states, + normalize_interface_name, + normalize_interface_name_list +) +from ansible.module_utils.connection import ConnectionError + +PATCH = 'PATCH' +DELETE = 'DELETE' + +TEST_KEYS = [ + { + "vrf": "", "enable_ntp_auth": "", "source_interfaces": "", "trusted_keys": "", + "servers": {"address": ""}, "ntp_keys": {"key_id": ""} + } +] + + +class Ntp(ConfigBase): + """ + The sonic_ntp class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'ntp', + ] + + def __init__(self, module): + super(Ntp, self).__init__(module) + + def get_ntp_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + ntp_facts = facts['ansible_network_resources'].get('ntp') + + if not ntp_facts: + return [] + return 
ntp_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + commands = list() + requests = list() + + existing_ntp_facts = self.get_ntp_facts() + + commands, requests = self.set_config(existing_ntp_facts) + + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_ntp_facts = self.get_ntp_facts() + + result['before'] = existing_ntp_facts + if result['changed']: + result['after'] = changed_ntp_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_ntp_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + if want is None: + want = [] + + have = existing_ntp_facts + + resp = self.set_state(want, have) + + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + + self.validate_want(want, state) + self.preprocess_want(want, state) + + if state == 'deleted': + commands, requests = self._state_deleted(want, have) + elif state == 'merged': + commands, requests = self._state_merged(want, have) + + return commands, requests + + def _state_merged(self, want, have): + """ The command 
generator when state is merged + + :param want: the additive configuration as a dictionary + :param have: the current configuration as a dictionary + :returns: the commands necessary to merge the provided into + the current configuration + """ + diff = get_diff(want, have, TEST_KEYS) + + commands = diff + requests = [] + if commands: + requests = self.get_merge_requests(commands, have) + + if len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param have: the current configuration as a dictionary + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + diff = get_diff(want, have, TEST_KEYS) + + want_none = {'enable_ntp_auth': None, 'ntp_keys': None, + 'servers': None, 'source_interfaces': [], + 'trusted_keys': None, 'vrf': None} + want_any = get_diff(want, want_none, TEST_KEYS) + # if want_any is none, then delete all NTP configurations + + delete_all = False + if not want_any: + commands = have + delete_all = True + else: + if not diff: + commands = want_any + else: + commands = get_diff(want_any, diff, TEST_KEYS) + + requests = [] + if commands: + requests = self.get_delete_requests(commands, delete_all) + + if len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def validate_want(self, want, state): + + if state == 'deleted': + if 'servers' in want and want['servers'] is not None: + for server in want['servers']: + key_id_config = server.get('key_id', None) + minpoll_config = server.get('minpoll', None) + maxpoll_config = server.get('maxpoll', None) + if key_id_config or minpoll_config or maxpoll_config: + err_msg = "NTP server parameter(s) can not be deleted." 
+ self._module.fail_json(msg=err_msg, code=405) + + if 'ntp_keys' in want and want['ntp_keys'] is not None: + for ntp_key in want['ntp_keys']: + encrypted_config = ntp_key.get('encrypted', None) + key_type_config = ntp_key.get('key_type', None) + key_value_config = ntp_key.get('key_value', None) + if encrypted_config or key_type_config or key_value_config: + err_msg = "NTP ntp_key parameter(s) can not be deleted." + self._module.fail_json(msg=err_msg, code=405) + + def preprocess_want(self, want, state): + + if 'source_interfaces' in want: + want['source_interfaces'] = normalize_interface_name_list(want['source_interfaces'], self._module) + + if state == 'deleted': + enable_auth_want = want.get('enable_ntp_auth', None) + if enable_auth_want is not None: + want['enable_ntp_auth'] = True + + elif state == 'merged': + if 'servers' in want and want['servers'] is not None: + for server in want['servers']: + if 'key_id' in server and not server['key_id']: + server.pop('key_id') + if 'minpoll' in server and not server['minpoll']: + server.pop('minpoll') + if 'maxpoll' in server and not server['maxpoll']: + server.pop('maxpoll') + + def get_merge_requests(self, configs, have): + + requests = [] + + enable_auth_config = configs.get('enable_ntp_auth', None) + if enable_auth_config is not None: + enable_auth_request = self.get_create_enable_ntp_auth_requests(enable_auth_config, have) + if enable_auth_request: + requests.extend(enable_auth_request) + + src_intf_config = configs.get('source_interfaces', None) + if src_intf_config: + src_intf_request = self.get_create_source_interface_requests(src_intf_config, have) + if src_intf_request: + requests.extend(src_intf_request) + + keys_config = configs.get('ntp_keys', None) + if keys_config: + keys_request = self.get_create_keys_requests(keys_config, have) + if keys_request: + requests.extend(keys_request) + + servers_config = configs.get('servers', None) + if servers_config: + servers_request = 
self.get_create_servers_requests(servers_config, have) + if servers_request: + requests.extend(servers_request) + + trusted_key_config = configs.get('trusted_keys', None) + if trusted_key_config: + trusted_key_request = self.get_create_trusted_key_requests(trusted_key_config, have) + if trusted_key_request: + requests.extend(trusted_key_request) + + vrf_config = configs.get('vrf', None) + if vrf_config: + vrf_request = self.get_create_vrf_requests(vrf_config, have) + if vrf_request: + requests.extend(vrf_request) + + return requests + + def get_delete_requests(self, configs, delete_all): + + requests = [] + + if delete_all: + all_ntp_request = self.get_delete_all_ntp_requests(configs) + if all_ntp_request: + requests.extend(all_ntp_request) + return requests + + src_intf_config = configs.get('source_interfaces', None) + if src_intf_config: + src_intf_request = self.get_delete_source_interface_requests(src_intf_config) + if src_intf_request: + requests.extend(src_intf_request) + + servers_config = configs.get('servers', None) + if servers_config: + servers_request = self.get_delete_servers_requests(servers_config) + if servers_request: + requests.extend(servers_request) + + trusted_key_config = configs.get('trusted_keys', None) + if trusted_key_config: + trusted_key_request = self.get_delete_trusted_key_requests(trusted_key_config) + if trusted_key_request: + requests.extend(trusted_key_request) + + keys_config = configs.get('ntp_keys', None) + if keys_config: + keys_request = self.get_delete_keys_requests(keys_config) + if keys_request: + requests.extend(keys_request) + + enable_auth_config = configs.get('enable_ntp_auth', None) + if enable_auth_config is not None: + enable_auth_request = self.get_delete_enable_ntp_auth_requests(enable_auth_config) + if enable_auth_request: + requests.extend(enable_auth_request) + + vrf_config = configs.get('vrf', None) + if vrf_config: + vrf_request = self.get_delete_vrf_requests(vrf_config) + if vrf_request: + 
requests.extend(vrf_request) + + return requests + + def get_create_source_interface_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/config/source-interface' + payload = {"openconfig-system:source-interface": configs} + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_create_servers_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/servers' + server_configs = [] + for config in configs: + if 'key_id' in config: + config['key-id'] = config['key_id'] + config.pop('key_id') + server_addr = config['address'] + server_config = {"address": server_addr, "config": config} + server_configs.append(server_config) + + payload = {"openconfig-system:servers": {"server": server_configs}} + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_create_vrf_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/config/network-instance' + payload = {"openconfig-system:network-instance": configs} + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_create_enable_ntp_auth_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/config/enable-ntp-auth' + payload = {"openconfig-system:enable-ntp-auth": configs} + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_create_trusted_key_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/config/trusted-key' + payload = {"openconfig-system:trusted-key": configs} 
+ request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_create_keys_requests(self, configs, have): + + requests = [] + + # Create URL and payload + method = PATCH + url = 'data/openconfig-system:system/ntp/ntp-keys' + key_configs = [] + for config in configs: + key_id = config['key_id'] + if 'key_id' in config: + config['key-id'] = config['key_id'] + config.pop('key_id') + if 'key_type' in config: + config['key-type'] = config['key_type'] + config.pop('key_type') + if 'key_value' in config: + config['key-value'] = config['key_value'] + config.pop('key_value') + + key_config = {"key-id": key_id, "config": config} + key_configs.append(key_config) + + payload = {"openconfig-system:ntp-keys": {"ntp-key": key_configs}} + request = {"path": url, "method": method, "data": payload} + requests.append(request) + + return requests + + def get_delete_all_ntp_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + + servers_config = configs.get('servers', None) + src_intf_config = configs.get('source_interfaces', None) + vrf_config = configs.get('vrf', None) + enable_auth_config = configs.get('enable_ntp_auth', None) + trusted_key_config = configs.get('trusted_keys', None) + + if servers_config or src_intf_config or vrf_config or \ + trusted_key_config or enable_auth_config is not None: + url = 'data/openconfig-system:system/ntp' + request = {"path": url, "method": method} + requests.append(request) + + keys_config = configs.get('ntp_keys', None) + if keys_config: + url = 'data/openconfig-system:system/ntp/ntp-keys' + request = {"path": url, "method": method} + requests.append(request) + + return requests + + def get_delete_source_interface_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + for config in configs: + url = 'data/openconfig-system:system/ntp/config/source-interface={0}'.format(config) + request = {"path": url, "method": 
method} + requests.append(request) + + return requests + + def get_delete_servers_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + for config in configs: + server_addr = config['address'] + url = 'data/openconfig-system:system/ntp/servers/server={0}'.format(server_addr) + request = {"path": url, "method": method} + requests.append(request) + + return requests + + def get_delete_vrf_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + url = 'data/openconfig-system:system/ntp/config/network-instance' + request = {"path": url, "method": method} + requests.append(request) + + return requests + + def get_delete_enable_ntp_auth_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + url = 'data/openconfig-system:system/ntp/config/enable-ntp-auth' + request = {"path": url, "method": method} + requests.append(request) + + return requests + + def get_delete_trusted_key_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + for config in configs: + url = 'data/openconfig-system:system/ntp/config/trusted-key={0}'.format(config) + request = {"path": url, "method": method} + requests.append(request) + + return requests + + def get_delete_keys_requests(self, configs): + + requests = [] + + # Create URL and payload + method = DELETE + key_configs = [] + for config in configs: + key_id = config['key_id'] + url = 'data/openconfig-system:system/ntp/ntp-keys/ntp-key={0}'.format(key_id) + request = {"path": url, "method": method} + requests.append(request) + + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py new file mode 100644 index 00000000..371019d0 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py @@ -0,0 +1,260 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_port_breakout class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.module_utils.connection import ConnectionError +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, + get_speed_from_breakout_mode, + get_breakout_mode, +) + +PATCH = 'patch' +DELETE = 'delete' +POST = 'post' + + +class Port_breakout(ConfigBase): + """ + The sonic_port_breakout class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'port_breakout', + ] + + def __init__(self, module): + super(Port_breakout, self).__init__(module) + + def get_port_breakout_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + port_breakout_facts = 
facts['ansible_network_resources'].get('port_breakout') + if not port_breakout_facts: + return [] + return port_breakout_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_port_breakout_facts = self.get_port_breakout_facts() + commands, requests = self.set_config(existing_port_breakout_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_port_breakout_facts = self.get_port_breakout_facts() + + result['before'] = existing_port_breakout_facts + if result['changed']: + result['after'] = changed_port_breakout_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_port_breakout_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_port_breakout_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + if not want: + want = [] + + have_new = self.get_all_breakout_mode(have) + diff = get_diff(want, have_new) + + if state == 'overridden': + commands, requests = 
self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_port_breakout_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the port_breakouti except admin + if not want: + commands = have + else: + commands = want + + requests = self.get_delete_port_breakout_requests(commands, have) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_port_breakout_payload(self, name, mode, match): + payload = {} + speed = get_speed_from_breakout_mode(mode) + if speed: + num_breakouts = int(mode[0]) + mode_cfg = {'groups': {'group': [{'index': 1, 'config': {'index': 1, 'num-breakouts': num_breakouts, 'breakout-speed': speed}}]}} + port_cfg = {'openconfig-platform-port:breakout-mode': mode_cfg} + compo_cfg = {'name': name, 'port': port_cfg} + 
payload = {'openconfig-platform:components': {'component': [compo_cfg]}} + return payload + + def get_delete_single_port_breakout(self, name, match): + del_req = None + if match: + del_url = 'data/openconfig-platform:components/component=%s/port/openconfig-platform-port:breakout-mode' % (name.replace('/', '%2f')) + del_req = {'path': del_url, 'method': DELETE} + return del_req + + def get_modify_port_breakout_request(self, conf, match): + request = None + name = conf.get('name', None) + mode = conf.get('mode', None) + url = 'data/openconfig-platform:components' + payload = self.get_port_breakout_payload(name, mode, match) + request = {'path': url, 'method': PATCH, 'data': payload} + return request + + def get_modify_port_breakout_requests(self, commands, have): + requests = [] + if not commands: + return requests + + for conf in commands: + match = next((cfg for cfg in have if cfg['name'] == conf['name']), None) + req = self.get_modify_port_breakout_request(conf, match) + if req: + requests.append(req) + return requests + + def get_default_port_breakout_modes(self): + def_port_breakout_modes = [] + request = [{"path": "operations/sonic-port-breakout:breakout_capabilities", "method": POST}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + raw_port_breakout_list = [] + if "sonic-port-breakout:output" in response[0][1]: + raw_port_breakout_list = response[0][1].get("sonic-port-breakout:output", {}).get('caps', []) + + for port_breakout in raw_port_breakout_list: + name = port_breakout.get('port', None) + mode = port_breakout.get('defmode', None) + if name and mode: + if '[' in mode: + mode = mode[:mode.index('[')] + def_port_breakout_modes.append({'name': name, 'mode': mode}) + return def_port_breakout_modes + + def get_delete_port_breakout_requests(self, commands, have): + requests = [] + if not commands: + return requests + + have_new = 
self.get_all_breakout_mode(have) + for conf in commands: + name = conf['name'] + match = next((cfg for cfg in have_new if cfg['name'] == name), None) + req = self.get_delete_single_port_breakout(name, match) + if req: + requests.append(req) + return requests + + def get_all_breakout_mode(self, have): + new_have = [] + for cfg in have: + name = cfg['name'] + mode = get_breakout_mode(self._module, name) + if mode: + new_have.append({'name': name, 'mode': mode}) + return new_have diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py new file mode 100644 index 00000000..d5c36d3e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py @@ -0,0 +1,458 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_prefix_lists class +It is in this file that the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) + +from 
ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts \ + import Facts + +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils \ + import ( + get_diff, + update_states, + ) + +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) + +# from ansible.module_utils.connection import ConnectionError + +TEST_KEYS = [ + {"config": {"afi": "", "name": ""}}, + {"prefixes": {"action": "", "ge": "", "le": "", "prefix": "", "sequence": ""}} +] + +DELETE = "delete" +PATCH = "patch" + + +class Prefix_lists(ConfigBase): + """ + The sonic_prefix_lists class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'prefix_lists', + ] + + prefix_sets_uri = 'data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets' + prefix_set_uri = 'data/openconfig-routing-policy:routing-policy/defined-sets/\ +prefix-sets/prefix-set' + prefix_set_delete_uri = 'data/openconfig-routing-policy:routing-policy/defined-sets/\ +prefix-sets/prefix-set={}' + prefix_set_delete_all_prefixes_uri = 'data/openconfig-routing-policy:routing-policy/\ +defined-sets/prefix-sets/prefix-set={}/openconfig-routing-policy-ext:extended-prefixes' + prefix_set_delete_prefix_uri = 'data/openconfig-routing-policy:routing-policy/\ +defined-sets/prefix-sets/prefix-set={}/\ +openconfig-routing-policy-ext:extended-prefixes/extended-prefix={},{},{}' + prefix_set_data_path = 'openconfig-routing-policy:prefix-set' + ext_prefix_set_data_path = 'openconfig-routing-policy-ext:extended-prefixes' + + def __init__(self, module): + super(Prefix_lists, self).__init__(module) + + def get_prefix_lists_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, + 
self.gather_network_resources) + prefix_lists_facts = facts['ansible_network_resources'].get('prefix_lists', None) + if not prefix_lists_facts: + return [] + return prefix_lists_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + + existing_prefix_lists_facts = self.get_prefix_lists_facts() + commands, requests = self.set_config(existing_prefix_lists_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_prefix_lists_facts = self.get_prefix_lists_facts() + + result['before'] = existing_prefix_lists_facts + if result['changed']: + result['after'] = changed_prefix_lists_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_prefix_lists_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_prefix_lists_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + diff = get_diff(want, have, TEST_KEYS) + if state == 'deleted': + commands, requests = self._state_deleted(want, 
have) + elif state == 'merged': + commands, requests = self._state_merged(diff) + ret_commands = commands + return ret_commands, requests + + def _state_merged(self, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_prefix_lists_requests(commands) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + commands = list() + if not want or want == []: + commands = have + requests = self.get_delete_all_prefix_list_cfg_requests() + else: + commands = want + requests = self.get_delete_prefix_lists_cfg_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + return commands, requests + + def get_modify_prefix_lists_requests(self, commands): + '''Traverse the input list of configuration "modify" commands obtained + from parsing the input playbook parameters. 
For each command, + create and return the appropriate set of REST API requests to modify + the prefix set specified by the current command.''' + + requests = [] + if not commands: + return requests + + # Create URL and payload + prefix_set_payload_list = [] + for command in commands: + prefix_set_payload = self.get_modify_single_prefix_set_request(command) + if prefix_set_payload: + prefix_set_payload_list.append(prefix_set_payload) + prefix_set_data = {self.prefix_set_data_path: prefix_set_payload_list} + request = {'path': self.prefix_set_uri, 'method': PATCH, 'data': prefix_set_data} + requests.append(request) + return requests + + def get_modify_single_prefix_set_request(self, command): + '''Create and return the appropriate set of REST API requests to modfy + the prefix set configuration specified by the current "command".''' + + request = {} + if not command: + return request + + conf_afi = command.get('afi', None) + conf_name = command.get('name', None) + if not conf_afi or not conf_name: + return request + + prefix_set_payload_header = {'name': conf_name, + 'config': {'name': conf_name, 'mode': conf_afi.upper()}} + + pfx_conf_list = [] + prefixes = command.get('prefixes', None) + + if prefixes: + for prefix in prefixes: + pfx_payload = self.get_modify_prefix_request(prefix, conf_afi) + if pfx_payload: + pfx_conf_list.append(pfx_payload) + + ext_prefix_list_payload = {'extended-prefix': pfx_conf_list} + ext_prefix_list_data = {self.ext_prefix_set_data_path: ext_prefix_list_payload} + + prefix_set_payload = prefix_set_payload_header + prefix_set_payload.update(ext_prefix_list_data) + return prefix_set_payload + + def get_modify_prefix_request(self, prefix, conf_afi): + '''Create a REST API request to update/merge/create the prefix specified by the + "prefix" input parameter.''' + + pfx_payload = {} + prefix_val = prefix.get('prefix', None) + sequence = prefix.get('sequence', None) + action = prefix.get('action', None) + if not prefix_val or not sequence or 
not action: + return None + + prefix_net = self.set_ipaddress_net_attrs(prefix_val, conf_afi) + ge = prefix.get('ge', None) + le = prefix.get('le', None) + pfx_payload['ip-prefix'] = prefix_val + pfx_payload['sequence-number'] = sequence + masklength_range_str = self.get_masklength_range_string(ge, le, prefix_net) + pfx_payload['masklength-range'] = masklength_range_str + pfx_config = {} + pfx_config['sequence-number'] = sequence + pfx_config['ip-prefix'] = prefix_val + pfx_config['masklength-range'] = pfx_payload['masklength-range'] + pfx_config['openconfig-routing-policy-ext:action'] = action.upper() + pfx_payload['config'] = pfx_config + + return pfx_payload + + def get_create_prefix_lists_cfg_requests(self, commands): + '''Placeholder function Modify this function if necessary to enable + separate actions for "CREATE" vs "MERGE" ("PATCH") requests''' + + return self.get_modify_prefix_lists_requests(commands) + + def get_delete_prefix_lists_cfg_requests(self, commands, have): + '''Traverse the input list of configuration "delete" commands obtained + from parsing the input playbook parameters. 
For each command, + create and return the appropriate set of REST API requests to delete + the prefix set configuration specified by the current "command".''' + requests = [] + for command in commands: + new_requests = self.get_delete_single_prefix_cfg_requests(command, have) + if new_requests and len(new_requests) > 0: + requests.extend(new_requests) + return requests + + def get_delete_single_prefix_cfg_requests(self, command, have): + '''Create and return the appropriate set of REST API requests to delete + the prefix set configuration specified by the current "command".''' + + requests = list() + pfx_set_name = command.get('name', None) + if not pfx_set_name: + return requests + + cfg_prefix_set = self.prefix_set_in_config(pfx_set_name, have) + if not cfg_prefix_set: + return requests + + prefixes = command.get('prefixes', None) + if not prefixes or prefixes == []: + requests = self.get_delete_prefix_set_cfg(command) + else: + requests = self.get_delete_one_prefix_list_cfg(cfg_prefix_set, command) + return requests + + def get_delete_prefix_set_cfg(self, command): + '''Create and return a REST API request to delete the prefix set specified + by the current "command".''' + + pfx_set_name = command.get('name', None) + + requests = [{'path': self.prefix_set_delete_uri.format(pfx_set_name), 'method': DELETE}] + return requests + + def get_delete_one_prefix_list_cfg(self, cfg_prefix_set, command): + '''Create the list of REST API prefix deletion requests needed for deletion + of the the requested set of prefixes from the currently configured + prefix set specified by "cfg_prefix_set".''' + + pfx_delete_cfg_list = list() + prefixes = command.get('prefixes', None) + + for prefix in prefixes: + pfx_delete_cfg = self.prefix_get_delete_single_prefix_cfg(prefix, + cfg_prefix_set, + command) + if pfx_delete_cfg and len(pfx_delete_cfg) > 0: + pfx_delete_cfg_list.append(pfx_delete_cfg) + return pfx_delete_cfg_list + + def prefix_get_delete_single_prefix_cfg(self, prefix, 
cfg_prefix_set, command): + '''Create the REST API request to delete the prefix specified by the "prefix" + input parameter from the configured prefix set specified by "cfg_prefix_set". + Return an empty request if the prefix is not present in the confgured prefix set.''' + + pfx_delete_cfg_request = {} + if not self.prefix_in_prefix_list_cfg(prefix, cfg_prefix_set): + return pfx_delete_cfg_request + + conf_afi = command.get('afi', None) + if not conf_afi: + return pfx_delete_cfg_request + + pfx_set_name = command.get('name', None) + pfx_seq = prefix.get("sequence", None) + pfx_val = prefix.get("prefix", None) + pfx_ge = prefix.get("ge", None) + pfx_le = prefix.get("le", None) + + if not pfx_seq or not pfx_val: + return pfx_delete_cfg_request + + prefix_net = self.set_ipaddress_net_attrs(pfx_val, conf_afi) + masklength_range_str = self.get_masklength_range_string(pfx_ge, pfx_le, prefix_net) + prefix_string = pfx_val.replace("/", "%2F") + extended_pfx_cfg_str = self.prefix_set_delete_prefix_uri.format(pfx_set_name, + int(pfx_seq), + prefix_string, + masklength_range_str) + pfx_delete_cfg_request = {'path': extended_pfx_cfg_str, 'method': DELETE} + return pfx_delete_cfg_request + + def get_delete_all_prefix_list_cfg_requests(self): + '''Delete all prefix list configuration''' + requests = list() + requests = [{'path': self.prefix_sets_uri, 'method': DELETE}] + return requests + + def get_masklength_range_string(self, pfx_ge, pfx_le, prefix_net): + '''Determine the "masklength range" string required for the openconfig + REST API to configure the affected prefix.''' + if not pfx_ge and not pfx_le: + masklength_range_string = "exact" + elif pfx_ge and not pfx_le: + masklength_range_string = str(pfx_ge) + ".." + str(prefix_net['max_prefixlen']) + elif not pfx_ge and pfx_le: + masklength_range_string = str(prefix_net['prefixlen']) + ".." + str(pfx_le) + else: + masklength_range_string = str(pfx_ge) + ".." 
+ str(pfx_le) + + return masklength_range_string + + def prefix_set_in_config(self, pfx_set_name, have): + '''Determine if the prefix set specifid by "pfx_set_name" is present in + the current switch configuration. If it is present, return the "found" + prefix set. (Otherwise, return "None"''' + for cfg_prefix_set in have: + cfg_prefix_set_name = cfg_prefix_set.get('name', None) + if cfg_prefix_set_name and cfg_prefix_set_name == pfx_set_name: + return cfg_prefix_set + + return None + + def prefix_in_prefix_list_cfg(self, prefix, cfg_prefix_set): + '''Determine, based on the keys, if the "target" prefix specified by the "prefix" + input parameter is present in the currently configured prefix set specified + ty the "cfg_prefix_set" input parameter. Return "True" if the prifix is found, + or "False" if it isn't.''' + req_pfx = prefix.get("prefix", None) + req_seq = prefix.get("sequence", None) + req_ge = prefix.get("ge", None) + req_le = prefix.get("le", None) + + cfg_prefix_list = cfg_prefix_set.get("prefixes", None) + if not cfg_prefix_list: # The configured prefix set has no prefix list + return False + + for cfg_prefix in cfg_prefix_list: + cfg_pfx = cfg_prefix.get("prefix", None) + cfg_seq = cfg_prefix.get("sequence", None) + cfg_ge = cfg_prefix.get("ge", None) + cfg_le = cfg_prefix.get("le", None) + + # Check for matching key attributes + if not (req_pfx and cfg_pfx and req_pfx == cfg_pfx): + continue + if not (req_seq and cfg_seq and req_seq == cfg_seq): + continue + + # Check for ge match + if not req_ge: + if cfg_ge: + continue + else: + if not cfg_ge or req_ge != cfg_ge: + continue + + # Check for le match + if not req_le: + if cfg_le: + continue + else: + if not cfg_le or req_le != cfg_le: + continue + + # All key attributes match for this cfg_prefix + return True + + # No matching configured prefixes were found in the prefix set. 
+ return False + + def set_ipaddress_net_attrs(self, prefix_val, conf_afi): + '''Create and return a dictionary containing the values for any prefix-related + attributes needed for handling of prefix configuration requests. NOTE: This + method should be replaced with use of the Python "ipaddress" module after + Ansible drops downward compatibility support for Python 2.7.''' + + prefix_net = dict() + if conf_afi == 'ipv4': + prefix_net['max_prefixlen'] = 32 + else: # Assuming IPv6 for this case + prefix_net['max_prefixlen'] = 128 + + prefix_net['prefixlen'] = int(prefix_val.split("/")[1]) + return prefix_net diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py new file mode 100644 index 00000000..dfa65482 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py @@ -0,0 +1,362 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_radius_server class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.module_utils.connection import ConnectionError +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from 
ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, + normalize_interface_name, +) + +PATCH = 'patch' +DELETE = 'delete' +TEST_KEYS = [ + {'host': {'name': ''}}, +] + + +class Radius_server(ConfigBase): + """ + The sonic_radius_server class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'radius_server', + ] + + def __init__(self, module): + super(Radius_server, self).__init__(module) + + def get_radius_server_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + radius_server_facts = facts['ansible_network_resources'].get('radius_server') + if not radius_server_facts: + return [] + return radius_server_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_radius_server_facts = self.get_radius_server_facts() + commands, requests = self.set_config(existing_radius_server_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_radius_server_facts = self.get_radius_server_facts() + + result['before'] = existing_radius_server_facts + if result['changed']: + result['after'] = changed_radius_server_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_radius_server_facts): + """ Collect the configuration from 
the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + + if want and want.get('servers', None) and want['servers'].get('host', None): + normalize_interface_name(want['servers']['host'], self._module, 'source_interface') + + have = existing_radius_server_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + if not want: + want = {} + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = [] + command = diff + requests = self.get_modify_radius_server_requests(command, have) + if command and len(requests) > 0: + commands = update_states([command], "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, 
want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the radius_serveri except admin + commands = [] + if not want: + command = have + else: + command = want + + requests = self.get_delete_radius_server_requests(command, have) + + if command and len(requests) > 0: + commands = update_states([command], "deleted") + + return commands, requests + + def get_radius_global_payload(self, conf): + payload = {} + global_cfg = {} + + if conf.get('auth_type', None): + global_cfg['auth-type'] = conf['auth_type'] + if conf.get('key', None): + global_cfg['secret-key'] = conf['key'] + if conf.get('timeout', None): + global_cfg['timeout'] = conf['timeout'] + + if global_cfg: + payload = {'openconfig-system:config': global_cfg} + + return payload + + def get_radius_global_ext_payload(self, conf): + payload = {} + global_ext_cfg = {} + + if conf.get('nas_ip', None): + global_ext_cfg['nas-ip-address'] = conf['nas_ip'] + if conf.get('retransmit', None): + global_ext_cfg['retransmit-attempts'] = conf['retransmit'] + if conf.get('statistics', None): + global_ext_cfg['statistics'] = conf['statistics'] + + if global_ext_cfg: + payload = {'openconfig-aaa-radius-ext:config': global_ext_cfg} + + return payload + + def get_radius_server_payload(self, hosts): + payload = {} + servers_load = [] + for host in hosts: + if host.get('name', None): + host_cfg = {'address': host['name']} + if host.get('auth_type', None): + host_cfg['auth-type'] = host['auth_type'] + if host.get('priority', None): + host_cfg['priority'] = host['priority'] + if host.get('vrf', None): + host_cfg['vrf'] = host['vrf'] + if host.get('timeout', None): + host_cfg['timeout'] = host['timeout'] + + radius_port_key_cfg = 
{} + if host.get('port', None): + radius_port_key_cfg['auth-port'] = host['port'] + if host.get('key', None): + radius_port_key_cfg['secret-key'] = host['key'] + if host.get('retransmit', None): + radius_port_key_cfg['retransmit-attempts'] = host['retransmit'] + if host.get('source_interface', None): + radius_port_key_cfg['openconfig-aaa-radius-ext:source-interface'] = host['source_interface'] + + if radius_port_key_cfg: + consolidated_load = {'address': host['name']} + consolidated_load['config'] = host_cfg + consolidated_load['radius'] = {'config': radius_port_key_cfg} + servers_load.append(consolidated_load) + + if servers_load: + payload = {'openconfig-system:servers': {'server': servers_load}} + + return payload + + def get_modify_servers_request(self, command): + request = None + + hosts = [] + if command.get('servers', None) and command['servers'].get('host', None): + hosts = command['servers']['host'] + if hosts: + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers' + payload = self.get_radius_server_payload(hosts) + if payload: + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def get_modify_global_config_request(self, conf): + request = None + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config' + payload = self.get_radius_global_payload(conf) + if payload: + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def get_modify_global_ext_config_request(self, conf): + request = None + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config' + payload = self.get_radius_global_ext_payload(conf) + if payload: + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def get_modify_radius_server_requests(self, command, have): + requests = [] + if not command: + return requests + + request = self.get_modify_global_config_request(command) + if request: + 
requests.append(request) + + request = self.get_modify_global_ext_config_request(command) + if request: + requests.append(request) + + request = self.get_modify_servers_request(command) + if request: + requests.append(request) + + return requests + + def get_delete_global_ext_params(self, conf, match): + + requests = [] + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/' + if conf.get('nas_ip', None) and match.get('nas_ip', None): + requests.append({'path': url + 'nas-ip-address', 'method': DELETE}) + if conf.get('retransmit', None) and match.get('retransmit', None): + requests.append({'path': url + 'retransmit-attempts', 'method': DELETE}) + if conf.get('statistics', None) and match.get('statistics', None): + requests.append({'path': url + 'statistics', 'method': DELETE}) + + return requests + + def get_delete_global_params(self, conf, match): + + requests = [] + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config/' + if conf.get('auth_type', None) and match.get('auth_type', None) and match['auth_type'] != 'pap': + requests.append({'path': url + 'auth-type', 'method': DELETE}) + if conf.get('key', None) and match.get('key', None): + requests.append({'path': url + 'secret-key', 'method': DELETE}) + if conf.get('timeout', None) and match.get('timeout', None) and match['timeout'] != 5: + requests.append({'path': url + 'timeout', 'method': DELETE}) + + return requests + + def get_delete_servers(self, command, have): + requests = [] + url = 'data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers/server=' + + mat_hosts = [] + if have.get('servers', None) and have['servers'].get('host', None): + mat_hosts = have['servers']['host'] + + if command.get('servers', None): + if command['servers'].get('host', None): + hosts = command['servers']['host'] + else: + hosts = mat_hosts + + if mat_hosts and hosts: + for host in hosts: + if next((m_host for m_host 
in mat_hosts if m_host['name'] == host['name']), None): + requests.append({'path': url + host['name'], 'method': DELETE}) + + return requests + + def get_delete_radius_server_requests(self, command, have): + requests = [] + if not command: + return requests + + requests.extend(self.get_delete_global_params(command, have)) + requests.extend(self.get_delete_global_ext_params(command, have)) + requests.extend(self.get_delete_servers(command, have)) + + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py new file mode 100644 index 00000000..04735747 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py @@ -0,0 +1,344 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2022 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_static_routes class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) + +network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' +protocol_static_routes_path = 'protocols/protocol=STATIC,static/static-routes' + +PATCH = 'patch' +DELETE = 'delete' +TEST_KEYS = [ + {'config': {'vrf_name': ''}}, + {'static_list': {'prefix': ''}}, + {'next_hops': {'index': ''}}, +] + + +class Static_routes(ConfigBase): + """ + The sonic_static_routes class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'static_routes', + ] + + def __init__(self, module): + super(Static_routes, self).__init__(module) + + def get_static_routes_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + static_routes_facts = 
facts['ansible_network_resources'].get('static_routes') + if not static_routes_facts: + return [] + return static_routes_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = [] + commands = [] + existing_static_routes_facts = self.get_static_routes_facts() + commands, requests = self.set_config(existing_static_routes_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_static_routes_facts = self.get_static_routes_facts() + + result['before'] = existing_static_routes_facts + if result['changed']: + result['after'] = changed_static_routes_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_static_routes_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_static_routes_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + + commands = [] + requests = [] + state = self._module.params['state'] + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 
'merged': + commands, requests = self._state_merged(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = diff + requests = self.get_modify_static_routes_requests(commands, have) + + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + is_delete_all = False + # if want is none, then delete ALL + if not want: + commands = have + is_delete_all = True + else: + commands = want + + requests = self.get_delete_static_routes_requests(commands, have, is_delete_all) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_modify_static_routes_requests(self, commands, have): + requests = [] + + if not commands: + return requests + + for conf in commands: + vrf_name = conf.get('vrf_name', None) + static_list = conf.get('static_list', []) + for static in static_list: + prefix = static.get('prefix', None) + next_hops = static.get('next_hops', []) + if next_hops: + for next_hop in next_hops: + requests.append(self.get_modify_static_route_request(vrf_name, prefix, next_hop)) + + return requests + + def get_modify_static_route_request(self, vrf_name, prefix, next_hop): + request = None + next_hop_cfg = {} + index = next_hop.get('index', {}) + blackhole = index.get('blackhole', None) + interface = index.get('interface', None) + nexthop_vrf = index.get('nexthop_vrf', None) + next_hop_attr = index.get('next_hop', None) + metric = next_hop.get('metric', None) + track = 
next_hop.get('track', None) + tag = next_hop.get('tag', None) + idx = self.generate_index(index) + if idx: + next_hop_cfg['index'] = idx + if blackhole: + next_hop_cfg['blackhole'] = blackhole + if nexthop_vrf: + next_hop_cfg['network-instance'] = nexthop_vrf + if next_hop: + next_hop_cfg['next-hop'] = next_hop_attr + if metric: + next_hop_cfg['metric'] = metric + if track: + next_hop_cfg['track'] = track + if tag: + next_hop_cfg['tag'] = tag + + url = '%s=%s/%s' % (network_instance_path, vrf_name, protocol_static_routes_path) + next_hops_cfg = {'next-hop': [{'index': idx, 'config': next_hop_cfg}]} + if interface: + next_hops_cfg['next-hop'][0]['interface-ref'] = {'config': {'interface': interface}} + payload = {'openconfig-network-instance:static-routes': {'static': [{'prefix': prefix, 'config': {'prefix': prefix}, 'next-hops': next_hops_cfg}]}} + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def generate_index(self, index): + idx = None + blackhole = index.get('blackhole', None) + interface = index.get('interface', None) + nexthop_vrf = index.get('nexthop_vrf', None) + next_hop = index.get('next_hop', None) + + if blackhole is True: + idx = 'DROP' + else: + if interface: + if not next_hop and not nexthop_vrf: + idx = interface + elif next_hop and not nexthop_vrf: + idx = interface + '_' + next_hop + elif nexthop_vrf and not next_hop: + idx = interface + '_' + nexthop_vrf + else: + idx = interface + '_' + next_hop + '_' + nexthop_vrf + else: + if next_hop and not nexthop_vrf: + idx = next_hop + elif next_hop and nexthop_vrf: + idx = next_hop + '_' + nexthop_vrf + + return idx + + def get_delete_static_routes_requests(self, commands, have, is_delete_all): + requests = [] + if is_delete_all: + for cmd in commands: + vrf_name = cmd.get('vrf_name', None) + if vrf_name: + requests.append(self.get_delete_static_routes_for_vrf(vrf_name)) + else: + for cmd in commands: + vrf_name = cmd.get('vrf_name', None) + static_list = 
cmd.get('static_list', []) + for cfg in have: + cfg_vrf_name = cfg.get('vrf_name', None) + if vrf_name == cfg_vrf_name: + if not static_list: + requests.append(self.get_delete_static_routes_for_vrf(vrf_name)) + else: + for static in static_list: + prefix = static.get('prefix', None) + next_hops = static.get('next_hops', []) + cfg_static_list = cfg.get('static_list', []) + for cfg_static in cfg_static_list: + cfg_prefix = cfg_static.get('prefix', None) + if prefix == cfg_prefix: + if prefix and not next_hops: + requests.append(self.get_delete_static_routes_prefix_request(vrf_name, prefix)) + else: + for next_hop in next_hops: + index = next_hop.get('index', {}) + idx = self.generate_index(index) + metric = next_hop.get('metric', None) + track = next_hop.get('track', None) + tag = next_hop.get('tag', None) + + cfg_next_hops = cfg_static.get('next_hops', []) + if cfg_next_hops: + for cfg_next_hop in cfg_next_hops: + cfg_index = cfg_next_hop.get('index', {}) + cfg_idx = self.generate_index(cfg_index) + if idx == cfg_idx: + cfg_metric = cfg_next_hop.get('metric', None) + cfg_track = cfg_next_hop.get('track', None) + cfg_tag = cfg_next_hop.get('tag', None) + if not metric and not track and not tag: + requests.append(self.get_delete_static_routes_next_hop_request(vrf_name, prefix, idx)) + else: + if metric == cfg_metric: + requests.append(self.get_delete_next_hop_config_attr_request(vrf_name, prefix, idx, + 'metric')) + if track == cfg_track: + requests.append(self.get_delete_next_hop_config_attr_request(vrf_name, prefix, idx, + 'track')) + if tag == cfg_tag: + requests.append(self.get_delete_next_hop_config_attr_request(vrf_name, prefix, idx, 'tag')) + + return requests + + def get_delete_static_routes_for_vrf(self, vrf_name): + url = '%s=%s/%s' % (network_instance_path, vrf_name, protocol_static_routes_path) + request = {'path': url, 'method': DELETE} + + return request + + def get_delete_static_routes_prefix_request(self, vrf_name, prefix): + prefix = 
prefix.replace('/', '%2F') + url = '%s=%s/%s/static=%s' % (network_instance_path, vrf_name, protocol_static_routes_path, prefix) + request = {'path': url, 'method': DELETE} + + return request + + def get_delete_static_routes_next_hop_request(self, vrf_name, prefix, index): + prefix = prefix.replace('/', '%2F') + url = '%s=%s/%s/static=%s' % (network_instance_path, vrf_name, protocol_static_routes_path, prefix) + url += '/next-hops/next-hop=%s' % (index) + request = {'path': url, 'method': DELETE} + + return request + + def get_delete_next_hop_config_attr_request(self, vrf_name, prefix, index, attr): + prefix = prefix.replace('/', '%2F') + url = '%s=%s/%s/static=%s' % (network_instance_path, vrf_name, protocol_static_routes_path, prefix) + url += '/next-hops/next-hop=%s/config/%s' % (index, attr) + request = {'path': url, 'method': DELETE} + + return request diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py new file mode 100644 index 00000000..21d575a1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py @@ -0,0 +1,294 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_system class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) + +PATCH = 'patch' +DELETE = 'delete' + + +class System(ConfigBase): + """ + The sonic_system class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'system', + ] + + def __init__(self, module): + super(System, self).__init__(module) + + def get_system_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + system_facts = facts['ansible_network_resources'].get('system') + if not system_facts: + return [] + return system_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + 
warnings = list() + commands = list() + + existing_system_facts = self.get_system_facts() + commands, requests = self.set_config(existing_system_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + self.edit_config(requests) + result['changed'] = True + result['commands'] = commands + + changed_system_facts = self.get_system_facts() + + result['before'] = existing_system_facts + if result['changed']: + result['after'] = changed_system_facts + + result['warnings'] = warnings + return result + + def edit_config(self, requests): + try: + response = edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + def set_config(self, existing_system_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_system_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + state = self._module.params['state'] + if state == 'deleted': + commands = self._state_deleted(want, have) + elif state == 'merged': + diff = get_diff(want, have) + commands = self._state_merged(want, have, diff) + return commands + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + requests = [] + commands = [] + if diff: + 
requests = self.get_create_system_request(want, diff) + if len(requests) > 0: + commands = update_states(diff, "merged") + return commands, requests + + def _state_deleted(self, want, have): + """ The command generator when state is deleted + + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + commands = [] + requests = [] + new_have = self.remove_default_entries(have) + if not want: + if have: + requests = self.get_delete_all_system_request(new_have) + if len(requests) > 0: + commands = update_states(have, "deleted") + else: + want = utils.remove_empties(want) + d_diff = get_diff(want, new_have, is_skeleton=True) + diff_want = get_diff(want, d_diff, is_skeleton=True) + if diff_want: + requests = self.get_delete_all_system_request(diff_want) + if len(requests) > 0: + commands = update_states(diff_want, "deleted") + return commands, requests + + def get_create_system_request(self, want, commands): + requests = [] + host_path = 'data/openconfig-system:system/config' + method = PATCH + hostname_payload = self.build_create_hostname_payload(commands) + if hostname_payload: + request = {'path': host_path, 'method': method, 'data': hostname_payload} + requests.append(request) + name_path = 'data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode' + name_payload = self.build_create_name_payload(commands) + if name_payload: + request = {'path': name_path, 'method': method, 'data': name_payload} + requests.append(request) + anycast_path = 'data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/' + anycast_payload = self.build_create_anycast_payload(commands) + if anycast_payload: + request = {'path': anycast_path, 'method': method, 'data': anycast_payload} + requests.append(request) + return requests + + def build_create_hostname_payload(self, commands): + payload = {"openconfig-system:config": {}} + if "hostname" in commands and commands["hostname"]: + 
payload['openconfig-system:config'].update({"hostname": commands["hostname"]}) + return payload + + def build_create_name_payload(self, commands): + payload = {} + if "interface_naming" in commands and commands["interface_naming"]: + payload.update({'sonic-device-metadata:intf_naming_mode': commands["interface_naming"]}) + return payload + + def build_create_anycast_payload(self, commands): + payload = {} + if "anycast_address" in commands and commands["anycast_address"]: + payload = {"sonic-sag:SAG_GLOBAL_LIST": []} + temp = {} + if "ipv4" in commands["anycast_address"] and commands["anycast_address"]["ipv4"]: + temp.update({'IPv4': "enable"}) + if "ipv4" in commands["anycast_address"] and not commands["anycast_address"]["ipv4"]: + temp.update({'IPv4': "disable"}) + if "ipv6" in commands["anycast_address"] and commands["anycast_address"]["ipv6"]: + temp.update({'IPv6': "enable"}) + if "ipv6" in commands["anycast_address"] and not commands["anycast_address"]["ipv6"]: + temp.update({'IPv6': "disable"}) + if "mac_address" in commands["anycast_address"] and commands["anycast_address"]["mac_address"]: + temp.update({'gwmac': commands["anycast_address"]["mac_address"]}) + if temp: + temp.update({"table_distinguisher": "IP"}) + payload["sonic-sag:SAG_GLOBAL_LIST"].append(temp) + return payload + + def remove_default_entries(self, data): + new_data = {} + if not data: + return new_data + else: + hostname = data.get('hostname', None) + if hostname != "sonic": + new_data["hostname"] = hostname + intf_name = data.get('interface_naming', None) + if intf_name != "native": + new_data["interface_naming"] = intf_name + new_anycast = {} + anycast = data.get('anycast_address', None) + if anycast: + ipv4 = anycast.get("ipv4", None) + if ipv4 is not True: + new_anycast["ipv4"] = ipv4 + ipv6 = anycast.get("ipv6", None) + if ipv6 is not True: + new_anycast["ipv6"] = ipv6 + mac = anycast.get("mac_address", None) + if mac is not None: + new_anycast["mac_address"] = mac + 
new_data["anycast_address"] = new_anycast + return new_data + + def get_delete_all_system_request(self, have): + requests = [] + if "hostname" in have: + request = self.get_hostname_delete_request() + requests.append(request) + if "interface_naming" in have: + request = self.get_intfname_delete_request() + requests.append(request) + if "anycast_address" in have: + request = self.get_anycast_delete_request(have["anycast_address"]) + requests.extend(request) + return requests + + def get_hostname_delete_request(self): + path = 'data/openconfig-system:system/config/' + method = PATCH + payload = {"openconfig-system:config": {}} + payload['openconfig-system:config'].update({"hostname": "sonic"}) + request = {'path': path, 'method': method, 'data': payload} + return request + + def get_intfname_delete_request(self): + path = 'data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode' + method = DELETE + request = {'path': path, 'method': method} + return request + + def get_anycast_delete_request(self, anycast): + requests = [] + if "ipv4" in anycast: + path = 'data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST=IP/IPv4' + method = DELETE + request = {'path': path, 'method': method} + requests.append(request) + if "ipv6" in anycast: + path = 'data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST=IP/IPv6' + method = DELETE + request = {'path': path, 'method': method} + requests.append(request) + if "mac_address" in anycast: + path = 'data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST=IP/gwmac' + method = DELETE + request = {'path': path, 'method': method} + requests.append(request) + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py new file mode 100644 index 00000000..498fcbe2 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py @@ -0,0 +1,318 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_tacacs_server class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible.module_utils.connection import ConnectionError +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + update_states, + get_diff, + get_normalize_interface_name, +) + +PATCH = 'patch' +DELETE = 'delete' +TEST_KEYS = [ + {'host': {'name': ''}}, +] + + +class Tacacs_server(ConfigBase): + """ + The sonic_tacacs_server class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'tacacs_server', + ] + + def __init__(self, module): + super(Tacacs_server, self).__init__(module) + + def get_tacacs_server_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + tacacs_server_facts = 
facts['ansible_network_resources'].get('tacacs_server') + if not tacacs_server_facts: + return [] + return tacacs_server_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_tacacs_server_facts = self.get_tacacs_server_facts() + commands, requests = self.set_config(existing_tacacs_server_facts) + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_tacacs_server_facts = self.get_tacacs_server_facts() + + result['before'] = existing_tacacs_server_facts + if result['changed']: + result['after'] = changed_tacacs_server_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_tacacs_server_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + + if want and want.get('source_interface', None): + want['source_interface'] = get_normalize_interface_name(want['source_interface'], self._module) + + have = existing_tacacs_server_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + if not want: + 
want = {} + + diff = get_diff(want, have, TEST_KEYS) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + commands = [] + command = diff + requests = self.get_modify_tacacs_server_requests(command, have) + if command and len(requests) > 0: + commands = update_states([command], "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the tacacs_serveri except admin + commands = [] + if not want: + command = have + else: + command = want + + requests = self.get_delete_tacacs_server_requests(command, have) + + if command and len(requests) > 0: + commands = update_states([command], "deleted") + + return commands, requests + + def get_tacacs_global_payload(self, conf): + payload = {} + global_cfg = {} + + if conf.get('auth_type', None): + global_cfg['auth-type'] = conf['auth_type'] + if conf.get('key', None): + global_cfg['secret-key'] = conf['key'] + if conf.get('source_interface', None): + 
global_cfg['source-interface'] = conf['source_interface'] + if conf.get('timeout', None): + global_cfg['timeout'] = conf['timeout'] + + if global_cfg: + payload = {'openconfig-system:config': global_cfg} + + return payload + + def get_tacacs_server_payload(self, hosts): + payload = {} + servers_load = [] + for host in hosts: + if host.get('name', None): + host_cfg = {'address': host['name']} + if host.get('auth_type', None): + host_cfg['auth-type'] = host['auth_type'] + if host.get('priority', None): + host_cfg['priority'] = host['priority'] + if host.get('vrf', None): + host_cfg['vrf'] = host['vrf'] + if host.get('timeout', None): + host_cfg['timeout'] = host['timeout'] + + tacacs_port_key_cfg = {} + if host.get('port', None): + tacacs_port_key_cfg['port'] = host['port'] + if host.get('key', None): + tacacs_port_key_cfg['secret-key'] = host['key'] + + if tacacs_port_key_cfg: + consolidated_load = {'address': host['name']} + consolidated_load['config'] = host_cfg + consolidated_load['tacacs'] = {'config': tacacs_port_key_cfg} + servers_load.append(consolidated_load) + + if servers_load: + payload = {'openconfig-system:servers': {'server': servers_load}} + + return payload + + def get_modify_servers_request(self, command): + request = None + + hosts = [] + if command.get('servers', None) and command['servers'].get('host', None): + hosts = command['servers']['host'] + if hosts: + url = 'data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers' + payload = self.get_tacacs_server_payload(hosts) + if payload: + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def get_modify_global_config_request(self, conf): + request = None + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config' + payload = self.get_tacacs_global_payload(conf) + if payload: + request = {'path': url, 'method': PATCH, 'data': payload} + + return request + + def get_modify_tacacs_server_requests(self, command, have): + 
requests = [] + if not command: + return requests + + request = self.get_modify_global_config_request(command) + if request: + requests.append(request) + + request = self.get_modify_servers_request(command) + if request: + requests.append(request) + + return requests + + def get_delete_global_params(self, conf, match): + + requests = [] + + url = 'data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/' + if conf.get('auth_type', None) and match.get('auth_type', None) and match['auth_type'] != 'pap': + requests.append({'path': url + 'auth-type', 'method': DELETE}) + if conf.get('key', None) and match.get('key', None): + requests.append({'path': url + 'secret-key', 'method': DELETE}) + if conf.get('source_interface', None) and match.get('source_interface', None): + requests.append({'path': url + 'source-interface', 'method': DELETE}) + if conf.get('timeout', None) and match.get('timeout', None) and match['timeout'] != 5: + requests.append({'path': url + 'timeout', 'method': DELETE}) + + return requests + + def get_delete_servers(self, command, have): + requests = [] + url = 'data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers/server=' + + mat_hosts = [] + if have.get('servers', None) and have['servers'].get('host', None): + mat_hosts = have['servers']['host'] + + hosts = [] + if command.get('servers', None): + if command['servers'].get('host', None): + hosts = command['servers']['host'] + else: + hosts = mat_hosts + + if mat_hosts and hosts: + for host in hosts: + if next((m_host for m_host in mat_hosts if m_host['name'] == host['name']), None): + requests.append({'path': url + host['name'], 'method': DELETE}) + + return requests + + def get_delete_tacacs_server_requests(self, command, have): + requests = [] + if not command: + return requests + + requests.extend(self.get_delete_global_params(command, have)) + requests.extend(self.get_delete_servers(command, have)) + + return requests diff --git 
a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py new file mode 100644 index 00000000..73398cf7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py @@ -0,0 +1,299 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic_users class +It is in this file where the current configuration (as dict) +is compared to the provided configuration (as dict) and the command set +necessary to bring the current configuration to it's desired end-state is +created +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import json + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ( + ConfigBase, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + dict_to_set, + update_states, + get_diff, +) +from ansible.module_utils.connection import ConnectionError + +PATCH = 'patch' +DELETE = 'delete' + + +class Users(ConfigBase): + """ + The sonic_users class + """ + + gather_subset = [ + '!all', + '!min', + ] + + gather_network_resources = [ + 'users', + ] + + def __init__(self, module): + super(Users, self).__init__(module) + + def get_users_facts(self): + """ Get the 'facts' (the current configuration) + + :rtype: A dictionary + :returns: The current configuration as a 
dictionary + """ + facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) + users_facts = facts['ansible_network_resources'].get('users') + if not users_facts: + return [] + return users_facts + + def execute_module(self): + """ Execute the module + + :rtype: A dictionary + :returns: The result from module execution + """ + result = {'changed': False} + warnings = list() + existing_users_facts = self.get_users_facts() + commands, requests = self.set_config(existing_users_facts) + auth_error = False + if commands and len(requests) > 0: + if not self._module.check_mode: + try: + edit_config(self._module, to_request(self._module, requests)) + except ConnectionError as exc: + try: + json_obj = json.loads(str(exc).replace("'", '"')) + if json_obj and type(json_obj) is dict and 401 == json_obj['code']: + auth_error = True + warnings.append("Unable to get after configs as password got changed for current user") + else: + self._module.fail_json(msg=str(exc), code=exc.code) + except Exception as err: + self._module.fail_json(msg=str(exc), code=exc.code) + result['changed'] = True + result['commands'] = commands + + changed_users_facts = [] + if not auth_error: + changed_users_facts = self.get_users_facts() + + result['before'] = existing_users_facts + if result['changed']: + result['after'] = changed_users_facts + + result['warnings'] = warnings + return result + + def set_config(self, existing_users_facts): + """ Collect the configuration from the args passed to the module, + collect the current configuration (as a dict from facts) + + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + want = self._module.params['config'] + have = existing_users_facts + resp = self.set_state(want, have) + return to_list(resp) + + def set_state(self, want, have): + """ Select the appropriate function based on the state provided + + :param want: the desired configuration as 
a dictionary + :param have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to migrate the current configuration + to the desired configuration + """ + commands = [] + requests = [] + state = self._module.params['state'] + if not want: + want = [] + + new_want = [{'name': conf['name'], 'role': conf['role']} for conf in want] + new_have = [{'name': conf['name'], 'role': conf['role']} for conf in have] + new_diff = get_diff(new_want, new_have) + + diff = [] + for cfg in new_diff: + match = next((w_cfg for w_cfg in want if w_cfg['name'] == cfg['name']), None) + if match: + diff.append(match) + + for cfg in want: + if cfg['password'] and cfg['update_password'] == 'always': + d_match = next((d_cfg for d_cfg in diff if d_cfg['name'] == cfg['name']), None) + if d_match is None: + diff.append(cfg) + + if state == 'overridden': + commands, requests = self._state_overridden(want, have, diff) + elif state == 'deleted': + commands, requests = self._state_deleted(want, have, diff) + elif state == 'merged': + commands, requests = self._state_merged(want, have, diff) + elif state == 'replaced': + commands, requests = self._state_replaced(want, have, diff) + return commands, requests + + def _state_merged(self, want, have, diff): + """ The command generator when state is merged + + :param want: the additive configuration as a dictionary + :param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to merge the provided into + the current configuration + """ + self.validate_new_users(want, have) + + commands = diff + requests = self.get_modify_users_requests(commands, have) + if commands and len(requests) > 0: + commands = update_states(commands, "merged") + else: + commands = [] + + return commands, requests + + def _state_deleted(self, want, have, diff): + """ The command generator when state is deleted + + :param want: the objects from which the configuration should be removed + 
:param obj_in_have: the current configuration as a dictionary + :rtype: A list + :returns: the commands necessary to remove the current configuration + of the provided objects + """ + # if want is none, then delete all the usersi except admin + if not want: + commands = have + else: + commands = want + + requests = self.get_delete_users_requests(commands, have) + + if commands and len(requests) > 0: + commands = update_states(commands, "deleted") + else: + commands = [] + + return commands, requests + + def get_pwd(self, pw): + clear_pwd = hashed_pwd = "" + pwd = pw.replace("\\", "") + if pwd[:3] == '$6$': + hashed_pwd = pwd + else: + clear_pwd = pwd + return clear_pwd, hashed_pwd + + def get_single_user_payload(self, name, role, password, update_pass, match): + user_cfg = {'username': name} + if not role and match: + role = match['role'] + + if not password and match: + password = match['password'] + + if role: + user_cfg['role'] = role + + if password: + clear_pwd, hashed_pwd = self.get_pwd(password) + user_cfg['password'] = clear_pwd + user_cfg['password-hashed'] = hashed_pwd + + pay_load = {'openconfig-system:user': [{'username': name, 'config': user_cfg}]} + return pay_load + + def get_modify_single_user_request(self, conf, match): + request = None + name = conf.get('name', None) + role = conf.get('role', None) + password = conf.get('password', None) + update_pass = conf.get('update_password', None) + if role or (password and update_pass == 'always'): + url = 'data/openconfig-system:system/aaa/authentication/users/user=%s' % (name) + payload = self.get_single_user_payload(name, role, password, update_pass, match) + request = {'path': url, 'method': PATCH, 'data': payload} + return request + + def get_modify_users_requests(self, commands, have): + requests = [] + if not commands: + return requests + + for conf in commands: + match = next((cfg for cfg in have if cfg['name'] == conf['name']), None) + req = self.get_modify_single_user_request(conf, match) + if 
req: + requests.append(req) + return requests + + def get_new_users(self, want, have): + new_users = [] + for user in want: + if not next((h_user for h_user in have if h_user['name'] == user['name']), None): + new_users.append(user) + return new_users + + def validate_new_users(self, want, have): + new_users = self.get_new_users(want, have) + invalid_users = [] + for user in new_users: + params = [] + if not user['role']: + params.append('role') + if not user['password']: + params.append('password') + if params: + invalid_users.append({user['name']: params}) + if invalid_users: + err_msg = "Missing parameter(s) for new users! " + str(invalid_users) + self._module.fail_json(msg=err_msg, code=513) + + def get_delete_users_requests(self, commands, have): + requests = [] + if not commands: + return requests + + # Skip the asmin user in 'deleted' state. we cannot delete all users + admin_usr = None + + for conf in commands: + # Skip the asmin user in 'deleted' state. we cannot delete all users + if conf['name'] == 'admin': + admin_usr = conf + continue + match = next((cfg for cfg in have if cfg['name'] == conf['name']), None) + if match: + url = 'data/openconfig-system:system/aaa/authentication/users/user=%s' % (conf['name']) + requests.append({'path': url, 'method': DELETE}) + + if admin_usr: + commands.remove(admin_usr) + return requests diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py new file mode 100644 index 00000000..40405107 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py @@ -0,0 +1,265 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_vlans class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    get_diff,
    update_states,
    remove_empties_from_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.interfaces_util import (
    build_interfaces_create_request,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
import traceback

# Optional-dependency probe for jinja2.
# NOTE(review): HAS_LIB / ERR_MSG / LIB_IMP_ERR are set here but do not
# appear to be consumed anywhere in this file — confirm whether this
# guard (and 'import json' above) is still needed.
LIB_IMP_ERR = None
ERR_MSG = None
try:
    import jinja2
    HAS_LIB = True
except Exception as e:
    HAS_LIB = False
    ERR_MSG = to_native(e)
    LIB_IMP_ERR = traceback.format_exc()

# 'vlan_id' is the unique key get_diff uses to match entries of the
# 'config' list between want and have.
TEST_KEYS = [
    {'config': {'vlan_id': ''}},
]


class Vlans(ConfigBase):
    """
    The sonic_vlans class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vlans',
    ]

    def __init__(self, module):
        super(Vlans, self).__init__(module)

    def get_vlans_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        vlans_facts = facts['ansible_network_resources'].get('vlans')
        if not vlans_facts:
            return []
        return vlans_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()

        existing_vlans_facts = self.get_vlans_facts()
        commands, requests = self.set_config(existing_vlans_facts)
        if commands:
            # Only push the requests to the device outside check mode.
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
            result['commands'] = commands

        changed_vlans_facts = self.get_vlans_facts()

        result['before'] = existing_vlans_facts
        # 'after' is only meaningful when something changed.
        if result['changed']:
            result['after'] = changed_vlans_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vlans_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = remove_empties_from_list(self._module.params['config'])
        have = existing_vlans_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        # diff method works on dict, so creating temp dict
        diff = get_diff(want, have, TEST_KEYS)

        # NOTE(review): no fallback branch — presumably the module argspec
        # restricts 'state' to these four values; confirm, otherwise
        # 'commands'/'requests' would be unbound here.
        if state == 'overridden':
            commands, requests = self._state_overridden(want, have, diff)
        elif state == 'deleted':
            commands, requests = self._state_deleted(want, have, diff)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff)
        elif state == 'replaced':
            commands, requests = self._state_replaced(want, have, diff)

        ret_commands = remove_empties_from_list(commands)
        return ret_commands, requests

    def _state_replaced(self, want, have, diff):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        # For VLANs, 'replaced' is handled identically to 'merged'.
        return self._state_merged(want, have, diff)

    def _state_overridden(self, want, have, diff):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        ret_requests = list()
        commands = list()
        # Everything present on the device but absent from 'want' is removed.
        vlans_to_delete = get_diff(have, want, TEST_KEYS)
        if vlans_to_delete:
            delete_vlans_requests = self.get_delete_vlans_requests(vlans_to_delete)
            ret_requests.extend(delete_vlans_requests)
            commands.extend(update_states(vlans_to_delete, "deleted"))

        if diff:
            vlans_to_create_requests = self.get_create_vlans_requests(diff)
            ret_requests.extend(vlans_to_create_requests)
            commands.extend(update_states(diff, "merged"))

        return commands, ret_requests

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration at position-0
                  Requests necessary to merge to the current configuration
                  at position-1
        """
        commands = update_states(diff, "merged")
        requests = self.get_create_vlans_requests(commands)

        return commands, requests

    def _state_deleted(self, want, have, diff):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = list()
        # if want is none, then delete all the vlans
        if not want:
            commands = have
        else:  # delete specific vlans
            commands = get_diff(want, diff, TEST_KEYS)

        requests = self.get_delete_vlans_requests(commands)
        commands = update_states(commands, "deleted")
        return commands, requests

    def get_delete_vlans_requests(self, configs):
        """Build DELETE requests for the given VLAN configs.

        If a config entry carries a 'description', only the description
        attribute is deleted; otherwise the whole VLAN interface is removed.
        """
        requests = []
        if not configs:
            return requests
        # Create URL and payload
        url = "data/openconfig-interfaces:interfaces/interface=Vlan{}"
        method = "DELETE"
        for vlan in configs:
            vlan_id = vlan.get("vlan_id")
            description = vlan.get("description")
            if description:
                # Attribute-level delete: remove just the description.
                path = self.get_delete_vlan_config_attr(vlan_id, "description")
            else:
                path = url.format(vlan_id)

            request = {"path": path,
                       "method": method,
                       }
            requests.append(request)

        return requests

    def get_delete_vlan_config_attr(self, vlan_id, attr_name):
        """Return the REST path that deletes a single config attribute of a VLAN."""
        url = "data/openconfig-interfaces:interfaces/interface=Vlan{}/config/{}"
        path = url.format(vlan_id, attr_name)

        return path

    def get_create_vlans_requests(self, configs):
        """Build create/patch requests for the given VLAN configs.

        Each VLAN gets an interface-create request; a description, when
        present, is patched onto the interface config in a second request.
        """
        requests = []
        if not configs:
            return requests
        for vlan in configs:
            vlan_id = vlan.get("vlan_id")
            interface_name = "Vlan" + str(vlan_id)
            description = vlan.get("description", None)
            request = build_interfaces_create_request(interface_name=interface_name)
            requests.append(request)
            if description:
                requests.append(self.get_modify_vlan_config_attr(interface_name, 'description', description))

        return requests

    def get_modify_vlan_config_attr(self, intf_name, attr_name, attr_value):
        """Return a PATCH request setting one config attribute on a VLAN interface."""
        url = "data/openconfig-interfaces:interfaces/interface={}/config"
        payload = {"openconfig-interfaces:config": {"name": intf_name, attr_name: attr_value}}
        method = "PATCH"
        request = {"path": url.format(intf_name), "method": method, "data": payload}

        return request
#
# -*- coding: utf-8 -*-
# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_vrfs class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    get_diff,
    update_states,
    normalize_interface_name
)
from ansible.module_utils.connection import ConnectionError

PATCH = 'patch'
DELETE = 'DELETE'
# 'name' is the unique key get_diff uses to match member-interface entries.
TEST_KEYS = [
    {'interfaces': {'name': ''}},
]


class Vrfs(ConfigBase):
    """
    The sonic_vrfs class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vrfs',
    ]

    # Class-level default; set per run by _state_deleted to signal that the
    # whole VRF (not just member interfaces) should be removed.
    delete_all_flag = False

    def __init__(self, module):
        super(Vrfs, self).__init__(module)

    def get_vrf_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        vrf_interfaces_facts = facts['ansible_network_resources'].get('vrfs')
        if not vrf_interfaces_facts:
            return []
        return vrf_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        commands = list()

        existing_vrf_interfaces_facts = self.get_vrf_facts()
        commands, requests = self.set_config(existing_vrf_interfaces_facts)
        if commands and len(requests) > 0:
            # Only push the requests to the device outside check mode.
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
            result['commands'] = commands

        changed_vrf_interfaces_facts = self.get_vrf_facts()

        result['before'] = existing_vrf_interfaces_facts
        if result['changed']:
            result['after'] = changed_vrf_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vrf_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_vrf_interfaces_facts
        if want is None:
            want = []

        # Normalize member-interface names (e.g. short forms to full names).
        # NOTE(review): the rebinding below discards the return value unless
        # normalize_interface_name mutates 'interfaces' in place — confirm.
        for each in want:
            if each.get("members", None):
                interfaces = each["members"].get("interfaces", None)
                if interfaces:
                    interfaces = normalize_interface_name(interfaces, self._module)
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        diff = get_diff(want, have, TEST_KEYS)

        # NOTE(review): only 'deleted' and 'merged' are handled — presumably
        # the module argspec restricts 'state' to these; confirm, otherwise
        # 'commands'/'requests' would be unbound here.
        if state == 'deleted':
            commands, requests = self._state_deleted(want, have)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff)

        return commands, requests

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :param want: the additive configuration as a dictionary
        :param obj_in_have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = diff
        requests = []
        if commands:
            requests = self.get_create_requests(commands, have)

        if len(requests) > 0:
            commands = update_states(commands, "merged")
        else:
            commands = []
        return commands, requests

    def _state_deleted(self, want, have):
        """ The command generator when state is deleted

        :param want: the objects from which the configuration should be removed
        :param obj_in_have: the current configuration as a dictionary
        :param interface_type: interface type
        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        # if want is none, then delete all the vrfs
        if not want:
            commands = have
            self.delete_all_flag = True
        else:
            commands = want
            self.delete_all_flag = False

        requests = []
        if commands:
            requests = self.get_delete_vrf_interface_requests(commands, have, want)

        if len(requests) > 0:
            commands = update_states(commands, "deleted")
        else:
            commands = []

        return commands, requests

    def get_delete_vrf_interface_requests(self, configs, have, want):
        """Build DELETE requests for VRFs or their member interfaces.

        A whole VRF is deleted when delete-all is in effect or when the
        config entry names no members; otherwise only the member interfaces
        currently bound to the VRF are detached.
        """
        requests = []
        if not configs:
            return requests

        # Create URL and payload
        method = DELETE
        for conf in configs:
            name = conf['name']
            # empty_flag: the entry names the VRF but no member interfaces,
            # so the whole VRF should be removed.
            empty_flag = False
            members = conf.get('members', None)
            if members:
                interfaces = members.get('interfaces', None)
            if members is None:
                empty_flag = True
            elif members is not None and interfaces is None:
                empty_flag = True
            matched = next((have_cfg for have_cfg in have if have_cfg['name'] == name), None)
            if not matched:
                # Nothing to delete for a VRF that is not on the device.
                continue

            # if members are not mentioned delete the vrf name
            if (self._module.params['state'] == 'deleted' and self.delete_all_flag) or empty_flag:
                url = 'data/openconfig-network-instance:network-instances/network-instance={0}'.format(name)
                request = {"path": url, "method": method}
                requests.append(request)
            else:
                # Detach only the member interfaces that are actually bound.
                matched_members = matched.get('members', None)

                if matched_members:
                    matched_intf = matched_members.get('interfaces', None)
                    if matched_intf:
                        for del_mem in matched_intf:
                            url = 'data/openconfig-network-instance:network-instances/'
                            url = url + 'network-instance={0}/interfaces/interface={1}'.format(name, del_mem['name'])
                            request = {"path": url, "method": method}
                            requests.append(request)

        return requests

    def get_create_requests(self, configs, have):
        """Build the creation requests: VRFs first, then their member bindings."""
        requests = []
        if not configs:
            return requests

        requests_vrf = self.get_create_vrf_requests(configs, have)
        if requests_vrf:
            requests.extend(requests_vrf)

        requests_vrf_intf = self.get_create_vrf_interface_requests(configs, have)
        if requests_vrf_intf:
            requests.extend(requests_vrf_intf)
        return requests

    def get_create_vrf_requests(self, configs, have):
        """Build PATCH requests creating the VRFs that are not yet on the device."""
        requests = []
        if not configs:
            return requests
        # Create URL and payload
        method = PATCH
        for conf in configs:
            if conf.get("name", None):
                name = conf["name"]
                matched = next((have_cfg for have_cfg in have if have_cfg['name'] == name), None)
                if not matched:
                    url = 'data/openconfig-network-instance:network-instances'
                    payload = self.build_create_vrf_payload(conf)
                    request = {"path": url, "method": method, "data": payload}
                    requests.append(request)
        return requests

    def get_create_vrf_interface_requests(self, configs, have):
        """Build PATCH requests binding member interfaces to their VRFs."""
        requests = []
        if not configs:
            return requests

        # Create URL and payload
        method = PATCH
        for conf in configs:
            if conf.get("members", None):
                if conf["members"].get("interfaces", None):
                    url = 'data/openconfig-network-instance:network-instances/network-instance={0}/interfaces/interface'.format(conf["name"])
                    payload = self.build_create_vrf_interface_payload(conf)
                    if payload:
                        request = {"path": url, "method": method, "data": payload}
                        requests.append(request)

        return requests

    def build_create_vrf_payload(self, conf):
        """Return the openconfig network-instance payload creating one L3VRF."""
        name = conf['name']

        netw_inst = dict({'name': name})
        netw_inst['config'] = dict({'name': name})
        netw_inst['config'].update({'enabled': True})
        netw_inst['config'].update({'type': 'L3VRF'})
        netw_inst_arr = [netw_inst]

        return dict({'openconfig-network-instance:network-instances': {'network-instance': netw_inst_arr}})

    def build_create_vrf_interface_payload(self, conf):
        """Return the payload listing the member interfaces to bind to the VRF."""
        members = conf["members"].get("interfaces", None)
        network_inst_payload = dict()
        if members:
            network_inst_payload.update({"openconfig-network-instance:interface": []})
            for member in members:
                if member["name"]:
                    member_config_payload = dict({"id": member["name"]})
                    member_payload = dict({"id": member["name"], "config": member_config_payload})
                    network_inst_payload["openconfig-network-instance:interface"].append(member_payload)

        return network_inst_payload
#
# -*- coding: utf-8 -*-
# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic_vxlans class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
    ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    to_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    get_diff,
    update_states
)
from ansible.module_utils.connection import ConnectionError

PATCH = 'patch'
DELETE = 'delete'
# Unique keys get_diff uses to match vlan_map / vrf_map list entries.
test_keys = [
    {'vlan_map': {'vlan': '', 'vni': ''}},
    {'vrf_map': {'vni': '', 'vrf': ''}},
]


class Vxlans(ConfigBase):
    """
    The sonic_vxlans class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vxlans',
    ]

    def __init__(self, module):
        super(Vxlans, self).__init__(module)

    def get_vxlans_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        vxlans_facts = facts['ansible_network_resources'].get('vxlans')
        if not vxlans_facts:
            return []
        return vxlans_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()

        existing_vxlans_facts = self.get_vxlans_facts()
        commands, requests = self.set_config(existing_vxlans_facts)

        if commands and requests:
            # Only push the requests to the device outside check mode.
            if not self._module.check_mode:
                try:
                    edit_config(self._module, to_request(self._module, requests))
                except ConnectionError as exc:
                    self._module.fail_json(msg=str(exc), code=exc.code)
            result['changed'] = True
            result['commands'] = commands

        changed_vxlans_facts = self.get_vxlans_facts()

        result['before'] = existing_vxlans_facts
        if result['changed']:
            result['after'] = changed_vxlans_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vxlans_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_vxlans_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']

        diff = get_diff(want, have, test_keys)

        # NOTE(review): no fallback branch — presumably the module argspec
        # restricts 'state' to these four values; confirm.
        if state == 'overridden':
            commands, requests = self._state_overridden(want, have, diff)
        elif state == 'deleted':
            commands, requests = self._state_deleted(want, have, diff)
        elif state == 'merged':
            commands, requests = self._state_merged(want, have, diff)
        elif state == 'replaced':
            commands, requests = self._state_replaced(want, have, diff)

        return commands, requests

    def _state_replaced(self, want, have, diff):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """

        requests = []
        commands = []

        # First remove what is on the device but not in 'want' ...
        commands_del = get_diff(have, want, test_keys)
        requests_del = []
        if commands_del:
            requests_del = self.get_delete_vxlan_request(commands_del, have)

        if requests_del:
            requests.extend(requests_del)
            commands_del = update_states(commands_del, "deleted")
            commands.extend(commands_del)

        # ... then create what 'want' adds or changes.
        commands_rep = diff
        requests_rep = []
        if commands_rep:
            requests_rep = self.get_create_vxlans_request(commands_rep, have)

        if requests_rep:
            requests.extend(requests_rep)
            commands_rep = update_states(commands_rep, "replaced")
            commands.extend(commands_rep)

        return commands, requests

    def _state_overridden(self, want, have, diff):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        commands = []

        # NOTE(review): unlike _state_replaced, this diff omits 'test_keys' —
        # confirm whether that difference is intentional.
        commands_del = get_diff(have, want)
        requests_del = []
        if commands_del:
            requests_del = self.get_delete_vxlan_request(commands_del, have)

        if requests_del:
            requests.extend(requests_del)
            commands_del = update_states(commands_del, "deleted")
            commands.extend(commands_del)

        commands_over = diff
        requests_over = []
        if commands_over:
            requests_over = self.get_create_vxlans_request(commands_over, have)

        if requests_over:
            requests.extend(requests_over)
            commands_over = update_states(commands_over, "overridden")
            commands.extend(commands_over)

        return commands, requests

    def _state_merged(self, want, have, diff):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration at position-0
                  Requests necessary to merge to the current configuration
                  at position-1
        """
        commands = diff
        requests = self.get_create_vxlans_request(commands, have)

        if len(requests) == 0:
            commands = []

        if commands:
            commands = update_states(commands, "merged")

        return commands, requests

    def _state_deleted(self, want, have, diff):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """

        requests = []
        is_delete_all = False
        # if want is none, then delete all the vxlans
        if not want or len(have) == 0:
            commands = have
            is_delete_all = True
        else:
            commands = want

        if is_delete_all:
            requests = self.get_delete_all_vxlan_request(have)
        else:
            requests = self.get_delete_vxlan_request(commands, have)

        if len(requests) == 0:
            commands = []

        if commands:
            commands = update_states(commands, "deleted")

        return commands, requests

    def get_create_vxlans_request(self, configs, have):
        """Build all creation requests: tunnels first, then vlan maps, then vrf maps."""
        requests = []

        if not configs:
            return requests

        tunnel_requests = self.get_create_tunnel_request(configs, have)
        vlan_map_requests = self.get_create_vlan_map_request(configs, have)
        vrf_map_requests = self.get_create_vrf_map_request(configs, have)

        if tunnel_requests:
            requests.extend(tunnel_requests)
        if vlan_map_requests:
            requests.extend(vlan_map_requests)
        if vrf_map_requests:
            requests.extend(vrf_map_requests)

        return requests

    def get_delete_all_vxlan_request(self, have):
        """Build the requests removing every VXLAN currently on the device."""
        requests = []

        vrf_map_requests = []
        vlan_map_requests = []
        src_ip_requests = []
        primary_ip_requests = []
        tunnel_requests = []

        # Need to delete in reverse order of creation.
        # vrf_map needs to be cleared before vlan_map
        # vlan_map needs to be cleared before tunnel(source-ip)
        for conf in have:
            name = conf['name']
            vlan_map_list = conf.get('vlan_map', [])
            vrf_map_list = conf.get('vrf_map', [])
            src_ip = conf.get('source_ip', None)
            primary_ip = conf.get('primary_ip', None)

            if vrf_map_list:
                vrf_map_requests.extend(self.get_delete_vrf_map_request(conf, conf, name, vrf_map_list))
            if vlan_map_list:
                vlan_map_requests.extend(self.get_delete_vlan_map_request(conf, conf, name, vlan_map_list))
            if src_ip:
                src_ip_requests.extend(self.get_delete_src_ip_request(conf, conf, name, src_ip))
            if primary_ip:
                primary_ip_requests.extend(self.get_delete_primary_ip_request(conf, conf, name, primary_ip))
            tunnel_requests.extend(self.get_delete_tunnel_request(conf, conf, name))

        if vrf_map_requests:
            requests.extend(vrf_map_requests)
        if vlan_map_requests:
            requests.extend(vlan_map_requests)
        if src_ip_requests:
            requests.extend(src_ip_requests)
        if primary_ip_requests:
            requests.extend(primary_ip_requests)
        if tunnel_requests:
            requests.extend(tunnel_requests)

        return requests

    def get_delete_vxlan_request(self, configs, have):
        """Build delete requests for selected VXLAN attributes or whole tunnels.

        A config entry naming only the tunnel (no maps, no IPs) triggers a
        full delete of that tunnel; otherwise only the listed attributes are
        removed. Empty map lists are expanded to the device's current maps.
        """
        requests = []

        if not configs:
            return requests

        vrf_map_requests = []
        vlan_map_requests = []
        src_ip_requests = []
        primary_ip_requests = []
        tunnel_requests = []

        # Need to delete in the reverse order of creation.
        # vrf_map needs to be cleared before vlan_map
        # vlan_map needs to be cleared before tunnel(source-ip)
        for conf in configs:

            name = conf['name']
            src_ip = conf.get('source_ip', None)
            primary_ip = conf.get('primary_ip', None)
            vlan_map_list = conf.get('vlan_map', None)
            vrf_map_list = conf.get('vrf_map', None)

            have_vlan_map_count = 0
            have_vrf_map_count = 0
            matched = next((each_vxlan for each_vxlan in have if each_vxlan['name'] == name), None)
            if matched:
                have_vlan_map = matched.get('vlan_map', [])
                have_vrf_map = matched.get('vrf_map', [])
                if have_vlan_map:
                    have_vlan_map_count = len(have_vlan_map)
                if have_vrf_map:
                    have_vrf_map_count = len(have_vrf_map)

            # Bare entry (name only) => delete the whole tunnel and all maps.
            # NOTE(review): if 'matched' is None here (tunnel not on device),
            # matched.get(...) would raise AttributeError — confirm callers
            # guarantee the tunnel exists for bare entries.
            is_delete_full = False
            if (name and vlan_map_list is None and vrf_map_list is None and
                    src_ip is None and primary_ip is None):
                is_delete_full = True
                vrf_map_list = matched.get("vrf_map", [])
                vlan_map_list = matched.get("vlan_map", [])

            # An explicitly-empty map list means "delete all current maps".
            if vlan_map_list is not None and len(vlan_map_list) == 0 and matched:
                vlan_map_list = matched.get("vlan_map", [])
            if vrf_map_list is not None and len(vrf_map_list) == 0 and matched:
                vrf_map_list = matched.get("vrf_map", [])

            if vrf_map_list:
                temp_vrf_map_requests = self.get_delete_vrf_map_request(conf, matched, name, vrf_map_list)
                if temp_vrf_map_requests:
                    vrf_map_requests.extend(temp_vrf_map_requests)
                    have_vrf_map_count -= len(temp_vrf_map_requests)
            if vlan_map_list:
                temp_vlan_map_requests = self.get_delete_vlan_map_request(conf, matched, name, vlan_map_list)
                if temp_vlan_map_requests:
                    vlan_map_requests.extend(temp_vlan_map_requests)
                    have_vlan_map_count -= len(temp_vlan_map_requests)
            if src_ip:
                src_ip_requests.extend(self.get_delete_src_ip_request(conf, matched, name, src_ip))

            if primary_ip:
                primary_ip_requests.extend(self.get_delete_primary_ip_request(conf, matched, name, primary_ip))
            if is_delete_full:
                tunnel_requests.extend(self.get_delete_tunnel_request(conf, matched, name))

        if vrf_map_requests:
            requests.extend(vrf_map_requests)
        if vlan_map_requests:
            requests.extend(vlan_map_requests)
        if src_ip_requests:
            requests.extend(src_ip_requests)
        if primary_ip_requests:
            requests.extend(primary_ip_requests)
        if tunnel_requests:
            requests.extend(tunnel_requests)

        return requests

    def get_create_evpn_request(self, conf):
        """Return the PATCH request creating the EVPN NVO entry for this tunnel."""
        # Create URL and payload
        url = "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
        payload = self.build_create_evpn_payload(conf)
        request = {"path": url, "method": PATCH, "data": payload}

        return request

    def get_create_tunnel_request(self, configs, have):
        """Return PATCH requests creating the VXLAN tunnels (plus EVPN NVO when source_ip is set)."""
        # Create URL and payload
        requests = []
        url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL"
        for conf in configs:
            payload = self.build_create_tunnel_payload(conf)
            request = {"path": url, "method": PATCH, "data": payload}
            requests.append(request)
            if conf.get('source_ip', None):
                requests.append(self.get_create_evpn_request(conf))

        return requests

    def build_create_evpn_payload(self, conf):
        """Return the payload tying the EVPN NVO name to this source VTEP."""

        evpn_nvo_list = [{'name': conf['evpn_nvo'], 'source_vtep': conf['name']}]
        evpn_dict = {'sonic-vxlan:EVPN_NVO_LIST': evpn_nvo_list}

        return evpn_dict

    def build_create_tunnel_payload(self, conf):
        """Return the VXLAN_TUNNEL payload with optional src_ip / primary_ip."""
        payload_url = dict()

        vtep_ip_dict = dict()
        vtep_ip_dict['name'] = conf['name']
        if conf.get('source_ip', None):
            vtep_ip_dict['src_ip'] = conf['source_ip']
        if conf.get('primary_ip', None):
            vtep_ip_dict['primary_ip'] = conf['primary_ip']

        payload_url['sonic-vxlan:VXLAN_TUNNEL'] = {'VXLAN_TUNNEL_LIST': [vtep_ip_dict]}

        return payload_url

    def get_create_vlan_map_request(self, configs, have):
        """Return PATCH requests for vlan->vni maps that differ from the device state."""
        # Create URL and payload
        requests = []
        for conf in configs:
            new_vlan_map_list = conf.get('vlan_map', [])
            if new_vlan_map_list:
                for each_vlan_map in new_vlan_map_list:
                    name = conf['name']
                    vlan = each_vlan_map.get('vlan')
                    vni = each_vlan_map.get('vni')
                    matched = next((each_vxlan for each_vxlan in have if each_vxlan['name'] == name), None)

                    # Skip the request when the device already maps this vni
                    # to the same vlan.
                    is_change_needed = True
                    if matched:
                        matched_vlan_map_list = matched.get('vlan_map', [])
                        if matched_vlan_map_list:
                            matched_vlan_map = next((e_vlan_map for e_vlan_map in matched_vlan_map_list if e_vlan_map['vni'] == vni), None)
                            if matched_vlan_map:
                                if matched_vlan_map['vlan'] == vlan:
                                    is_change_needed = False

                    if is_change_needed:
                        # NOTE(review): 'map_name' is computed but unused here
                        # (the payload builder derives it itself) — confirm it
                        # can be dropped.
                        map_name = "map_{0}_Vlan{1}".format(vni, vlan)
                        payload = self.build_create_vlan_map_payload(conf, each_vlan_map)
                        url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
                        request = {"path": url, "method": PATCH, "data": payload}
                        requests.append(request)

        return requests

    def build_create_vlan_map_payload(self, conf, vlan_map):
        """Return the VXLAN_TUNNEL_MAP payload for one vlan->vni mapping."""
        payload_url = dict()

        vlan_map_dict = dict()
        vlan_map_dict['name'] = conf['name']
        vlan_map_dict['mapname'] = "map_{vni}_Vlan{vlan}".format(vni=vlan_map['vni'], vlan=vlan_map['vlan'])
        vlan_map_dict['vlan'] = "Vlan{vlan}".format(vlan=vlan_map['vlan'])
        vlan_map_dict['vni'] = vlan_map['vni']

        payload_url['sonic-vxlan:VXLAN_TUNNEL_MAP'] = {'VXLAN_TUNNEL_MAP_LIST': [vlan_map_dict]}

        return payload_url

    def get_create_vrf_map_request(self, configs, have):
        """Return PATCH requests for vrf->vni maps that differ from the device state."""
        # Create URL and payload
        requests = []
        for conf in configs:
            new_vrf_map_list = conf.get('vrf_map', [])
            if new_vrf_map_list:
                for each_vrf_map in new_vrf_map_list:
                    name = conf['name']
                    vrf = each_vrf_map.get('vrf')
                    vni = each_vrf_map.get('vni')
                    matched = next((each_vxlan for each_vxlan in have if each_vxlan['name'] == name), None)

                    # Skip the request when the device already maps this vni
                    # to the same vrf.
                    is_change_needed = True
                    if matched:
                        matched_vrf_map_list = matched.get('vrf_map', [])
                        if matched_vrf_map_list:
                            matched_vrf_map = next((e_vrf_map for e_vrf_map in matched_vrf_map_list if e_vrf_map['vni'] == vni), None)
                            if matched_vrf_map:
                                if matched_vrf_map['vrf'] == vrf:
                                    is_change_needed = False

                    if is_change_needed:
                        payload = self.build_create_vrf_map_payload(conf, each_vrf_map)
                        url = "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST={vrf}/vni".format(vrf=vrf)
                        request = {"path": url, "method": PATCH, "data": payload}
                        requests.append(request)

        return requests

    def build_create_vrf_map_payload(self, conf, vrf_map):
        """Return the payload setting the vni on a VRF entry."""

        payload_url = dict({"sonic-vrf:vni": vrf_map['vni']})
        return payload_url

    def get_delete_evpn_request(self, conf):
        """Return the DELETE request removing the EVPN NVO entry."""
        # Create URL and payload
        url = "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST={evpn_nvo}".format(evpn_nvo=conf['evpn_nvo'])
        request = {"path": url, "method": DELETE}

        return request

    def get_delete_tunnel_request(self, conf, matched, name):
        """Return the DELETE request removing the whole VXLAN tunnel."""
        # Create URL and payload
        requests = []

        url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST={name}".format(name=name)
        requests.append({"path": url, "method": DELETE})

        return requests

    def get_delete_src_ip_request(self, conf, matched, name, del_source_ip):
        """Return DELETE requests removing src_ip (and the EVPN NVO tied to it)."""
        # Create URL and payload
        requests = []

        url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST={name}/src_ip"

        # Only delete when the device's source_ip matches the requested one.
        is_change_needed = False
        if matched:
            matched_source_ip = matched.get('source_ip', None)
            if matched_source_ip and matched_source_ip == del_source_ip:
                is_change_needed = True

        # Delete the EVPN NVO if the source_ip address is being deleted.
        if is_change_needed:
            requests.append(self.get_delete_evpn_request(conf))
            request = {"path": url.format(name=name), "method": DELETE}
            requests.append(request)

        return requests

    def get_delete_primary_ip_request(self, conf, matched, name, del_primary_ip):
        """Return the DELETE request removing primary_ip when it matches the device."""
        # Create URL and payload
        requests = []

        url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST={name}/primary_ip"

        is_change_needed = False
        if matched:
            matched_primary_ip = matched.get('primary_ip', None)
            if matched_primary_ip and matched_primary_ip == del_primary_ip:
                is_change_needed = True

        if is_change_needed:
            request = {"path": url.format(name=name), "method": DELETE}
            requests.append(request)

        return requests

    def get_delete_vlan_map_request(self, conf, matched, name, del_vlan_map_list):
        """Return DELETE requests for vlan->vni maps that exist on the device."""
        # Create URL and payload
        requests = []

        for each_vlan_map in del_vlan_map_list:
            vlan = each_vlan_map.get('vlan')
            vni = each_vlan_map.get('vni')

            # Only delete a map that is present with the same vni->vlan pair.
            is_change_needed = False
            if matched:
                matched_vlan_map_list = matched.get('vlan_map', None)
                if matched_vlan_map_list:
                    matched_vlan_map = next((e_vlan_map for e_vlan_map in matched_vlan_map_list if e_vlan_map['vni'] == vni), None)
                    if matched_vlan_map:
                        if matched_vlan_map['vlan'] == vlan:
                            is_change_needed = True

            if is_change_needed:
                map_name = "map_{0}_Vlan{1}".format(vni, vlan)
                url = "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP/VXLAN_TUNNEL_MAP_LIST={name},{map_name}".format(name=name, map_name=map_name)
                request = {"path": url, "method": DELETE}
                requests.append(request)

        return requests

    def get_delete_vrf_map_request(self, conf, matched, name, del_vrf_map_list):
        """Return DELETE requests for vrf->vni maps that exist on the device."""
        # Create URL and payload
        requests = []

        for each_vrf_map in del_vrf_map_list:
            vrf = each_vrf_map.get('vrf')
            vni = each_vrf_map.get('vni')

            # Only delete a map that is present with the same vni->vrf pair.
            is_change_needed = False
            if matched:
                matched_vrf_map_list = matched.get('vrf_map', None)
                if matched_vrf_map_list:
                    matched_vrf_map = next((e_vrf_map for e_vrf_map in matched_vrf_map_list if e_vrf_map['vni'] == vni), None)
                    if matched_vrf_map:
                        if matched_vrf_map['vrf'] == vrf:
                            is_change_needed = True

            if is_change_needed:
                url = "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST={vrf}/vni".format(vrf=vrf)
                request = {"path": url, "method": DELETE}
                requests.append(request)

        return requests
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.aaa.aaa import AaaArgs + +GET = "get" + + +class AaaFacts(object): + """ The sonic aaa fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = AaaArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_aaa(self): + """Get aaa details available in chassis""" + request = [{"path": "data/openconfig-system:system/aaa", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + data = {} + if ('openconfig-system:aaa' in response[0][1]): + if ('authentication' in response[0][1]['openconfig-system:aaa']): + if ('config' in response[0][1]['openconfig-system:aaa']['authentication']): + data = response[0][1]['openconfig-system:aaa']['authentication']['config'] + return data + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for aaa + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = self.get_aaa() + objs = [] + objs = self.render_config(self.generated_spec, data) + facts = {} + if objs: + params = 
utils.validate_config(self.argument_spec, {'config': objs}) + facts['aaa'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = self.parse_sonic_aaa(spec, conf) + return config + + def parse_sonic_aaa(self, spec, conf): + config = deepcopy(spec) + if conf: + temp = {} + if ('authentication-method' in conf) and (conf['authentication-method']): + if 'local' in conf['authentication-method']: + temp['local'] = True + choices = ['tacacs+', 'ldap', 'radius'] + for i, word in enumerate(conf['authentication-method']): + if word in choices: + temp['group'] = conf['authentication-method'][i] + if ('failthrough' in conf): + temp['fail_through'] = conf['failthrough'] + if temp: + config['authentication']['data'] = temp + return utils.remove_empties(config) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py new file mode 100644 index 00000000..c86b53c2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py @@ -0,0 +1,156 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic bgp fact class +It is in this file the configuration is collected from the 
device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + remove_empties_from_list +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp.bgp import BgpArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import ( + get_bgp_data, +) + + +class BgpFacts(object): + """ The sonic bgp fact class + """ + + global_params_map = { + 'bgp_as': 'as', + 'router_id': 'router-id', + 'holdtime': 'hold-time', + 'keepalive_interval': 'keepalive-interval', + 'log_neighbor_changes': ['logging-options', 'log-neighbor-state-changes'], + 'as_path_confed': ['route-selection-options', 'compare-confed-as-path'], + 'as_path_ignore': ['route-selection-options', 'ignore-as-path-length'], + 'as_path_multipath_relax': ['use-multiple-paths', 'ebgp', 'config', 'allow-multiple-as'], + 'as_path_multipath_relax_as_set': ['use-multiple-paths', 'ebgp', 'config', 'as-set'], + 'compare_routerid': ['route-selection-options', 'external-compare-router-id'], + 'med_confed': ['route-selection-options', 'med-confed'], + 'med_missing_as_worst': ['route-selection-options', 'med-missing-as-worst'], + 'always_compare_med': ['route-selection-options', 'always-compare-med'], + 'admin_max_med': ['max-med', 'admin-max-med-val'], + 'max_med_on_startup_timer': ['max-med', 'time'], + 'max_med_on_startup_med_val': ['max-med', 'max-med-val'], + } + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = BgpArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if 
options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for BGP + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + objs = list() + if connection: # just for linting purposes, remove + pass + + if not data: + data = get_bgp_data(self._module, self.global_params_map) + self.normalise_bgp_data(data) + + # operate on a collection of resource x + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + # split the config into instances of the resource + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('bgp', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)}) + facts['bgp'] = params['config'] + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def normalise_bgp_data(self, data): + for conf in data: + bestpath = {} + med = {} + timers = {} + as_path = {} + max_med_on_start_up = {} + + conf['log_neighbor_changes'] = conf.get('log_neighbor_changes', False) + + as_path['confed'] = conf.get('as_path_confed', False) + as_path['ignore'] = conf.get('as_path_ignore', False) + as_path['multipath_relax'] = conf.get('as_path_multipath_relax', False) + as_path['multipath_relax_as_set'] = conf.get('as_path_multipath_relax_as_set', False) + bestpath['as_path'] = as_path + + med['confed'] = conf.get('med_confed', False) + med['missing_as_worst'] = conf.get('med_missing_as_worst', False) + med['always_compare_med'] = conf.get('always_compare_med', False) + bestpath['med'] = med + + timers['holdtime'] = conf.get('holdtime', None) + timers['keepalive_interval'] = 
conf.get('keepalive_interval', None) + conf['timers'] = timers + bestpath['compare_routerid'] = conf.get('compare_routerid', False) + + conf['bestpath'] = bestpath + + max_med_on_start_up["timer"] = conf.get('max_med_on_startup_timer', None) + max_med_on_start_up["med_val"] = conf.get('max_med_on_startup_med_val', None) + + conf['max_med'] = { + 'on_startup': max_med_on_start_up, + } + + keys = [ + 'as_path_confed', 'as_path_ignore', 'as_path_multipath_relax', 'as_path_multipath_relax_as_set', + 'med_confed', 'med_missing_as_worst', 'always_compare_med', 'max_med_val', 'holdtime', + 'keepalive_interval', 'compare_routerid', 'admin_max_med', 'max_med_on_startup_timer', + 'max_med_on_startup_med_val', + ] + for key in keys: + if key in conf: + conf.pop(key) + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + + return conf diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py new file mode 100644 index 00000000..fd37533e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py @@ -0,0 +1,258 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic bgp_af fact class +It is in this file the configuration is collected from the device 
+for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + remove_empties_from_list +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_af.bgp_af import Bgp_afArgs + +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import ( + get_bgp_af_data, + get_all_bgp_af_redistribute, +) + + +class Bgp_afFacts(object): + """ The sonic bgp_af fact class + """ + + afi_safi_types_map = { + 'openconfig-bgp-types:IPV4_UNICAST': 'ipv4_unicast', + 'openconfig-bgp-types:IPV6_UNICAST': 'ipv6_unicast', + 'openconfig-bgp-types:L2VPN_EVPN': 'l2vpn_evpn', + } + + af_params_map = { + 'afi': 'afi-safi-name', + 'route_map': 'policy-name', + 'prefix': 'prefix', + 'neighbor': 'neighbor-address', + 'route_reflector_client': 'route-reflector-client', + 'route_server_client': 'route-server-client', + 'next_hop_self': ['next-hop-self', 'enabled'], + 'remove_private_as': ['remove-private-as', 'enabled'], + 'prefix_list_in': ['prefix-list', 'import-policy'], + 'prefix_list_out': ['prefix-list', 'export-policy'], + 'maximum_prefix': ['prefix-limit', 'max-prefixes'], + 'activate': 'enabled', + 'advertise_pip': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'advertise-pip'], + 'advertise_pip_ip': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'advertise-pip-ip'], + 'advertise_pip_peer_ip': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'advertise-pip-peer-ip'], + 'advertise_svi_ip': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'advertise-svi-ip'], + 'advertise_all_vni': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 
'advertise-all-vni'], + 'advertise_default_gw': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'advertise-default-gw'], + 'ebgp': ['use-multiple-paths', 'ebgp', 'maximum-paths'], + 'ibgp': ['use-multiple-paths', 'ibgp', 'maximum-paths'], + 'network': ['network-config', 'network'], + 'dampening': ['route-flap-damping', 'config', 'enabled'], + 'route_advertise_list': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:route-advertise', 'route-advertise-list'], + } + + af_redis_params_map = { + 'protocol': 'src-protocol', + 'afi': 'address-family', + 'metric': 'metric', + 'route_map': 'import-policy' + } + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Bgp_afArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for BGP + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + objs = list() + if connection: # just for linting purposes, remove + pass + if not data: + data = get_bgp_af_data(self._module, self.af_params_map) + vrf_list = [e_bgp_af['vrf_name'] for e_bgp_af in data] + self.update_max_paths(data) + self.update_network(data) + self.update_route_advertise_list(data) + bgp_redis_data = get_all_bgp_af_redistribute(self._module, vrf_list, self.af_redis_params_map) + self.update_redis_data(data, bgp_redis_data) + self.update_afis(data) + + # operate on a collection of resource x + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + # split the config into instances of the resource + if obj: + objs.append(obj) + + 
ansible_facts['ansible_network_resources'].pop('bgp_af', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)}) + facts['bgp_af'] = params['config'] + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + + return conf + + def check_afi(self, afi, redis_data): + afi_rhs = afi + afi_lhs = redis_data.get('afi', None) + return (afi_lhs and (afi_rhs == afi_lhs)) + + def update_redis_data(self, objs, af_redis_data): + if not (af_redis_data or objs): + return + + for conf in objs: + vrf_name = conf['vrf_name'] + raw_af_redis_data = next((e_af_redis for e_af_redis in af_redis_data if vrf_name in e_af_redis), None) + if not raw_af_redis_data: + continue + norm_af_redis_data = self.normalize_af_redis_params(raw_af_redis_data[vrf_name]) + if norm_af_redis_data: + if 'address_family' in conf: + afs = conf['address_family'] + if not afs: + continue + for e_af in afs: + if 'afi' in e_af: + afi = e_af['afi'] + redis_arr = [] + for e_redis_data in norm_af_redis_data: + if self.check_afi(afi, e_redis_data): + e_redis_data.pop('afi') + redis_arr.append(e_redis_data) + e_af.update({'redistribute': redis_arr}) + else: + addr_fams = [] + for e_norm_af_redis in norm_af_redis_data: + afi = e_norm_af_redis['afi'] + e_norm_af_redis.pop('afi') + mat_addr_fam = next((each_addr_fam for each_addr_fam in addr_fams if each_addr_fam['afi'] == afi), None) + if mat_addr_fam: + mat_addr_fam['redistribute'].append(e_norm_af_redis) + else: + addr_fams.append({'redistribute': [e_norm_af_redis], 'afi': afi}) + + if addr_fams: + conf.update({'address_family': addr_fams}) + + def update_max_paths(self, data): + for conf in 
data: + afs = conf.get('address_family', []) + if afs: + for af in afs: + max_path = {} + ebgp = af.get('ebgp', None) + if ebgp: + af.pop('ebgp') + max_path['ebgp'] = ebgp + ibgp = af.get('ibgp', None) + if ibgp: + af.pop('ibgp') + max_path['ibgp'] = ibgp + if max_path: + af['max_path'] = max_path + + def update_network(self, data): + for conf in data: + afs = conf.get('address_family', []) + if afs: + for af in afs: + temp = [] + network = af.get('network', None) + if network: + for e in network: + prefix = e.get('prefix', None) + if prefix: + temp.append(prefix) + af['network'] = temp + dampening = af.get('dampening', None) + if dampening: + af.pop('dampening') + af['dampening'] = dampening + + def update_afis(self, data): + for conf in data: + if 'address_family' in conf: + conf['address_family'] = {'afis': conf['address_family']} + + def update_route_advertise_list(self, data): + for conf in data: + afs = conf.get('address_family', []) + if afs: + for af in afs: + rt_adv_lst = [] + route_advertise_list = af.get('route_advertise_list', None) + if route_advertise_list: + for rt in route_advertise_list: + rt_adv_dict = {} + advertise_afi = rt['advertise-afi-safi'].split(':')[1].split('_')[0].lower() + route_map_config = rt['config'] + route_map = route_map_config.get('route-map', None) + if advertise_afi: + rt_adv_dict['advertise_afi'] = advertise_afi + if route_map: + rt_adv_dict['route_map'] = route_map[0] + if rt_adv_dict and rt_adv_dict not in rt_adv_lst: + rt_adv_lst.append(rt_adv_dict) + af['route_advertise_list'] = rt_adv_lst + + def normalize_af_redis_params(self, af): + norm_af = list() + for e_af in af: + temp = e_af.copy() + for key, val in e_af.items(): + if 'afi' == key or 'protocol' == key and val: + if ':' in val: + temp[key] = val.split(':')[1].lower() + if '_' in val: + temp[key] = val.split('_')[1].lower() + elif 'route_map' == key and val: + temp['route_map'] = val[0] + + norm_af.append(temp) + return norm_af diff --git 
a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py new file mode 100644 index 00000000..822db22a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py @@ -0,0 +1,129 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic bgp_as_paths fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_as_paths.bgp_as_paths import Bgp_as_pathsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + + +class Bgp_as_pathsFacts(object): + """ The sonic bgp_as_paths fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Bgp_as_pathsArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_as_path_list(self): + url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets" + method = "GET" + request = [{"path": url, "method": method}] + + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + as_path_lists = [] + if "openconfig-bgp-policy:as-path-sets" in response[0][1]: + temp = response[0][1].get("openconfig-bgp-policy:as-path-sets", {}) + if "as-path-set" in temp: + as_path_lists = temp["as-path-set"] + + as_path_list_configs = [] + for as_path in as_path_lists: + result = dict() + as_name = as_path["as-path-set-name"] + member_config = as_path['config'] + members = member_config.get("as-path-set-member", []) + permit_str = member_config.get("openconfig-bgp-policy-ext:action", None) + result['name'] = as_name + 
result['members'] = members + if permit_str and permit_str == "PERMIT": + result['permit'] = True + else: + result['permit'] = False + as_path_list_configs.append(result) + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('as_path_list: ' + str(as_path_list_configs) + '\n') + return as_path_list_configs + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for as_path_list + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + resources = self.get_as_path_list() + + objs = [] + for resource in resources: + if resource: + obj = self.render_config(self.generated_spec, resource) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('bgp_as_paths', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['bgp_as_paths'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + try: + config['name'] = str(conf['name']) + config['members'] = conf['members'] + config['permit'] = conf['permit'] + except TypeError: + config['name'] = None + config['members'] = None + config['permit'] = None + return utils.remove_empties(config) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py new file mode 100644 index 00000000..ffa29422 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py @@ -0,0 +1,145 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic bgp_communities fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_communities.bgp_communities import Bgp_communitiesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + + +class Bgp_communitiesFacts(object): + """ The sonic bgp_communities fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Bgp_communitiesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_bgp_communities(self): + 
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets" + method = "GET" + request = [{"path": url, "method": method}] + + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + bgp_communities = [] + if "openconfig-bgp-policy:community-sets" in response[0][1]: + temp = response[0][1].get("openconfig-bgp-policy:community-sets", {}) + if "community-set" in temp: + bgp_communities = temp["community-set"] + + bgp_communities_configs = [] + for bgp_community in bgp_communities: + result = dict() + name = bgp_community["community-set-name"] + member_config = bgp_community['config'] + match = member_config['match-set-options'] + permit_str = member_config.get('openconfig-bgp-policy-ext:action', None) + members = member_config.get("community-member", []) + result['name'] = name + result['match'] = match + if permit_str and permit_str == 'PERMIT': + result['permit'] = True + else: + result['permit'] = False + if members: + result['type'] = 'expanded' if 'REGEX' in members[0] else 'standard' + else: + result['type'] = '' + if result['type'] == 'expanded': + members = [':'.join(i.split(':')[1:]) for i in members] + result['local_as'] = True if "NO_EXPORT_SUBCONFED" in members else False + result['no_advertise'] = True if "NO_ADVERTISE" in members else False + result['no_export'] = True if "NO_EXPORT" in members else False + result['no_peer'] = True if "NOPEER" in members else False + result['members'] = {'regex': members} + bgp_communities_configs.append(result) + # with open('/root/ansible_log.log', 'a+') as fp: + # fp.write('bgp_communities: ' + str(bgp_communities_configs) + '\n') + return bgp_communities_configs + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for bgp_communities + :param connection: the device connection + :param ansible_facts: Facts 
dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + resources = self.get_bgp_communities() + + objs = [] + for resource in resources: + if resource: + obj = self.render_config(self.generated_spec, resource) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('bgp_communities', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['bgp_communities'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + try: + config['name'] = str(conf['name']) + config['members'] = conf['members'] + config['match'] = conf['match'] + config['type'] = conf['type'] + config['permit'] = conf['permit'] + except TypeError: + config['name'] = None + config['members'] = None + config['match'] = None + config['type'] = None + config['permit'] = None + return utils.remove_empties(config) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py new file mode 100644 index 00000000..b1d7c4ad --- /dev/null +++ 
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic bgp_ext_communities fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_ext_communities.bgp_ext_communities import (
    Bgp_ext_communitiesArgs,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils.connection import ConnectionError


class Bgp_ext_communitiesFacts(object):
    """ The sonic bgp_ext_communities fact class
    """

    def __init__(self, module, subspec='config', options='options'):
        # Narrow the full argspec down to the sub-tree this fact module
        # renders, and pre-generate the corresponding facts skeleton.
        self._module = module
        self.argument_spec = Bgp_ext_communitiesArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def get_bgp_extcommunities(self):
        # Query all extended community sets from the device and normalize
        # them into the argspec 'bgp_ext_communities' dictionary layout.
        url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
        method = "GET"
        request = [{"path": url, "method": method}]

        try:
            response = edit_config(self._module, to_request(self._module, request))
        except ConnectionError as exc:
            self._module.fail_json(msg=str(exc), code=exc.code)

        bgp_extcommunities = []
        if "openconfig-bgp-policy:ext-community-sets" in response[0][1]:
            temp = response[0][1].get("openconfig-bgp-policy:ext-community-sets", {})
            if "ext-community-set" in temp:
                bgp_extcommunities = temp["ext-community-set"]

        bgp_extcommunities_configs = []
        for bgp_extcommunity in bgp_extcommunities:
            result = dict()
            name = bgp_extcommunity["ext-community-set-name"]
            member_config = bgp_extcommunity['config']
            match = member_config['match-set-options']
            permit_str = member_config.get('openconfig-bgp-policy-ext:action', None)
            members = member_config.get("ext-community-member", [])
            result['name'] = name
            result['match'] = match.lower()

            # Only an explicit PERMIT action maps to permit=True.
            if permit_str and permit_str == 'PERMIT':
                result['permit'] = True
            else:
                result['permit'] = False

            # Classify members by their device-side prefix: 'route-target'
            # (rt), 'route-origin' (soo) or 'REGEX' (expanded set).  The
            # prefix up to the first ':' is stripped from the stored value.
            result['members'] = dict()
            rt = list()
            soo = list()
            regex = list()
            for member in members:
                if member.startswith('route-target'):
                    rt.append(':'.join(member.split(':')[1:]))
                elif member.startswith('route-origin'):
                    soo.append(':'.join(member.split(':')[1:]))
                elif member.startswith('REGEX'):
                    regex.append(':'.join(member.split(':')[1:]))

            # Any regex member makes the whole set an 'expanded' set.
            result['type'] = 'standard'
            if regex and len(regex) > 0:
                result['type'] = 'expanded'
                result['members']['regex'] = regex
            if rt and len(rt) > 0:
                result['members']['route_target'] = rt
            if soo and len(soo) > 0:
                result['members']['route_origin'] = soo

            bgp_extcommunities_configs.append(result)

        return bgp_extcommunities_configs

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for bgp_ext_communities
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if connection:  # just for linting purposes, remove
            pass

        # NOTE(review): if a non-empty 'data' is ever supplied, 'resources'
        # is never bound and the loop below raises NameError -- confirm that
        # callers always pass data=None.
        if not data:
            resources = self.get_bgp_extcommunities()

        objs = []
        for resource in resources:
            if resource:
                obj = self.render_config(self.generated_spec, resource)
                if obj:
                    objs.append(obj)

        # Replace any stale entry before publishing the validated facts.
        ansible_facts['ansible_network_resources'].pop('bgp_ext_communities', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': objs})
            facts['bgp_ext_communities'] = params['config']

        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        try:
            config['name'] = str(conf['name'])
            config['members'] = conf['members']
            config['match'] = conf['match']
            config['type'] = conf['type']
            config['permit'] = conf['permit']
        except TypeError:
            # conf may be None; fall back to an all-None entry that
            # remove_empties() strips down.
            config['name'] = None
            config['members'] = None
            config['match'] = None
            config['type'] = None
            config['permit'] = None
        return utils.remove_empties(config)
sonic bgp_neighbors fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    remove_empties_from_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_neighbors.bgp_neighbors import Bgp_neighborsArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
    get_all_bgp_neighbors,
    get_from_params_map,
    get_peergroups,
)


class Bgp_neighborsFacts(object):
    """ The sonic bgp_neighbors fact class
    """

    # Maps argspec keys to the device-side leaf name, or to a
    # [container, leaf] path, consumed by get_from_params_map().
    neighbor_params_map = {
        'neighbor': 'neighbor-address',
        'peer_as': 'peer-as',
        'peer_type': 'peer-type',
        'peer_group': 'peer-group',
        'keepalive': 'keepalive-interval',
        'holdtime': 'hold-time',
        'connect_retry': 'connect-retry',
        'advertisement_interval': 'minimum-advertisement-interval',
        'bfd_enabled': ['enable-bfd', 'enabled'],
        'check_failure': ['enable-bfd', 'check-control-plane-failure'],
        'profile': ['enable-bfd', 'bfd-profile'],
        'dynamic': 'capability-dynamic',
        'extended_nexthop': 'capability-extended-nexthop',
        'pwd': ['auth-password', 'password'],
        'encrypted': ['auth-password', 'encrypted'],
        'nbr_description': 'description',
        'disable_connected_check': 'disable-ebgp-connected-route-check',
        'dont_negotiate_capability': 'dont-negotiate-capability',
        'enforce_first_as': 'enforce-first-as',
        'enforce_multihop': 'enforce-multihop',
        'local_address': ['transport', 'config', 'local-address'],
        'as': 'local-as',
        'no_prepend': 'local-as-no-prepend',
        'replace_as': 'local-as-replace-as',
        'override_capability': 'override-capability',
        'port': 'peer-port',
        'shutdown_msg': 'shutdown-message',
        'solo': 'solo-peer',
        'strict_capability_match': 'strict-capability-match',
        'ttl_security': 'ttl-security-hops',
        'enabled': ['ebgp-multihop', 'enabled'],
        'multihop_ttl': ['ebgp-multihop', 'multihop-ttl'],
        'v6only': 'openconfig-bgp-ext:v6only',
        'passive': ['transport', 'config', 'passive-mode']
    }

    def __init__(self, module, subspec='config', options='options'):
        # Narrow the full argspec down to the sub-tree this fact module
        # renders, and pre-generate the corresponding facts skeleton.
        self._module = module
        self.argument_spec = Bgp_neighborsArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for BGP
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        objs = list()

        if not data:
            data = get_all_bgp_neighbors(self._module)
            # Reshape the raw device data into the argspec layout; keep the
            # raw data only if filtering produced nothing.
            filtered_data = self.filter_neighbors_data(data)
            if filtered_data:
                data = filtered_data

        for conf in data:
            if conf:
                obj = self.render_config(self.generated_spec, conf)
                if obj:
                    objs.append(obj)

        # Replace any stale entry before publishing the validated facts.
        ansible_facts['ansible_network_resources'].pop('bgp_neighbors', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
            facts['bgp_neighbors'] = params['config']
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """

        # Data is already normalized by filter_neighbors_data().
        return conf

    def filter_neighbors_data(self, data):
        # Reshape per-VRF neighbor data: flat leaves mapped through
        # neighbor_params_map are regrouped into the nested argspec
        # containers (capability, remote_as, auth_pwd, ebgp_multihop,
        # local_as, bfd).
        filtered_data = []
        for conf in data:
            vrf_name = conf['vrf_name']
            tmp = {}
            bgp_as = conf['bgp_as']
            val = None
            if 'neighbors' in conf and 'neighbor' in conf['neighbors']:
                val = conf['neighbors']['neighbor']

            tmp['vrf_name'] = vrf_name
            tmp['bgp_as'] = bgp_as
            peergroup = get_peergroups(self._module, vrf_name)
            if peergroup:
                tmp['peer_group'] = peergroup
            fil_neighbors = []
            if val:
                for neighbor in val:
                    fil_neighbor = get_from_params_map(self.neighbor_params_map, neighbor)
                    # Group capability flags.
                    capability = {}
                    capability_dynamic = fil_neighbor.get('dynamic', None)
                    if capability_dynamic is not None:
                        capability['dynamic'] = capability_dynamic
                        fil_neighbor.pop('dynamic')
                    capability_extended_nexthop = fil_neighbor.get('extended_nexthop', None)
                    if capability_extended_nexthop is not None:
                        capability['extended_nexthop'] = capability_extended_nexthop
                        fil_neighbor.pop('extended_nexthop')
                    if capability:
                        fil_neighbor['capability'] = capability
                    # Group remote-as attributes; peer type is lower-cased
                    # to match the argspec choices.
                    remote = {}
                    peer_as = fil_neighbor.get('peer_as', None)
                    if peer_as is not None:
                        remote['peer_as'] = peer_as
                        fil_neighbor.pop('peer_as')
                    peer_type = fil_neighbor.get('peer_type', None)
                    if peer_type is not None:
                        remote['peer_type'] = peer_type.lower()
                        fil_neighbor.pop('peer_type')
                    if remote:
                        fil_neighbor['remote_as'] = remote
                    # Group authentication password attributes.
                    auth_pwd = {}
                    pwd = fil_neighbor.get('pwd', None)
                    if pwd is not None:
                        auth_pwd['pwd'] = pwd
                        fil_neighbor.pop('pwd')
                    encrypted = fil_neighbor.get('encrypted', None)
                    if encrypted is not None:
                        auth_pwd['encrypted'] = encrypted
                        fil_neighbor.pop('encrypted')
                    # Group eBGP multihop attributes.
                    ebgp_multihop = {}
                    enabled = fil_neighbor.get('enabled', None)
                    if enabled is not None:
                        ebgp_multihop['enabled'] = enabled
                        fil_neighbor.pop('enabled')
                    multihop_ttl = fil_neighbor.get('multihop_ttl', None)
                    if multihop_ttl is not None:
                        ebgp_multihop['multihop_ttl'] = multihop_ttl
                        fil_neighbor.pop('multihop_ttl')
                    # Group local-as attributes.
                    local_as = {}
                    asn = fil_neighbor.get('as', None)
                    if asn is not None:
                        local_as['as'] = asn
                        fil_neighbor.pop('as')
                    no_prepend = fil_neighbor.get('no_prepend', None)
                    if no_prepend is not None:
                        local_as['no_prepend'] = no_prepend
                        fil_neighbor.pop('no_prepend')
                    replace_as = fil_neighbor.get('replace_as', None)
                    if replace_as is not None:
                        local_as['replace_as'] = replace_as
                        fil_neighbor.pop('replace_as')
                    # Group BFD attributes.
                    bfd = {}
                    bfd_enabled = fil_neighbor.get('bfd_enabled', None)
                    if bfd_enabled is not None:
                        bfd['enabled'] = bfd_enabled
                        fil_neighbor.pop('bfd_enabled')
                    check_failure = fil_neighbor.get('check_failure', None)
                    if check_failure is not None:
                        bfd['check_failure'] = check_failure
                        fil_neighbor.pop('check_failure')
                    profile = fil_neighbor.get('profile', None)
                    if profile is not None:
                        bfd['profile'] = profile
                        fil_neighbor.pop('profile')
                    if auth_pwd:
                        fil_neighbor['auth_pwd'] = auth_pwd
                    if ebgp_multihop:
                        fil_neighbor['ebgp_multihop'] = ebgp_multihop
                    if local_as:
                        fil_neighbor['local_as'] = local_as
                    if bfd:
                        fil_neighbor['bfd'] = bfd
                    if fil_neighbor:
                        fil_neighbors.append(fil_neighbor)
            if fil_neighbors:
                tmp['neighbors'] = fil_neighbors
            filtered_data.append(tmp)
        return filtered_data
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic bgp_neighbors_af fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
    remove_empties_from_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_neighbors_af.bgp_neighbors_af import Bgp_neighbors_afArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
    get_all_bgp_neighbors,
    get_from_params_map,
    update_bgp_nbr_pg_ip_afi_dict,
    update_bgp_nbr_pg_prefix_limit_dict
)


class Bgp_neighbors_afFacts(object):
    """ The sonic bgp_neighbors_af fact class
    """

    # Maps argspec keys to the device-side leaf name, or to a
    # [container, leaf] path, consumed by get_from_params_map().
    neighbor_af_params_map = {
        'afi': 'afi-safi-name',
        'route_reflector_client': 'route-reflector-client',
        'route_server_client': 'route-server-client',
        'allowas_in_origin': ['allow-own-as', 'origin'],
        'allowas_in_value': ['allow-own-as', 'as-count'],
        'in_route_name': ['apply-policy', 'import-policy'],
        'out_route_name': ['apply-policy', 'export-policy'],
        'activate': 'enabled',
        'prefix_list_in': ['prefix-list', 'import-policy'],
        'prefix_list_out': ['prefix-list', 'export-policy'],
        'ipv4_unicast': 'ipv4-unicast',
        'ipv6_unicast': 'ipv6-unicast',
        'l2vpn_evpn': ['l2vpn-evpn', 'prefix-limit']
    }

    def __init__(self, module, subspec='config', options='options'):
        # Narrow the full argspec down to the sub-tree this fact module
        # renders, and pre-generate the corresponding facts skeleton.
        self._module = module
        self.argument_spec = Bgp_neighbors_afArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def fill_route_map(self, data):
        # Fold the separate in/out policy lists into a single 'route_map'
        # list of {'name', 'direction'} entries.  Assumes data['route_map']
        # has already been initialized by the caller.
        for route_map_key in ['out_route_name', 'in_route_name']:
            if route_map_key in data:
                route_map = data['route_map']
                for e_route in data[route_map_key]:
                    # 'out_route_name' -> 'out', 'in_route_name' -> 'in'.
                    direction = route_map_key.split('_', maxsplit=1)[0]
                    route_map.append({'name': e_route, 'direction': direction})
                data.pop(route_map_key)

    def normalize_neighbors_af_data(self, neighbors):
        # Convert each neighbor's per-AFI device data into the argspec
        # 'address_family' layout, including route maps, allowas-in and the
        # per-family ip_afi / prefix_limit sub-dictionaries.
        norm_neighbors = []

        for nei_data in neighbors:
            norm_neighbor = {}

            neighbor = nei_data.get('neighbor-address', None)
            if not neighbor:
                continue
            norm_neighbor['neighbor'] = neighbor
            norm_neighbor['address_family'] = []
            nei_afs = nei_data.get('afi-safis', None)
            if not nei_afs:
                if norm_neighbor:
                    norm_neighbors.append(norm_neighbor)
                continue
            nei_afs = nei_afs.get('afi-safi', None)
            if not nei_afs:
                if norm_neighbor:
                    norm_neighbors.append(norm_neighbor)
                continue
            norm_neighbor_afs = []
            for nei_af in nei_afs:
                norm_nei_af = get_from_params_map(self.neighbor_af_params_map, nei_af)
                if norm_nei_af:
                    # Absent booleans default to False in the facts.
                    if 'activate' not in norm_nei_af:
                        norm_nei_af['activate'] = False
                    if 'route_server_client' not in norm_nei_af:
                        norm_nei_af['route_server_client'] = False
                    norm_nei_af['route_map'] = []
                    self.fill_route_map(norm_nei_af)

                    allowas_in = {}
                    allowas_in_origin = norm_nei_af.get('allowas_in_origin', None)
                    if allowas_in_origin is not None:
                        allowas_in['origin'] = allowas_in_origin
                        norm_nei_af.pop('allowas_in_origin')

                    allowas_in_value = norm_nei_af.get('allowas_in_value', None)
                    if allowas_in_value is not None:
                        allowas_in['value'] = allowas_in_value
                        norm_nei_af.pop('allowas_in_value')
                    if allowas_in:
                        norm_nei_af['allowas_in'] = allowas_in

                    # Exactly one of the per-family containers is expected
                    # per AFI entry.
                    ipv4_unicast = norm_nei_af.get('ipv4_unicast', None)
                    ipv6_unicast = norm_nei_af.get('ipv6_unicast', None)
                    l2vpn_evpn = norm_nei_af.get('l2vpn_evpn', None)
                    if ipv4_unicast:
                        if 'config' in ipv4_unicast:
                            ip_afi = update_bgp_nbr_pg_ip_afi_dict(ipv4_unicast['config'])
                            if ip_afi:
                                norm_nei_af['ip_afi'] = ip_afi
                        if 'prefix-limit' in ipv4_unicast and 'config' in ipv4_unicast['prefix-limit']:
                            prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(ipv4_unicast['prefix-limit']['config'])
                            if prefix_limit:
                                norm_nei_af['prefix_limit'] = prefix_limit
                        norm_nei_af.pop('ipv4_unicast')
                    elif ipv6_unicast:
                        if 'config' in ipv6_unicast:
                            ip_afi = update_bgp_nbr_pg_ip_afi_dict(ipv6_unicast['config'])
                            if ip_afi:
                                norm_nei_af['ip_afi'] = ip_afi
                        if 'prefix-limit' in ipv6_unicast and 'config' in ipv6_unicast['prefix-limit']:
                            prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(ipv6_unicast['prefix-limit']['config'])
                            if prefix_limit:
                                norm_nei_af['prefix_limit'] = prefix_limit
                        norm_nei_af.pop('ipv6_unicast')
                    elif l2vpn_evpn:
                        if 'config' in l2vpn_evpn:
                            prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(l2vpn_evpn['config'])
                            if prefix_limit:
                                norm_nei_af['prefix_limit'] = prefix_limit
                        norm_nei_af.pop('l2vpn_evpn')

                    norm_neighbor_afs.append(norm_nei_af)
            if norm_neighbor_afs:
                norm_neighbor['address_family'] = norm_neighbor_afs
            if norm_neighbor:
                norm_neighbors.append(norm_neighbor)
        return norm_neighbors

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for BGP
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        objs = list()
        if connection:  # just for linting purposes, remove
            pass

        if not data:
            data = get_all_bgp_neighbors(self._module)

        # Rebuild each per-VRF entry, keeping only bgp_as/vrf_name and the
        # normalized neighbor address-family data.
        new_data = []
        for conf in data:
            if not conf:
                continue
            new_item = {}
            new_item['bgp_as'] = conf['bgp_as']
            new_item['vrf_name'] = conf['vrf_name']
            neighbors = conf.get('neighbors', None)
            if not neighbors:
                new_data.append(new_item)
                continue
            neighbors = neighbors.get('neighbor', None)
            if not neighbors:
                new_data.append(new_item)
                continue

            new_neighbors = self.normalize_neighbors_af_data(neighbors)
            if new_neighbors:
                new_item['neighbors'] = new_neighbors
            if new_item:
                new_data.append(new_item)

        # operate on a collection of resource x
        for conf in new_data:
            if conf:
                obj = self.render_config(self.generated_spec, conf)
                # split the config into instances of the resource
                if obj:
                    objs.append(obj)

        # Replace any stale entry before publishing the validated facts.
        ansible_facts['ansible_network_resources'].pop('bgp_neighbors_af', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
            facts['bgp_neighbors_af'] = params['config']
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        # Data is already normalized by normalize_neighbors_af_data().
        return conf
All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts class for sonic
this file validates each subset of facts and selectively
calls the appropriate facts gathering function
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.facts.facts import FactsArgs
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import (
    FactsBase,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vlans.vlans import VlansFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.interfaces.interfaces import InterfacesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l2_interfaces.l2_interfaces import L2_interfacesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l3_interfaces.l3_interfaces import L3_interfacesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.lag_interfaces.lag_interfaces import Lag_interfacesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp.bgp import BgpFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_af.bgp_af import Bgp_afFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_neighbors.bgp_neighbors import Bgp_neighborsFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_neighbors_af.bgp_neighbors_af import Bgp_neighbors_afFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_as_paths.bgp_as_paths import Bgp_as_pathsFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_communities.bgp_communities import Bgp_communitiesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_ext_communities.bgp_ext_communities import (
    Bgp_ext_communitiesFacts,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.mclag.mclag import MclagFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.prefix_lists.prefix_lists import Prefix_listsFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vrfs.vrfs import VrfsFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vxlans.vxlans import VxlansFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.users.users import UsersFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.port_breakout.port_breakout import Port_breakoutFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.aaa.aaa import AaaFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.tacacs_server.tacacs_server import Tacacs_serverFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.system.system import SystemFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.radius_server.radius_server import Radius_serverFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.static_routes.static_routes import Static_routesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.ntp.ntp import NtpFacts

# No legacy (pre-resource-module) fact subsets are implemented for sonic.
FACT_LEGACY_SUBSETS = {}
# Registry of resource-module fact collectors, keyed by subset name as
# accepted by gather_network_resources.
FACT_RESOURCE_SUBSETS = dict(
    vlans=VlansFacts,
    interfaces=InterfacesFacts,
    l2_interfaces=L2_interfacesFacts,
    l3_interfaces=L3_interfacesFacts,
    lag_interfaces=Lag_interfacesFacts,
    bgp=BgpFacts,
    bgp_af=Bgp_afFacts,
    bgp_neighbors=Bgp_neighborsFacts,
    bgp_neighbors_af=Bgp_neighbors_afFacts,
    bgp_as_paths=Bgp_as_pathsFacts,
    bgp_communities=Bgp_communitiesFacts,
    bgp_ext_communities=Bgp_ext_communitiesFacts,
    mclag=MclagFacts,
    prefix_lists=Prefix_listsFacts,
    vrfs=VrfsFacts,
    vxlans=VxlansFacts,
    users=UsersFacts,
    system=SystemFacts,
    port_breakout=Port_breakoutFacts,
    aaa=AaaFacts,
    tacacs_server=Tacacs_serverFacts,
    radius_server=Radius_serverFacts,
    static_routes=Static_routesFacts,
    ntp=NtpFacts,
)


class Facts(FactsBase):
    """ The fact class for sonic
    """

    VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
    VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())

    def __init__(self, module):
        super(Facts, self).__init__(module)

    def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
        """ Collect the facts for sonic

        :param legacy_facts_type: List of legacy facts types
        :param resource_facts_type: List of resource fact types
        :param data: previously collected conf
        :rtype: dict
        :return: the facts gathered
        """
        # NOTE(review): 'netres_choices' is computed but never used below.
        netres_choices = FactsArgs.argument_spec['gather_network_resources'].get('choices', [])
        if self.VALID_RESOURCE_SUBSETS:
            self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data)

        if self.VALID_LEGACY_GATHER_SUBSETS:
            self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)

        return self.ansible_facts, self._warnings
#
# -*- coding: utf-8 -*-
# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The sonic interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.interfaces.interfaces import InterfacesArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils.connection import ConnectionError

GET = "get"


class InterfacesFacts(object):
    """ The sonic interfaces fact class
    """
    # Comma-delimited registry of Loopback names already reported, used to
    # de-duplicate "Loopback0|..." sub-interface entries.
    loop_backs = ","

    def __init__(self, module, subspec='config', options='options'):
        # Narrow the full argspec down to the sub-tree this fact module
        # renders, and pre-generate the corresponding facts skeleton.
        self._module = module
        self.argument_spec = InterfacesArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def get_all_interfaces(self):
        """Get all the interfaces available in chassis"""
        all_interfaces = {}
        request = [{"path": "data/openconfig-interfaces:interfaces", "method": GET}]
        try:
            response = edit_config(self._module, to_request(self._module, request))
        except ConnectionError as exc:
            self._module.fail_json(msg=str(exc), code=exc.code)

        if "openconfig-interfaces:interfaces" in response[0][1]:
            all_interfaces = response[0][1].get("openconfig-interfaces:interfaces", {})
        # Fix: use .get() so a response without any interfaces container (or
        # without the 'interface' list) yields an empty list instead of
        # raising KeyError on the empty default dict.
        return all_interfaces.get('interface', [])

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        objs = []
        if connection:  # just for linting purposes, remove
            pass

        if not data:
            # typically data is populated from the current device configuration
            # data = connection.get('show running-config | section ^interface')
            # using mock data instead
            data = self.get_all_interfaces()
        # operate on a collection of resource x
        self.reset_loop_backs()

        for conf in data:
            if conf:
                obj = self.render_config(self.generated_spec, conf)
                obj = self.transform_config(obj)
                # split the config into instances of the resource
                if obj:
                    objs.append(obj)

        # Replace any stale entry before publishing the validated facts.
        ansible_facts['ansible_network_resources'].pop('interfaces', None)
        facts = {}
        if objs:
            facts['interfaces'] = []
            params = utils.validate_config(self.argument_spec, {'config': objs})
            if params:
                facts['interfaces'].extend(params['config'])
        ansible_facts['ansible_network_resources'].update(facts)

        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        # Raw device data is reshaped afterwards by transform_config().
        return conf

    def transform_config(self, conf):
        """Convert one device interface entry to the argspec layout.

        Returns None for entries that are filtered out (the management
        interface "eth0" and duplicate Loopback sub-interfaces).
        """
        exist_cfg = conf['config']
        trans_cfg = None

        is_loop_back = False
        name = conf['name']
        if name.startswith('Loopback'):
            is_loop_back = True
            # "Loopback0|..." sub-interface names collapse to the base name.
            pos = name.find('|')
            if pos > 0:
                name = name[0:pos]

        if not (is_loop_back and self.is_loop_back_already_esist(name)) and (name != "eth0"):
            trans_cfg = dict()
            trans_cfg['name'] = name
            if is_loop_back:
                self.update_loop_backs(name)
            else:
                # Missing leaves get the platform defaults: enabled, no
                # description, MTU 9100.
                trans_cfg['enabled'] = exist_cfg['enabled'] if exist_cfg.get('enabled') is not None else True
                trans_cfg['description'] = exist_cfg['description'] if exist_cfg.get('description') else ""
                trans_cfg['mtu'] = exist_cfg['mtu'] if exist_cfg.get('mtu') else 9100

        return trans_cfg

    def reset_loop_backs(self):
        # Clear the per-run Loopback de-duplication registry.
        self.loop_backs = ","

    def update_loop_backs(self, loop_back):
        # Record a Loopback name as already reported.
        self.loop_backs += "{0},".format(loop_back)

    def is_loop_back_already_esist(self, loop_back):
        # NOTE(review): "esist" is a typo for "exist"; the name is kept to
        # avoid changing the class's external interface.
        return (",{0},".format(loop_back) in self.loop_backs)
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class L2_interfacesFacts(object): + """ The sonic l2_interfaces fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = L2_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def vlan_range_to_list(self, in_range): + range_bounds = in_range.split('-') + range_bottom = int(range_bounds[0]) + range_top = int(range_bounds[1]) + 1 + vlan_list = list(range(range_bottom, range_top)) + vlan_dict_list = [] + for vlan in vlan_list: + vlan_dict_list.append({'vlan': vlan}) + return vlan_dict_list + + def get_l2_interfaces_from_interfaces(self, interfaces): + l2_interfaces = [] + + for intf in interfaces: + name = intf['name'] + key = 'openconfig-if-ethernet:ethernet' + if name.startswith('PortChannel'): + key = 'openconfig-if-aggregate:aggregation' + eth_det = intf.get(key) + if eth_det: + open_cfg_vlan = eth_det.get('openconfig-vlan:switched-vlan') + if open_cfg_vlan: + new_det = dict() + new_det['name'] = name + if name == "eth0": + continue + if (open_cfg_vlan['config'].get('access-vlan')): + new_det['access'] = dict({'vlan': open_cfg_vlan['config'].get('access-vlan')}) + 
if (open_cfg_vlan['config'].get('trunk-vlans')): + new_det['trunk'] = {} + new_det['trunk']['allowed_vlans'] = [] + + # Save trunk vlans as a list of single vlan dicts: Convert + # any ranges to lists of individual vlan dicts and merge + # each resulting "range list" onto the main list for the + # interface. + for vlan in open_cfg_vlan['config'].get('trunk-vlans'): + if isinstance(vlan, str) and '-' in vlan: + new_det['trunk']['allowed_vlans'].extend(self.vlan_range_to_list(vlan)) + else: + new_det['trunk']['allowed_vlans'].append({'vlan': vlan}) + l2_interfaces.append(new_det) + + return l2_interfaces + + def get_all_l2_interfaces(self): + """Get all the l2_interfaces available in chassis""" + l2_interfaces = {} + request = [{"path": "data/openconfig-interfaces:interfaces", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "openconfig-interfaces:interfaces" in response[0][1]: + interfaces = response[0][1].get("openconfig-interfaces:interfaces", {}) + if interfaces.get("interface"): + interfaces = interfaces['interface'] + l2_interfaces = self.get_l2_interfaces_from_interfaces(interfaces) + else: + l2_interfaces = {} + + return l2_interfaces + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for l2_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_l2_interfaces() + + objs = list() + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + # split the config into 
instances of the resource + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('l2_interfaces', None) + facts = {} + if objs: + facts['l2_interfaces'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + for cfg in params['config']: + facts['l2_interfaces'].append(utils.remove_empties(cfg)) + ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py new file mode 100644 index 00000000..69a6dcd4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py @@ -0,0 +1,185 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic l3_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + + +class L3_interfacesFacts(object): + """ The sonic l3_interfaces fact class + """ + + loop_backs = "," + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = L3_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_l3_interfaces(self): + url = "data/openconfig-interfaces:interfaces/interface" + method = "GET" + request = [{"path": url, "method": method}] + + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + l3_lists = [] + if "openconfig-interfaces:interface" in response[0][1]: + l3_lists = response[0][1].get("openconfig-interfaces:interface", []) + + l3_configs = [] + for l3 in l3_lists: + l3_dict = dict() + l3_name = l3["name"] + if l3_name == "eth0": + continue + + l3_dict['name'] = l3_name + + ip = None + anycast_addr = list() + if l3.get('openconfig-vlan:routed-vlan'): + ip = l3['openconfig-vlan:routed-vlan'] + if ip.get('openconfig-if-ip:ipv4', None) and ip['openconfig-if-ip:ipv4'].get('openconfig-interfaces-ext:sag-ipv4', None): + if 
ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4'].get('config', None): + if ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4']['config'].get('static-anycast-gateway', None): + anycast_addr = ip['openconfig-if-ip:ipv4']['openconfig-interfaces-ext:sag-ipv4']['config']['static-anycast-gateway'] + else: + ip = l3.get('subinterfaces', {}).get('subinterface', [{}])[0] + + l3_dict['ipv4'] = dict() + l3_ipv4 = list() + if anycast_addr: + l3_dict['ipv4']['anycast_addresses'] = anycast_addr + elif 'openconfig-if-ip:ipv4' in ip and 'addresses' in ip['openconfig-if-ip:ipv4'] and 'address' in ip['openconfig-if-ip:ipv4']['addresses']: + for ipv4 in ip['openconfig-if-ip:ipv4']['addresses']['address']: + if ipv4.get('config') and ipv4.get('config').get('ip'): + temp = dict() + temp['address'] = str(ipv4['config']['ip']) + '/' + str(ipv4['config']['prefix-length']) + temp['secondary'] = ipv4['config']['secondary'] + l3_ipv4.append(temp) + if l3_ipv4: + l3_dict['ipv4']['addresses'] = l3_ipv4 + + l3_dict['ipv6'] = dict() + l3_ipv6 = list() + if 'openconfig-if-ip:ipv6' in ip: + if 'addresses' in ip['openconfig-if-ip:ipv6'] and 'address' in ip['openconfig-if-ip:ipv6']['addresses']: + for ipv6 in ip['openconfig-if-ip:ipv6']['addresses']['address']: + if ipv6.get('config') and ipv6.get('config').get('ip'): + temp = dict() + temp['address'] = str(ipv6['config']['ip']) + '/' + str(ipv6['config']['prefix-length']) + l3_ipv6.append(temp) + if l3_ipv6: + l3_dict['ipv6']['addresses'] = l3_ipv6 + if 'config' in ip['openconfig-if-ip:ipv6'] and 'enabled' in ip['openconfig-if-ip:ipv6']['config']: + l3_dict['ipv6']['enabled'] = ip['openconfig-if-ip:ipv6']['config']['enabled'] + + l3_configs.append(l3_dict) + return l3_configs + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for l3_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: 
dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + if not data: + resources = self.get_l3_interfaces() + objs = [] + for resource in resources: + if resource: + obj = self.render_config(self.generated_spec, resource) + obj = self.transform_config(obj) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('l3_interfaces', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['l3_interfaces'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def transform_config(self, conf): + exist_cfg = conf + trans_cfg = None + + is_loop_back = False + name = exist_cfg['name'] + if name.startswith('Loopback'): + is_loop_back = True + pos = name.find('|') + if pos > 0: + name = name[0:pos] + + if not (is_loop_back and self.is_loop_back_already_esist(name)) and (name != "eth0"): + trans_cfg = dict() + trans_cfg['name'] = name + if is_loop_back: + self.update_loop_backs(name) + trans_cfg['ipv4'] = exist_cfg.get('ipv4', {}) + trans_cfg['ipv6'] = exist_cfg.get('ipv6', {}) + + return trans_cfg + + def reset_loop_backs(self): + self.loop_backs = "," + + def update_loop_backs(self, loop_back): + self.loop_backs += "{Loopback},".format(Loopback=loop_back) + + def is_loop_back_already_esist(self, loop_back): + return (",{0},".format(loop_back) in self.loop_backs) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/__init__.py new file mode 
100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py new file mode 100644 index 00000000..72819681 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py @@ -0,0 +1,135 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic lag_interfaces fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.lag_interfaces.lag_interfaces import Lag_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class Lag_interfacesFacts(object): + """ The sonic lag_interfaces fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Lag_interfacesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def 
get_all_portchannels(self): + """Get all the interfaces available in chassis""" + request = [{"path": "data/sonic-portchannel:sonic-portchannel", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + if response[0][1]: + data = response[0][1]['sonic-portchannel:sonic-portchannel'] + else: + data = [] + if data is not None: + if "PORTCHANNEL_MEMBER" in data: + portchannel_members_list = data["PORTCHANNEL_MEMBER"]["PORTCHANNEL_MEMBER_LIST"] + else: + portchannel_members_list = [] + if "PORTCHANNEL" in data: + portchannel_list = data["PORTCHANNEL"]["PORTCHANNEL_LIST"] + else: + portchannel_list = [] + if portchannel_list: + for i in portchannel_list: + if not any(d["name"] == i["name"] for d in portchannel_members_list): + portchannel_members_list.append({'ifname': None, 'name': i['name']}) + if data: + return portchannel_members_list + else: + return [] + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for lag_interfaces + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + objs = [] + if not data: + data = self.get_all_portchannels() + # operate on a collection of resource x + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + obj = self.transform_config(obj) + if obj: + self.merge_portchannels(objs, obj) + facts = {} + if objs: + facts['lag_interfaces'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + for cfg in params['config']: + facts['lag_interfaces'].append(cfg) + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + return conf + + def transform_config(self, conf): + trans_cfg = dict() + trans_cfg['name'] = conf['name'] + trans_cfg['members'] 
= dict() + if conf['ifname']: + interfaces = list() + interface = {'member': conf['ifname']} + interfaces.append(interface) + trans_cfg['members'] = {'interfaces': interfaces} + return trans_cfg + + def merge_portchannels(self, configs, conf): + if len(configs) == 0: + configs.append(conf) + else: + new_interface = None + if conf.get('members') and conf['members'].get('interfaces'): + new_interface = conf['members']['interfaces'][0] + else: + configs.append(conf) + if new_interface: + matched = next((cfg for cfg in configs if cfg['name'] == conf['name']), None) + if matched and matched.get('members'): + ext_interfaces = matched.get('members').get('interfaces', []) + ext_interfaces.append(new_interface) + else: + configs.append(conf) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py new file mode 100644 index 00000000..69864cdf --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py @@ -0,0 +1,139 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic mclag fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.mclag.mclag import MclagArgs +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class MclagFacts(object): + """ The sonic mclag fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = MclagArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_all_mclag(self): + """Get all the mclag available in chassis""" + request = [{"path": "data/openconfig-mclag:mclag", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + if ('openconfig-mclag:mclag' in response[0][1]): + data = response[0][1]['openconfig-mclag:mclag'] + else: + data = {} + return data + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for mclag + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + objs = None + if not data: + data = self.get_all_mclag() + if data: + objs = self.render_config(self.generated_spec, data) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['mclag'] 
= params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = self.parse_sonic_mclag(spec, conf) + return config + + def parse_sonic_mclag(self, spec, conf): + config = {} + portchannels_list = [] + if conf: + domain_data = None + if conf.get('mclag-domains', None) and conf['mclag-domains'].get('mclag-domain', None): + domain_data = conf['mclag-domains']['mclag-domain'][0] + if domain_data: + domain_id = domain_data['domain-id'] + config['domain_id'] = domain_id + domain_config = domain_data.get('config', None) + if domain_config: + if domain_config.get('session-timeout', None): + config['session_timeout'] = domain_config['session-timeout'] + if domain_config.get('keepalive-interval', None): + config['keepalive'] = domain_config['keepalive-interval'] + if domain_config.get('source-address', None): + config['source_address'] = domain_config['source-address'] + if domain_config.get('peer-address', None): + config['peer_address'] = domain_config['peer-address'] + if domain_config.get('peer-link', None): + config['peer_link'] = domain_config['peer-link'] + if domain_config.get('mclag-system-mac', None): + config['system_mac'] = domain_config['mclag-system-mac'] + + if conf.get('vlan-interfaces', None) and conf['vlan-interfaces'].get('vlan-interface', None): + vlans_list = [] + vlan_data = conf['vlan-interfaces']['vlan-interface'] + for vlan in vlan_data: + vlans_list.append({'vlan': vlan['name']}) + if vlans_list: + config['unique_ip'] = {'vlans': vlans_list} + + if conf.get('interfaces', None) and conf['interfaces'].get('interface', None): + portchannels_list = [] + po_data = conf['interfaces']['interface'] + for po in po_data: + if 
po.get('config', None) and po['config'].get('mclag-domain-id', None) and domain_id == domain_data['domain-id']: + portchannels_list.append({'lag': po['name']}) + if portchannels_list: + config['members'] = {'portchannels': portchannels_list} + + return config diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py new file mode 100644 index 00000000..a47142b4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py @@ -0,0 +1,153 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic ntp fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.ntp.ntp import NtpArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class NtpFacts(object): + """ The sonic ntp fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = NtpArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for ntp + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_ntp_configuration() + + obj = self.render_config(self.generated_spec, data) + + ansible_facts['ansible_network_resources'].pop('ntp', None) + facts = {} + if obj: + params = utils.validate_config(self.argument_spec, {'config': obj}) + facts['ntp'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param 
spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_ntp_configuration(self): + """Get all NTP configuration""" + + all_ntp_request = [{"path": "data/openconfig-system:system/ntp", "method": GET}] + all_ntp_response = [] + try: + all_ntp_response = edit_config(self._module, to_request(self._module, all_ntp_request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + all_ntp_config = dict() + if 'openconfig-system:ntp' in all_ntp_response[0][1]: + all_ntp_config = all_ntp_response[0][1].get('openconfig-system:ntp', {}) + + ntp_global_config = dict() + if 'config' in all_ntp_config: + ntp_global_config = all_ntp_config.get('config', {}) + + ntp_servers = [] + if 'servers' in all_ntp_config: + ntp_servers = all_ntp_config['servers'].get('server', []) + + ntp_keys = [] + if 'ntp-keys' in all_ntp_config: + ntp_keys = all_ntp_config['ntp-keys'].get('ntp-key', []) + + ntp_config = dict() + + if 'network-instance' in ntp_global_config: + ntp_config['vrf'] = ntp_global_config['network-instance'] + + if 'enable-ntp-auth' in ntp_global_config: + ntp_config['enable_ntp_auth'] = ntp_global_config['enable-ntp-auth'] + + if 'source-interface' in ntp_global_config: + ntp_config['source_interfaces'] = ntp_global_config['source-interface'] + + if 'trusted-key' in ntp_global_config: + ntp_config['trusted_keys'] = ntp_global_config['trusted-key'] + + servers = [] + for ntp_server in ntp_servers: + if 'config' in ntp_server: + server = {} + server['address'] = ntp_server['config'].get('address', None) + if 'key-id' in ntp_server['config']: + server['key_id'] = ntp_server['config']['key-id'] + server['minpoll'] = ntp_server['config'].get('minpoll', None) + server['maxpoll'] = ntp_server['config'].get('maxpoll', None) + servers.append(server) + ntp_config['servers'] = servers + + keys = [] + for ntp_key in ntp_keys: + if 'config' in 
ntp_key: + key = {} + key['encrypted'] = ntp_key['config'].get('encrypted', None) + key['key_id'] = ntp_key['config'].get('key-id', None) + key_type_str = ntp_key['config'].get('key-type', None) + key_type = key_type_str.split(":", 1)[-1] + key['key_type'] = key_type + key['key_value'] = ntp_key['config'].get('key-value', None) + keys.append(key) + ntp_config['ntp_keys'] = keys + + return ntp_config diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py new file mode 100644 index 00000000..938bd642 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py @@ -0,0 +1,125 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic port breakout fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +import json +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.port_breakout.port_breakout import Port_breakoutArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_breakout_mode, +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" +POST = "post" + + +class Port_breakoutFacts(object): + """ The sonic port breakout fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Port_breakoutArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for port_breakout + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_port_breakout() + + objs = list() + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + if obj: + objs.append(obj) + + 
ansible_facts['ansible_network_resources'].pop('port_breakout', None) + facts = {} + if objs: + facts['port_breakout'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + if params: + facts['port_breakout'].extend(params['config']) + ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_port_breakout(self): + """Get all the port_breakout configured in the device""" + request = [{"path": "operations/sonic-port-breakout:breakout_capabilities", "method": POST}] + port_breakout_list = [] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + raw_port_breakout_list = [] + if "sonic-port-breakout:output" in response[0][1]: + raw_port_breakout_list = response[0][1].get("sonic-port-breakout:output", {}).get('caps', []) + + for port_breakout in raw_port_breakout_list: + name = port_breakout.get('port', None) + mode = port_breakout.get('defmode', None) + if name and mode: + if '[' in mode: + mode = mode[:mode.index('[')] + norm_port_breakout = {'name': name, 'mode': mode} + mode = get_breakout_mode(self._module, name) + if mode: + norm_port_breakout['mode'] = mode + port_breakout_list.append(norm_port_breakout) + + return port_breakout_list diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/prefix_lists/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/prefix_lists/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
# ---- File: plugins/module_utils/network/sonic/facts/prefix_lists/prefix_lists.py ----
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

"""
The sonic prefix_lists fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
    utils,
)

from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils \
    import (
        remove_empties_from_list
    )
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.prefix_lists.prefix_lists import Prefix_listsArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)


def prefix_set_cfg_parse(unparsed_prefix_set):
    '''Parse the raw input configuration JSON representation for the prefix set specified
    by the input "unparsed_prefix_set" input parameter. Parse the information to
    convert it to a dictionary matching the "argspec" for the "prefix_lists" resource
    module.'''

    parsed_prefix_set = dict()
    if not unparsed_prefix_set.get("config"):
        return parsed_prefix_set

    parsed_prefix_set['name'] = unparsed_prefix_set['name']
    pfx_cfg = unparsed_prefix_set['config']
    if pfx_cfg.get('mode') and isinstance((pfx_cfg['mode']), str):
        parsed_prefix_set['afi'] = pfx_cfg['mode'].lower()

    ext_prefixes = unparsed_prefix_set.get('openconfig-routing-policy-ext:extended-prefixes')
    if ext_prefixes:
        if not ext_prefixes.get("extended-prefix"):
            return parsed_prefix_set

        prefix_lists_parsed = []
        for entry in ext_prefixes['extended-prefix']:
            entry_cfg = entry.get('config')
            if not entry_cfg or not entry_cfg.get('action'):
                continue

            prefix_parsed = dict()
            prefix_parsed['action'] = entry_cfg['action'].lower()
            if not entry.get('ip-prefix') or not entry.get('sequence-number'):
                continue

            prefix_parsed['prefix'] = entry['ip-prefix']
            prefix_parsed['sequence'] = entry['sequence-number']

            mask_range = entry.get('masklength-range')
            if mask_range and mask_range != 'exact':
                # A "ge..le" masklength range; record bounds only when they
                # differ from the implicit defaults (the prefix's own mask
                # for "ge", the address-family maximum for "le").
                mask = int(prefix_parsed['prefix'].split('/')[1])
                ge_str, le_str = mask_range.split('..')
                ge_bound = int(ge_str)
                if ge_bound != mask:
                    prefix_parsed['ge'] = ge_bound
                # NOTE(review): assumes 'afi' was set above (mode present);
                # a prefix set without a mode would raise KeyError here.
                pfx_len = 32 if parsed_prefix_set['afi'] == 'ipv4' else 128
                le_bound = int(le_str)
                if le_bound != pfx_len:
                    prefix_parsed['le'] = le_bound

            prefix_lists_parsed.append(prefix_parsed)
        parsed_prefix_set['prefixes'] = prefix_lists_parsed
    return parsed_prefix_set

# (class Prefix_listsFacts follows in the original patch text.)
Prefix_listsFacts: + """ The sonic prefix_lists fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Prefix_listsArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_all_prefix_sets(self): + '''Execute a REST "GET" API to fetch all of the current prefix list configuration + from the target device.''' + + pfx_fetch_spec = "openconfig-routing-policy:routing-policy/defined-sets/prefix-sets" + pfx_resp_key = "openconfig-routing-policy:prefix-sets" + pfx_set_key = "prefix-set" + # pfx_short_spec = "openconfig-routing-policy:prefix-set" + url = "data/%s" % pfx_fetch_spec + method = "GET" + request = [{"path": url, "method": method}] + + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc)) + + prefix_lists_unparsed = [] + resp_prefix_set = response[0][1].get(pfx_resp_key, None) + if resp_prefix_set: + prefix_lists_unparsed = resp_prefix_set.get(pfx_set_key, None) + return prefix_lists_unparsed + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for prefix_lists + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # (comment by Ansible): just for linting purposes, remove + pass + + if not data: + # Fetch data from the current device configuration + # (Skip if operating on previously fetched configuration.) + data = self.get_all_prefix_sets() + + # split the unparsed prefix configuration list into a list + # of parsed prefix set "instances" (dictonary "objects"). 
+ prefix_sets = list() + for prefix_set_cfg in data: + prefix_set = prefix_set_cfg_parse(prefix_set_cfg) + if prefix_set: + prefix_sets.append(prefix_set) + + ansible_facts['ansible_network_resources'].pop('prefix_lists', None) + facts = {} + if prefix_sets: + params = utils.validate_config(self.argument_spec, + {'config': remove_empties_from_list(prefix_sets)}) + facts['prefix_lists'] = params['config'] + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py new file mode 100644 index 00000000..72593b22 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py @@ -0,0 +1,168 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic tacas server fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +import json +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.radius_server.radius_server import Radius_serverArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class Radius_serverFacts(object): + """ The sonic tacas server fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Radius_serverArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for radius_server + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + obj = None + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_radius_server() + + obj = self.render_config(self.generated_spec, data) + + ansible_facts['ansible_network_resources'].pop('radius_server', None) + facts = {} + if obj: + facts['radius_server'] = {} + params = utils.validate_config(self.argument_spec, {'config': obj}) + if params: + facts['radius_server'] = params['config'] + 
ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_radius_server(self): + """Get all the radius_server configured in the device""" + request = [{"path": "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config", "method": GET}] + radius_server_data = {} + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "openconfig-system:config" in response[0][1]: + raw_radius_global_data = response[0][1].get("openconfig-system:config", {}) + + if 'auth-type' in raw_radius_global_data: + radius_server_data['auth_type'] = raw_radius_global_data['auth-type'] + if 'secret-key' in raw_radius_global_data: + radius_server_data['key'] = raw_radius_global_data['secret-key'] + if 'timeout' in raw_radius_global_data: + radius_server_data['timeout'] = raw_radius_global_data['timeout'] + + request = [{"path": "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "openconfig-aaa-radius-ext:config" in response[0][1]: + raw_radius_ext_global_data = response[0][1].get("openconfig-aaa-radius-ext:config", {}) + + if 'nas-ip-address' in raw_radius_ext_global_data: + radius_server_data['nas_ip'] = raw_radius_ext_global_data['nas-ip-address'] + if 'retransmit-attempts' in raw_radius_ext_global_data: + radius_server_data['retransmit'] = 
raw_radius_ext_global_data['retransmit-attempts'] + if 'statistics' in raw_radius_ext_global_data: + radius_server_data['statistics'] = raw_radius_ext_global_data['statistics'] + + request = [{"path": "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers", "method": GET}] + hosts = [] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + raw_radius_server_list = [] + if "openconfig-system:servers" in response[0][1]: + raw_radius_server_list = response[0][1].get("openconfig-system:servers", {}).get('server', []) + + for radius_host in raw_radius_server_list: + host_data = {} + if 'address' in radius_host: + host_data['name'] = radius_host['address'] + cfg = radius_host.get('config', None) + if cfg: + if 'auth-type' in cfg: + host_data['auth_type'] = cfg['auth-type'] + if 'priority' in cfg: + host_data['priority'] = cfg['priority'] + if 'vrf' in cfg: + host_data['vrf'] = cfg['vrf'] + if 'timeout' in cfg: + host_data['timeout'] = cfg['timeout'] + if radius_host.get('radius', None) and radius_host['radius'].get('config', None): + tacas_cfg = radius_host['radius']['config'] + if tacas_cfg.get('auth-port', None): + host_data['port'] = tacas_cfg['auth-port'] + if tacas_cfg.get('secret-key', None): + host_data['key'] = tacas_cfg['secret-key'] + if tacas_cfg.get('openconfig-aaa-radius-ext:source-interface', None): + host_data['source_interface'] = tacas_cfg['openconfig-aaa-radius-ext:source-interface'] + if tacas_cfg.get('retransmit-attempts', None): + host_data['retransmit'] = tacas_cfg['retransmit-attempts'] + if host_data: + hosts.append(host_data) + + if hosts: + radius_server_data['servers'] = {'host': hosts} + + return radius_server_data diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/__init__.py 
b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py new file mode 100644 index 00000000..f8356644 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py @@ -0,0 +1,173 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic static_routes fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + remove_empties_from_list +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.static_routes.static_routes import Static_routesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import ( + get_all_vrfs, +) + +network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' +protocol_static_routes_path = 'protocols/protocol=STATIC,static/static-routes' + + +class Static_routesFacts(object): + """ The sonic static_routes fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Static_routesArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for static_routes + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + objs = [] + if connection: # just for linting purposes, remove + pass + + if not data: + static_routes_config = self.get_static_routes(self._module) + data = self.update_static_routes(static_routes_config) + # operate on a collection of resource x + for 
conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + # split the config into instances of the resource + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('static_routes', None) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)}) + facts['static_routes'] = params['config'] + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + + return conf + + def get_static_routes(self, module): + all_static_routes = [] + vrfs = get_all_vrfs(module) + for vrf_name in vrfs: + get_path = '%s=%s/%s' % (network_instance_path, vrf_name, protocol_static_routes_path) + request = {'path': get_path, 'method': 'get'} + try: + response = edit_config(module, to_request(module, request)) + except ConnectionError as exc: + module.fail_json(msg=str(exc), code=exc.code) + for resp in response: + if 'openconfig-network-instance:static-routes' in resp[1]: + static_routes_dict = resp[1].get('openconfig-network-instance:static-routes', {}) + static_routes_dict['vrf'] = vrf_name + all_static_routes.append(static_routes_dict) + return all_static_routes + + def update_static_routes(self, data): + static_vrf_list = [] + for static_route in data: + static_vrf_dict = {} + static_route_list = static_route.get('static', []) + vrf_name = static_route.get('vrf', None) + static_list = [] + for static in static_route_list: + static_dict = {} + prefix = static.get('prefix', None) + next_hops = static.get('next-hops', None) + next_hop_list = next_hops.get('next-hop', []) + next_hop_dict_list = [] + for next_hop in next_hop_list: + next_hop_dict = {} + index_dict = {} + inf_ref = 
next_hop.get('interface-ref', {}) + inf_ref_cfg = inf_ref.get('config', {}) + interface = inf_ref_cfg.get('interface', None) + config = next_hop.get('config', {}) + next_hop_attr = config.get('next-hop', None) + metric = config.get('metric', None) + nexthop_vrf = config.get('network-instance', None) + blackhole = config.get('blackhole', None) + track = config.get('track', None) + tag = config.get('tag', None) + if blackhole: + index_dict['blackhole'] = blackhole + if interface: + index_dict['interface'] = interface + if nexthop_vrf: + index_dict['nexthop_vrf'] = nexthop_vrf + if next_hop_attr: + index_dict['next_hop'] = next_hop_attr + if index_dict: + next_hop_dict['index'] = index_dict + if metric: + next_hop_dict['metric'] = metric + if track: + next_hop_dict['track'] = track + if tag: + next_hop_dict['tag'] = tag + if next_hop_dict: + next_hop_dict_list.append(next_hop_dict) + if prefix: + static_dict['prefix'] = prefix + if next_hop_dict_list: + static_dict['next_hops'] = next_hop_dict_list + if static_dict: + static_list.append(static_dict) + if static_list: + static_vrf_dict['static_list'] = static_list + if vrf_name: + static_vrf_dict['vrf_name'] = vrf_name + if static_vrf_dict: + static_vrf_list.append(static_vrf_dict) + + return static_vrf_list diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py new file mode 100644 index 00000000..1d7a82d8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py @@ -0,0 +1,143 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2021 
Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic system fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.system.system import SystemArgs + +GET = "get" + + +class SystemFacts(object): + """ The sonic system fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = SystemArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def get_system(self): + """Get system hostname available in chassis""" + request = [{"path": "data/openconfig-system:system/config", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + if ('openconfig-system:config' in response[0][1]): + data = response[0][1]['openconfig-system:config'] + else: + data = {} + return data + + def get_naming(self): + """Get interface_naming type available in chassis""" + request = [{"path": "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost", "method": 
GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + if ('sonic-device-metadata:DEVICE_METADATA_LIST' in response[0][1]): + intf_data = response[0][1]['sonic-device-metadata:DEVICE_METADATA_LIST'] + if 'intf_naming_mode' in intf_data[0]: + data = intf_data[0] + else: + data = {} + return data + + def get_anycast_addr(self): + """Get system anycast address available in chassis""" + request = [{"path": "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + if ('sonic-sag:SAG_GLOBAL_LIST' in response[0][1]): + data = response[0][1]['sonic-sag:SAG_GLOBAL_LIST'][0] + else: + data = {} + return data + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for system + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + data = self.get_system() + intf_naming = self.get_naming() + if intf_naming: + data.update(intf_naming) + anycast_addr = self.get_anycast_addr() + if anycast_addr: + data.update(anycast_addr) + objs = [] + objs = self.render_config(self.generated_spec, data) + facts = {} + if objs: + params = utils.validate_config(self.argument_spec, {'config': objs}) + facts['system'] = params['config'] + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = self.parse_sonic_system(spec, 
conf) + return config + + def parse_sonic_system(self, spec, conf): + config = deepcopy(spec) + if conf: + if ('hostname' in conf) and (conf['hostname']): + config['hostname'] = conf['hostname'] + if ('intf_naming_mode' in conf) and (conf['intf_naming_mode']): + config['interface_naming'] = conf['intf_naming_mode'] + if ('IPv4' in conf) and (conf['IPv4'] == "enable"): + config['anycast_address']['ipv4'] = True + if ('IPv4' in conf) and (conf['IPv4'] == "disable"): + config['anycast_address']['ipv4'] = False + if ('IPv6' in conf) and (conf['IPv6'] == "enable"): + config['anycast_address']['ipv6'] = True + if ('IPv6' in conf) and (conf['IPv6'] == "disable"): + config['anycast_address']['ipv6'] = False + if ('gwmac' in conf) and (conf['gwmac']): + config['anycast_address']['mac_address'] = conf['gwmac'] + return utils.remove_empties(config) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py new file mode 100644 index 00000000..a1e79910 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py @@ -0,0 +1,150 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic tacas server fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +import json +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.tacacs_server.tacacs_server import Tacacs_serverArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class Tacacs_serverFacts(object): + """ The sonic tacas server fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = Tacacs_serverArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for tacacs_server + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + obj = None + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_tacacs_server() + + obj = self.render_config(self.generated_spec, data) + + ansible_facts['ansible_network_resources'].pop('tacacs_server', None) + facts = {} + if obj: + facts['tacacs_server'] = {} + params = utils.validate_config(self.argument_spec, {'config': obj}) + if params: + facts['tacacs_server'] = params['config'] + 
ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_tacacs_server(self): + """Get all the tacacs_server configured in the device""" + request = [{"path": "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config", "method": GET}] + tacacs_server_data = {} + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "openconfig-system:config" in response[0][1]: + raw_tacacs_global_data = response[0][1].get("openconfig-system:config", {}) + + if 'auth-type' in raw_tacacs_global_data: + tacacs_server_data['auth_type'] = raw_tacacs_global_data['auth-type'] + if 'secret-key' in raw_tacacs_global_data: + tacacs_server_data['key'] = raw_tacacs_global_data['secret-key'] + if 'source-interface' in raw_tacacs_global_data: + tacacs_server_data['source_interface'] = raw_tacacs_global_data['source-interface'] + if 'timeout' in raw_tacacs_global_data: + tacacs_server_data['timeout'] = raw_tacacs_global_data['timeout'] + + request = [{"path": "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers", "method": GET}] + hosts = [] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + raw_tacacs_server_list = [] + if "openconfig-system:servers" in response[0][1]: + raw_tacacs_server_list = response[0][1].get("openconfig-system:servers", {}).get('server', []) + + for tacacs_host in raw_tacacs_server_list: + host_data = {} + if 'address' in tacacs_host: + host_data['name'] = 
tacacs_host['address'] + cfg = tacacs_host.get('config', None) + if cfg: + if 'auth-type' in cfg: + host_data['auth_type'] = cfg['auth-type'] + if 'priority' in cfg: + host_data['priority'] = cfg['priority'] + if 'vrf' in cfg: + host_data['vrf'] = cfg['vrf'] + if 'timeout' in cfg: + host_data['timeout'] = cfg['timeout'] + if tacacs_host.get('tacacs', None) and tacacs_host['tacacs'].get('config', None): + tacas_cfg = tacacs_host['tacacs']['config'] + if tacas_cfg.get('port', None): + host_data['port'] = tacas_cfg['port'] + if tacas_cfg.get('secret-key', None): + host_data['key'] = tacas_cfg['secret-key'] + if host_data: + hosts.append(host_data) + + if hosts: + tacacs_server_data['servers'] = {'host': hosts} + + return tacacs_server_data diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py new file mode 100644 index 00000000..038e97f8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py @@ -0,0 +1,122 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic users fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.users.users import UsersArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class UsersFacts(object): + """ The sonic users fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = UsersArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for users + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_users() + + objs = list() + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('users', None) + facts = {} + if objs: + facts['users'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + if params: + facts['users'].extend(params['config']) + 
ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_users(self): + """Get all the users configured in the device""" + request = [{"path": "data/sonic-system-aaa:sonic-system-aaa/USER", "method": GET}] + users = [] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + raw_users = [] + if "sonic-system-aaa:USER" in response[0][1]: + raw_users = response[0][1].get("sonic-system-aaa:USER", {}).get('USER_LIST', []) + + for raw_user in raw_users: + name = raw_user.get('username', None) + role = raw_user.get('role', []) + if role and len(role) > 0: + role = role[0] + password = raw_user.get('password', None) + user = {} + if name and role: + user['name'] = name + user['role'] = role + if password: + user['password'] = password + if user: + users.append(user) + return users diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py new file mode 100644 index 00000000..7c4af2ea --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py @@ -0,0 +1,126 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic vlans fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vlans.vlans import VlansArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class VlansFacts(object): + """ The sonic vlans fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = VlansArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for vlans + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + vlans = self.get_vlans() + objs = [] + for vlan_id, vlan_config in vlans.items(): + obj = self.render_config(self.generated_spec, vlan_config) + if obj: + objs.append(obj) + ansible_facts['ansible_network_resources'].pop('vlans', None) + facts = {} + if objs: + params = 
utils.validate_config(self.argument_spec, {'config': objs}) + facts['vlans'] = params['config'] + + ansible_facts['ansible_network_resources'].update(facts) + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + config = deepcopy(spec) + try: + config['vlan_id'] = int(conf['vlan_id']) + if conf.get('description', None): + config['description'] = conf['description'] + except TypeError: + config['vlan_id'] = None + config['description'] = None + return utils.remove_empties(config) + + def get_vlans(self): + """Get all the l2_interfaces available in chassis""" + request = [{"path": "data/openconfig-interfaces:interfaces", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + interfaces = {} + if "openconfig-interfaces:interfaces" in response[0][1]: + interfaces = response[0][1].get("openconfig-interfaces:interfaces", {}) + if interfaces.get("interface"): + interfaces = interfaces['interface'] + + ret_vlan_configs = {} + + for interface in interfaces: + interface_name = interface.get("config").get("name") + description = interface.get("config").get("description", None) + if "Vlan" in interface_name: + vlan_id = interface_name.split("Vlan")[1] + vlan_configs = {"vlan_id": vlan_id, + "name": interface_name, + } + if description: + vlan_configs['description'] = description + + ret_vlan_configs.update({vlan_id: vlan_configs}) + + return ret_vlan_configs diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py new file mode 100644 index 
00000000..797612bc --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py @@ -0,0 +1,120 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic vrfs fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vrfs.vrfs import VrfsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class VrfsFacts(object): + """ The sonic vrfs fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = VrfsArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for vrf + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section 
^interface') + # using mock data instead + data = self.get_all_vrf_interfaces() + + objs = list() + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('vrfs', None) + facts = {} + if objs: + facts['vrfs'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + if params: + facts['vrfs'].extend(params['config']) + ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_vrf_interfaces(self): + """Get all the interfaces available in chassis""" + all_network_instatnces = {} + request = [{"path": "data/openconfig-network-instance:network-instances", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "openconfig-network-instance:network-instances" in response[0][1]: + all_network_instatnces = response[0][1].get("openconfig-network-instance:network-instances", {}) + return self.get_vrf_interfaces_from_network_instances(all_network_instatnces['network-instance']) + + def get_vrf_interfaces_from_network_instances(self, network_instances): + vrf_interfaces = [] + + for each_ins in network_instances: + vrf_interface = dict() + name = each_ins['name'] + if name.startswith('Vrf') or name == 'mgmt': + vrf_interface['name'] = name + if each_ins.get("interfaces"): + interfaces = [{"name": intf.get("id")} for intf in each_ins["interfaces"]["interface"]] + vrf_interface["members"] = {"interfaces": interfaces} + + vrf_interfaces.append(vrf_interface) + return vrf_interfaces diff 
--git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py new file mode 100644 index 00000000..51aec656 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py @@ -0,0 +1,207 @@ +# +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic vxlans fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vxlans.vxlans import VxlansArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +GET = "get" + + +class VxlansFacts(object): + """ The sonic vxlans fact class + """ + + def __init__(self, module, subspec='config', options='options'): + self._module = module + self.argument_spec = VxlansArgs.argument_spec + spec = deepcopy(self.argument_spec) + if subspec: + if options: + facts_argument_spec = spec[subspec][options] + else: + facts_argument_spec = spec[subspec] + else: + facts_argument_spec = spec + + self.generated_spec = utils.generate_dict(facts_argument_spec) + + def populate_facts(self, connection, ansible_facts, data=None): + """ Populate the facts for vxlans + :param connection: the device connection + :param ansible_facts: Facts dictionary + :param data: previously collected conf + :rtype: dictionary + :returns: facts + """ + if connection: # just for linting purposes, remove + pass + + if not data: + # typically data is populated from the current device configuration + # data = connection.get('show running-config | section ^interface') + # using mock data instead + data = self.get_all_vxlans() + + objs = list() + for conf in data: + if conf: + obj = self.render_config(self.generated_spec, conf) + if obj: + objs.append(obj) + + ansible_facts['ansible_network_resources'].pop('vxlans', None) + facts = {} + if objs: + facts['vxlans'] = [] + params = utils.validate_config(self.argument_spec, {'config': objs}) + if params: + facts['vxlans'].extend(params['config']) + 
ansible_facts['ansible_network_resources'].update(facts) + + return ansible_facts + + def render_config(self, spec, conf): + """ + Render config as dictionary structure and delete keys + from spec for null values + + :param spec: The facts tree, generated from the argspec + :param conf: The configuration + :rtype: dictionary + :returns: The generated config + """ + return conf + + def get_all_vxlans(self): + vxlans = [] + vxlan_tunnels = [] + vxlan_vlan_map = [] + + vxlans_tunnels_vlan_map = self.get_all_vxlans_tunnels_vlan_map() + vxlans_evpn_nvo_list = self.get_all_vxlans_evpn_nvo_list() + + if vxlans_tunnels_vlan_map.get('VXLAN_TUNNEL'): + if vxlans_tunnels_vlan_map['VXLAN_TUNNEL'].get('VXLAN_TUNNEL_LIST'): + vxlan_tunnels.extend(vxlans_tunnels_vlan_map['VXLAN_TUNNEL']['VXLAN_TUNNEL_LIST']) + + if vxlans_tunnels_vlan_map.get('VXLAN_TUNNEL_MAP'): + if vxlans_tunnels_vlan_map['VXLAN_TUNNEL_MAP'].get('VXLAN_TUNNEL_MAP_LIST'): + vxlan_vlan_map.extend(vxlans_tunnels_vlan_map['VXLAN_TUNNEL_MAP']['VXLAN_TUNNEL_MAP_LIST']) + + self.fill_tunnel_source_ip(vxlans, vxlan_tunnels, vxlans_evpn_nvo_list) + self.fill_vlan_map(vxlans, vxlan_vlan_map) + + vxlan_vrf_list = self.get_all_vxlans_vrf_list() + self.fill_vrf_map(vxlans, vxlan_vrf_list) + + return vxlans + + def get_all_vxlans_vrf_list(self): + """Get all the vxlan tunnels and vlan map available """ + request = [{"path": "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + if "sonic-vrf:VRF_LIST" in response[0][1]: + vxlan_vrf_list = response[0][1].get("sonic-vrf:VRF_LIST", {}) + + return vxlan_vrf_list + + def get_all_vxlans_evpn_nvo_list(self): + """Get all the evpn nvo list available """ + request = [{"path": "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST", "method": GET}] + try: + response = edit_config(self._module, 
to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + vxlans_evpn_nvo_list = [] + if "sonic-vxlan:EVPN_NVO_LIST" in response[0][1]: + vxlans_evpn_nvo_list = response[0][1].get("sonic-vxlan:EVPN_NVO_LIST", []) + + return vxlans_evpn_nvo_list + + def get_all_vxlans_tunnels_vlan_map(self): + """Get all the vxlan tunnels and vlan map available """ + request = [{"path": "data/sonic-vxlan:sonic-vxlan", "method": GET}] + try: + response = edit_config(self._module, to_request(self._module, request)) + except ConnectionError as exc: + self._module.fail_json(msg=str(exc), code=exc.code) + + vxlans_tunnels_vlan_map = {} + if "sonic-vxlan:sonic-vxlan" in response[0][1]: + vxlans_tunnels_vlan_map = response[0][1].get("sonic-vxlan:sonic-vxlan", {}) + + return vxlans_tunnels_vlan_map + + def fill_tunnel_source_ip(self, vxlans, vxlan_tunnels, vxlans_evpn_nvo_list): + for each_tunnel in vxlan_tunnels: + vxlan = dict() + vxlan['name'] = each_tunnel['name'] + vxlan['source_ip'] = each_tunnel.get('src_ip', None) + vxlan['primary_ip'] = each_tunnel.get('primary_ip', None) + vxlan['evpn_nvo'] = None + if vxlan['source_ip']: + evpn_nvo = next((nvo_map['name'] for nvo_map in vxlans_evpn_nvo_list if nvo_map['source_vtep'] == vxlan['name']), None) + if evpn_nvo: + vxlan['evpn_nvo'] = evpn_nvo + vxlans.append(vxlan) + + def fill_vlan_map(self, vxlans, vxlan_vlan_map): + for each_vlan_map in vxlan_vlan_map: + name = each_vlan_map['name'] + matched_vtep = next((each_vxlan for each_vxlan in vxlans if each_vxlan['name'] == name), None) + if matched_vtep: + vni = int(each_vlan_map['vni']) + vlan = int(each_vlan_map['vlan'][4:]) + vlan_map = matched_vtep.get('vlan_map') + if vlan_map: + vlan_map.append(dict({'vni': vni, 'vlan': vlan})) + else: + matched_vtep['vlan_map'] = [dict({'vni': vni, 'vlan': vlan})] + + def fill_vrf_map(self, vxlans, vxlan_vrf_list): + for each_vrf in vxlan_vrf_list: + vni = each_vrf.get('vni', None) + 
if vni is None: + continue + + matched_vtep = None + for each_vxlan in vxlans: + for each_vlan in each_vxlan.get('vlan_map', []): + if vni == each_vlan['vni']: + matched_vtep = each_vxlan + + if matched_vtep: + vni = int(each_vrf['vni']) + vrf = each_vrf['vrf_name'] + vrf_map = matched_vtep.get('vrf_map') + if vrf_map: + vrf_map.append(dict({'vni': vni, 'vrf': vrf})) + else: + matched_vtep['vrf_map'] = [dict({'vni': vni, 'vrf': vrf})] diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py new file mode 100644 index 00000000..77a63d42 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py @@ -0,0 +1,155 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# (c) 2016 Red Hat Inc. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + to_list, + ComplexList +) +from ansible.module_utils.connection import Connection, ConnectionError +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine + +_DEVICE_CONFIGS = {} +STANDARD_ETH_REGEXP = r"Eth\d+(/\d+)+" +PATTERN = re.compile(STANDARD_ETH_REGEXP) + + +def get_connection(module): + if hasattr(module, "_sonic_connection"): + return module._sonic_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api in ["cliconf", "sonic_rest"]: + module._sonic_connection = Connection(module._socket_path) + else: + module.fail_json(msg="Invalid connection type %s" % network_api) + + return module._sonic_connection + + +def get_capabilities(module): + if hasattr(module, "_sonic_capabilities"): + return module._sonic_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + module._sonic_capabilities = json.loads(capabilities) + return module._sonic_capabilities + + +def 
get_config(module, flags=None): + flags = to_list(flags) + flag_str = " ".join(flags) + + try: + return _DEVICE_CONFIGS[flag_str] + except KeyError: + connection = get_connection(module) + try: + out = connection.get_config(flags=flags) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + cfg = to_text(out, errors="surrogate_then_replace").strip() + _DEVICE_CONFIGS[flag_str] = cfg + return cfg + + +def get_sublevel_config(running_config, module): + contents = list() + current_config_contents = list() + running_config = NetworkConfig(contents=running_config, indent=1) + obj = running_config.get_object(module.params['parents']) + if obj: + contents = obj.children + parents = module.params['parents'] + if parents[2:]: + temp = 1 + for count, item in enumerate(parents[2:], start=2): + item = ' ' * temp + item + temp = temp + 1 + parents[count] = item + contents[:0] = parents + indent = 0 + for c in contents: + if isinstance(c, str): + if c in parents: + current_config_contents.append(c.rjust(len(c) + indent, ' ')) + if c not in parents: + c = ' ' * (len(parents) - 1) + c + current_config_contents.append(c.rjust(len(c) + indent, ' ')) + if isinstance(c, ConfigLine): + current_config_contents.append(c.raw) + indent = 1 + sublevel_config = '\n'.join(current_config_contents) + return sublevel_config + + +def run_commands(module, commands, check_rc=True): + connection = get_connection(module) + try: + return connection.run_commands(commands=commands, check_rc=check_rc) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + + +def edit_config(module, commands, skip_code=None): + connection = get_connection(module) + + # Start: This is to convert interface name from Eth1/1 to Eth1%2f1 + for request in commands: + # This check is to differenciate between requests and commands + if type(request) is dict: + url = request.get("path", None) + if url: + request["path"] = update_url(url) + # End + return 
connection.edit_config(commands) + + +def update_url(url): + match = re.search(STANDARD_ETH_REGEXP, url) + ret_url = url + if match: + interface_name = match.group() + interface_name = interface_name.replace("/", "%2f") + ret_url = PATTERN.sub(interface_name, url) + return ret_url + + +def to_request(module, requests): + transform = ComplexList(dict(path=dict(key=True), method=dict(), data=dict(type='dict')), module) + return transform(to_list(requests)) diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py new file mode 100644 index 00000000..7471bcb1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py @@ -0,0 +1,611 @@ +# +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The sonic bgp fact class +It is in this file the configuration is collected from the device +for a given resource, parsed, and the facts tree is populated +based on the configuration. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +from copy import deepcopy + +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import ( + utils, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + normalize_interface_name, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp.bgp import BgpArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import ( + to_request, + edit_config +) +from ansible.module_utils.connection import ConnectionError + +afi_safi_types_map = { + 'openconfig-bgp-types:IPV4_UNICAST': 'ipv4_unicast', + 'openconfig-bgp-types:IPV6_UNICAST': 'ipv6_unicast', + 'openconfig-bgp-types:L2VPN_EVPN': 'l2vpn_evpn', +} +GET = "get" +network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance' +protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp' + + +def get_all_vrfs(module): + """Get all VRF configurations available in chassis""" + all_vrfs = [] + ret = [] + request = {"path": "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST", "method": GET} + try: + response = edit_config(module, to_request(module, request)) + except ConnectionError as exc: + module.fail_json(msg=str(exc), code=exc.code) + + if 'sonic-vrf:VRF_LIST' in response[0][1]: + all_vrf_data = response[0][1].get('sonic-vrf:VRF_LIST', []) + if all_vrf_data: + for vrf_data in all_vrf_data: + all_vrfs.append(vrf_data['vrf_name']) + + return all_vrfs + + +def get_peergroups(module, vrf_name): + peer_groups = [] + request_path = '%s=%s/protocols/protocol=BGP,bgp/bgp/peer-groups' % (network_instance_path, vrf_name) + request = {"path": request_path, "method": GET} + try: + response = edit_config(module, to_request(module, request)) + except ConnectionError as exc: + module.fail_json(msg=str(exc), code=exc.code) + + resp = response[0][1] + if 
'openconfig-network-instance:peer-groups' in resp: + data = resp['openconfig-network-instance:peer-groups'] + if 'peer-group' in data: + for peer_group in data['peer-group']: + pg = {} + if 'config' in peer_group: + if 'peer-group-name' in peer_group['config']: + pg.update({'name': peer_group['config']['peer-group-name']}) + if 'description' in peer_group['config']: + pg.update({'pg_description': peer_group['config']['description']}) + if 'disable-ebgp-connected-route-check' in peer_group['config']: + pg.update({'disable_connected_check': peer_group['config']['disable-ebgp-connected-route-check']}) + if 'dont-negotiate-capability' in peer_group['config']: + pg.update({'dont_negotiate_capability': peer_group['config']['dont-negotiate-capability']}) + if 'enforce-first-as' in peer_group['config']: + pg.update({'enforce_first_as': peer_group['config']['enforce-first-as']}) + if 'enforce-multihop' in peer_group['config']: + pg.update({'enforce_multihop': peer_group['config']['enforce-multihop']}) + local_as = {} + if 'local-as' in peer_group['config']: + local_as.update({'as': peer_group['config']['local-as']}) + if 'local-as-no-prepend' in peer_group['config']: + local_as.update({'no_prepend': peer_group['config']['local-as-no-prepend']}) + if 'local-as-replace-as' in peer_group['config']: + local_as.update({'replace_as': peer_group['config']['local-as-replace-as']}) + if 'override-capability' in peer_group['config']: + pg.update({'override_capability': peer_group['config']['override-capability']}) + if 'shutdown-message' in peer_group['config']: + pg.update({'shutdown_msg': peer_group['config']['shutdown-message']}) + if 'solo-peer' in peer_group['config']: + pg.update({'solo': peer_group['config']['solo-peer']}) + if 'strict-capability-match' in peer_group['config']: + pg.update({'strict_capability_match': peer_group['config']['strict-capability-match']}) + if 'ttl-security-hops' in peer_group['config']: + pg.update({'ttl_security': 
peer_group['config']['ttl-security-hops']}) + auth_pwd = {} + if 'auth-password' in peer_group and 'config' in peer_group['auth-password']: + if 'encrypted' in peer_group['auth-password']['config']: + auth_pwd.update({'encrypted': peer_group['auth-password']['config']['encrypted']}) + if 'password' in peer_group['auth-password']['config']: + auth_pwd.update({'pwd': peer_group['auth-password']['config']['password']}) + bfd = {} + if 'enable-bfd' in peer_group and 'config' in peer_group['enable-bfd']: + if 'enabled' in peer_group['enable-bfd']['config']: + bfd.update({'enabled': peer_group['enable-bfd']['config']['enabled']}) + if 'check-control-plane-failure' in peer_group['enable-bfd']['config']: + bfd.update({'check_failure': peer_group['enable-bfd']['config']['check-control-plane-failure']}) + if 'bfd-profile' in peer_group['enable-bfd']['config']: + bfd.update({'profile': peer_group['enable-bfd']['config']['bfd-profile']}) + ebgp_multihop = {} + if 'ebgp-multihop' in peer_group and 'config' in peer_group['ebgp-multihop']: + if 'enabled' in peer_group['ebgp-multihop']['config']: + ebgp_multihop.update({'enabled': peer_group['ebgp-multihop']['config']['enabled']}) + if 'multihop-ttl' in peer_group['ebgp-multihop']['config']: + ebgp_multihop.update({'multihop_ttl': peer_group['ebgp-multihop']['config']['multihop-ttl']}) + if 'transport' in peer_group and 'config' in peer_group['transport']: + if 'local-address' in peer_group['transport']['config']: + pg.update({'local_address': peer_group['transport']['config']['local-address']}) + if 'passive-mode' in peer_group['transport']['config']: + pg.update({'passive': peer_group['transport']['config']['passive-mode']}) + if 'timers' in peer_group and 'config' in peer_group['timers']: + if 'minimum-advertisement-interval' in peer_group['timers']['config']: + pg.update({'advertisement_interval': peer_group['timers']['config']['minimum-advertisement-interval']}) + timers = {} + if 'hold-time' in 
peer_group['timers']['config']: + timers.update({'holdtime': peer_group['timers']['config']['hold-time']}) + if 'keepalive-interval' in peer_group['timers']['config']: + timers.update({'keepalive': peer_group['timers']['config']['keepalive-interval']}) + if 'connect-retry' in peer_group['timers']['config']: + timers.update({'connect_retry': peer_group['timers']['config']['connect-retry']}) + capability = {} + if 'config' in peer_group and 'capability-dynamic' in peer_group['config']: + capability.update({'dynamic': peer_group['config']['capability-dynamic']}) + if 'config' in peer_group and 'capability-extended-nexthop' in peer_group['config']: + capability.update({'extended_nexthop': peer_group['config']['capability-extended-nexthop']}) + remote_as = {} + if 'config' in peer_group and 'peer-as' in peer_group['config']: + remote_as.update({'peer_as': peer_group['config']['peer-as']}) + if 'config' in peer_group and 'peer-type' in peer_group['config']: + remote_as.update({'peer_type': peer_group['config']['peer-type'].lower()}) + afis = [] + if 'afi-safis' in peer_group and 'afi-safi' in peer_group['afi-safis']: + for each in peer_group['afi-safis']['afi-safi']: + samp = {} + if 'afi-safi-name' in each and each['afi-safi-name']: + tmp = each['afi-safi-name'].split(':') + if tmp: + split_tmp = tmp[1].split('_') + if split_tmp: + afi = split_tmp[0].lower() + safi = split_tmp[1].lower() + if afi and safi: + samp.update({'afi': afi}) + samp.update({'safi': safi}) + if 'config' in each and 'enabled' in each['config']: + samp.update({'activate': each['config']['enabled']}) + if 'allow-own-as' in each and 'config' in each['allow-own-as']: + allowas_in = {} + allowas_conf = each['allow-own-as']['config'] + if 'origin' in allowas_conf and allowas_conf['origin']: + allowas_in.update({'origin': allowas_conf['origin']}) + elif 'as-count' in allowas_conf and allowas_conf['as-count']: + allowas_in.update({'value': allowas_conf['as-count']}) + if allowas_in: + 
samp.update({'allowas_in': allowas_in}) + if 'ipv4-unicast' in each: + if 'config' in each['ipv4-unicast']: + ip_afi_conf = each['ipv4-unicast']['config'] + ip_afi = update_bgp_nbr_pg_ip_afi_dict(ip_afi_conf) + if ip_afi: + samp.update({'ip_afi': ip_afi}) + if 'prefix-limit' in each['ipv4-unicast'] and 'config' in each['ipv4-unicast']['prefix-limit']: + pfx_lmt_conf = each['ipv4-unicast']['prefix-limit']['config'] + prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf) + if prefix_limit: + samp.update({'prefix_limit': prefix_limit}) + elif 'ipv6-unicast' in each: + if 'config' in each['ipv6-unicast']: + ip_afi_conf = each['ipv6-unicast']['config'] + ip_afi = update_bgp_nbr_pg_ip_afi_dict(ip_afi_conf) + if ip_afi: + samp.update({'ip_afi': ip_afi}) + if 'prefix-limit' in each['ipv6-unicast'] and 'config' in each['ipv6-unicast']['prefix-limit']: + pfx_lmt_conf = each['ipv6-unicast']['prefix-limit']['config'] + prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf) + if prefix_limit: + samp.update({'prefix_limit': prefix_limit}) + elif 'l2vpn-evpn' in each and 'prefix-limit' in each['l2vpn-evpn'] and 'config' in each['l2vpn-evpn']['prefix-limit']: + pfx_lmt_conf = each['l2vpn-evpn']['prefix-limit']['config'] + prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf) + if prefix_limit: + samp.update({'prefix_limit': prefix_limit}) + if 'prefix-list' in each and 'config' in each['prefix-list']: + pfx_lst_conf = each['prefix-list']['config'] + if 'import-policy' in pfx_lst_conf and pfx_lst_conf['import-policy']: + samp.update({'prefix_list_in': pfx_lst_conf['import-policy']}) + if 'export-policy' in pfx_lst_conf and pfx_lst_conf['export-policy']: + samp.update({'prefix_list_out': pfx_lst_conf['export-policy']}) + if samp: + afis.append(samp) + if auth_pwd: + pg.update({'auth_pwd': auth_pwd}) + if bfd: + pg.update({'bfd': bfd}) + if ebgp_multihop: + pg.update({'ebgp_multihop': ebgp_multihop}) + if local_as: + pg.update({'local_as': local_as}) + 
def update_bgp_nbr_pg_ip_afi_dict(ip_afi_conf):
    """Map an ipv4/ipv6-unicast REST 'config' dict onto argspec keys.

    Only truthy values are copied (mirrors the original conditionals), so an
    explicit False 'send-default-route' is deliberately dropped here.
    """
    key_map = (
        ('default-policy-name', 'default_policy_name'),
        ('send-default-route', 'send_default_route'),
    )
    return {arg_key: ip_afi_conf[rest_key]
            for rest_key, arg_key in key_map if ip_afi_conf.get(rest_key)}


def update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf):
    """Map a REST 'prefix-limit' config dict onto argspec keys (truthy values only)."""
    key_map = (
        ('max-prefixes', 'max_prefixes'),
        ('prevent-teardown', 'prevent_teardown'),
        ('warning-threshold-pct', 'warning_threshold'),
        ('restart-timer', 'restart_timer'),
    )
    return {arg_key: pfx_lmt_conf[rest_key]
            for rest_key, arg_key in key_map if pfx_lmt_conf.get(rest_key)}


def get_ip_afi_cfg_payload(ip_afi):
    """Inverse of update_bgp_nbr_pg_ip_afi_dict: argspec keys -> REST keys.

    Uses 'is not None' so an explicit False (e.g. send_default_route) is kept
    in the payload, unlike the truthiness test used in the inbound direction.
    """
    key_map = (
        ('default_policy_name', 'default-policy-name'),
        ('send_default_route', 'send-default-route'),
    )
    return {rest_key: ip_afi[arg_key]
            for arg_key, rest_key in key_map if ip_afi.get(arg_key) is not None}
def get_prefix_limit_payload(prefix_limit):
    """Map argspec prefix_limit keys onto their REST (OpenConfig) equivalents.

    'is not None' keeps explicit False values (e.g. prevent_teardown).
    """
    key_map = (
        ('max_prefixes', 'max-prefixes'),
        ('prevent_teardown', 'prevent-teardown'),
        ('warning_threshold', 'warning-threshold-pct'),
        ('restart_timer', 'restart-timer'),
    )
    return {rest_key: prefix_limit[arg_key]
            for arg_key, rest_key in key_map if prefix_limit.get(arg_key) is not None}


def get_all_bgp_af_redistribute(module, vrfs, af_redis_params_map):
    """Get all BGP Global Address Family Redistribute configurations available in chassis."""
    # First pass: fetch the raw table-connections payload for every VRF.
    tbl_conns_by_vrf = {}
    for vrf_name in vrfs:
        request_path = '%s=%s/table-connections' % (network_instance_path, vrf_name)
        try:
            response = edit_config(module, to_request(module, {"path": request_path, "method": GET}))
        except ConnectionError as exc:
            module.fail_json(msg=str(exc), code=exc.code)

        payload = response[0][1]
        if "openconfig-network-instance:table-connections" in payload:
            tbl_conns_by_vrf[vrf_name] = payload["openconfig-network-instance:table-connections"]

    # Second pass: map each VRF's table-connection entries through the
    # params map, keeping only entries that yield something.
    ret_redis_data = []
    for vrf_name in vrfs:
        redis_data = tbl_conns_by_vrf.get(vrf_name, {}).get('table-connection', [])
        if not redis_data:
            continue
        filtered_redis_data = []
        for e_cfg in redis_data:
            af_redis_data = get_from_params_map(af_redis_params_map, e_cfg)
            if af_redis_data:
                filtered_redis_data.append(af_redis_data)
        if filtered_redis_data:
            ret_redis_data.append({vrf_name: filtered_redis_data})

    return ret_redis_data
def get_all_bgp_globals(module, vrfs):
    """Get all BGP configurations available in chassis.

    Issues one GET per VRF against the openconfig-network-instance BGP
    'global' container and returns a list of {'global': ..., 'vrf_name': ...}
    dicts for the VRFs that have BGP configured.
    """
    all_bgp_globals = []
    for vrf_name in vrfs:
        get_path = '%s=%s/%s/global' % (network_instance_path, vrf_name, protocol_bgp_path)
        request = {"path": get_path, "method": GET}
        try:
            response = edit_config(module, to_request(module, request))
        except ConnectionError as exc:
            module.fail_json(msg=str(exc), code=exc.code)
        for resp in response:
            # resp is a (status, payload) pair; only keep VRFs where BGP exists.
            if "openconfig-network-instance:global" in resp[1]:
                bgp_data = {'global': resp[1].get("openconfig-network-instance:global", {})}
                bgp_data.update({'vrf_name': vrf_name})
                all_bgp_globals.append(bgp_data)
    return all_bgp_globals


def get_bgp_global_af_data(data, af_params_map):
    """Extract address-family facts from one get_all_bgp_globals() entry.

    Returns a dict with 'address_family' (mapped afi-safi entries), 'bgp_as'
    and 'vrf_name' keys, as available in *data*.
    """
    ret_af_data = {}
    for key, val in data.items():
        if key == 'global':
            if 'afi-safis' in val and 'afi-safi' in val['afi-safis']:
                global_af_data = []
                raw_af_data = val['afi-safis']['afi-safi']
                for each_af_data in raw_af_data:
                    af_data = get_from_params_map(af_params_map, each_af_data)
                    if af_data:
                        global_af_data.append(af_data)
                # NOTE: set even when the mapped list is empty.
                ret_af_data.update({'address_family': global_af_data})
            if 'config' in val and 'as' in val['config']:
                as_val = val['config']['as']
                ret_af_data.update({'bgp_as': as_val})
        if key == 'vrf_name':
            ret_af_data.update({'vrf_name': val})
    return ret_af_data


def get_bgp_global_data(data, global_params_map):
    """Map one get_all_bgp_globals() entry through *global_params_map*, tagging the VRF."""
    bgp_data = {}
    for key, val in data.items():
        if key == 'global':
            global_data = get_from_params_map(global_params_map, val)
            bgp_data.update(global_data)
        if key == 'vrf_name':
            bgp_data.update({'vrf_name': val})
    return bgp_data


def get_from_params_map(params_map, data):
    """Translate a REST payload *data* into argspec keys per *params_map*.

    *params_map* maps want_key -> config_key, where config_key is either a
    plain key looked up under data['config'] (or directly under data), or a
    list describing a nested path.  'afi-safi-name' and 'timers' get special
    handling; afi-safi names are split into (afi, safi) via afi_safi_types_map.
    """
    ret_data = {}
    for want_key, config_key in params_map.items():
        tmp_data = {}
        for key, val in data.items():
            if key == 'config':
                for k, v in val.items():
                    if k == config_key:
                        val_data = val[config_key]
                        ret_data.update({want_key: val_data})
                        if config_key == 'afi-safi-name':
                            # Replace the raw OC name with afi/safi split parts.
                            ret_data.pop(want_key)
                            for type_k, type_val in afi_safi_types_map.items():
                                if type_k == val_data:
                                    afi_safi = type_val.split('_')
                                    val_data = afi_safi[0]
                                    ret_data.update({'safi': afi_safi[1]})
                                    ret_data.update({want_key: val_data})
                                    break
            else:
                if key == 'timers' and ('config' in val or 'state' in val):
                    # Timers accumulate into a nested 'timers' dict, except
                    # minimum-advertisement-interval which goes top-level.
                    tmp = {}
                    if key in ret_data:
                        tmp = ret_data[key]
                    cfg = val['config'] if 'config' in val else val['state']
                    for k, v in cfg.items():
                        if k == config_key:
                            if k != 'minimum-advertisement-interval':
                                tmp.update({want_key: cfg[config_key]})
                            else:
                                ret_data.update({want_key: cfg[config_key]})
                    if tmp:
                        ret_data.update({key: tmp})

                elif isinstance(config_key, list):
                    i = 0  # NOTE(review): unused; kept as-is
                    if key == config_key[0]:
                        if key == 'afi-safi':
                            cfg_data = config_key[1]
                            for itm in afi_safi_types_map:
                                if cfg_data in itm:
                                    afi_safi = itm[cfg_data].split('_')
                                    cfg_data = afi_safi[0]
                                    ret_data.update({'safi': afi_safi[1]})
                                    ret_data.update({want_key: cfg_data})
                                    break
                        else:
                            # Walk the nested path, looking in each level and
                            # in its 'config' sub-dict.
                            cfg_data = {key: val}
                            for cfg_key in config_key:
                                if cfg_key == 'config':
                                    continue
                                new_data = None

                                if cfg_key in cfg_data:
                                    new_data = cfg_data[cfg_key]
                                elif isinstance(cfg_data, dict) and 'config' in cfg_data:
                                    if cfg_key in cfg_data['config']:
                                        new_data = cfg_data['config'][cfg_key]

                                if new_data is not None:
                                    cfg_data = new_data
                                else:
                                    break
                            else:
                                # for-else: runs only when the whole path resolved.
                                ret_data.update({want_key: cfg_data})
                else:
                    if key == config_key and val:
                        if config_key != 'afi-safi-name' and config_key != 'timers':
                            cfg_data = val
                            ret_data.update({want_key: cfg_data})

    return ret_data


def get_bgp_data(module, global_params_map):
    """Gather BGP global facts for every VRF, mapped through *global_params_map*."""
    vrf_list = get_all_vrfs(module)
    data = get_all_bgp_globals(module, vrf_list)

    objs = []
    # operate on a collection of resource x
    for conf in data:
        if conf:
            obj = get_bgp_global_data(conf, global_params_map)
            if obj:
                objs.append(obj)
    return objs


def get_bgp_af_data(module, af_params_map):
    """Gather BGP address-family facts for every VRF, mapped through *af_params_map*."""
    vrf_list = get_all_vrfs(module)
    data = get_all_bgp_globals(module, vrf_list)

    objs = []
    # operate on a collection of resource x
    for conf in data:
        if conf:
            obj = get_bgp_global_af_data(conf, af_params_map)
            if obj:
                objs.append(obj)

    return objs
def get_bgp_as(module, vrf_name):
    """Return the BGP AS number configured for *vrf_name*, or None if absent."""
    as_val = None
    get_path = '%s=%s/%s/global/config' % (network_instance_path, vrf_name, protocol_bgp_path)
    request = {"path": get_path, "method": GET}
    try:
        response = edit_config(module, to_request(module, request))
    except ConnectionError as exc:
        module.fail_json(msg=str(exc), code=exc.code)

    resp = response[0][1]
    if "openconfig-network-instance:config" in resp and 'as' in resp['openconfig-network-instance:config']:
        as_val = resp['openconfig-network-instance:config']['as']
    return as_val


def get_bgp_neighbors(module, vrf_name):
    """Return the raw openconfig 'neighbors' container for *vrf_name*, or None."""
    neighbors_data = None
    get_path = '%s=%s/%s/neighbors' % (network_instance_path, vrf_name, protocol_bgp_path)
    request = {"path": get_path, "method": GET}
    try:
        response = edit_config(module, to_request(module, request))
    except ConnectionError as exc:
        module.fail_json(msg=str(exc), code=exc.code)

    resp = response[0][1]
    if "openconfig-network-instance:neighbors" in resp:
        neighbors_data = resp['openconfig-network-instance:neighbors']

    return neighbors_data


def get_all_bgp_neighbors(module):
    """Get all BGP neighbor configurations available in chassis.

    Fix: this docstring previously appeared AFTER the first statement, making
    it a no-op string expression rather than the function's docstring.
    """
    vrf_list = get_all_vrfs(module)
    all_bgp_neighbors = []

    for vrf_name in vrf_list:
        neighbors_cfg = {}

        bgp_as = get_bgp_as(module, vrf_name)
        if bgp_as:
            neighbors_cfg['bgp_as'] = bgp_as
            neighbors_cfg['vrf_name'] = vrf_name
        else:
            # No BGP instance in this VRF; skip it entirely.
            continue

        neighbors = get_bgp_neighbors(module, vrf_name)
        if neighbors:
            neighbors_cfg['neighbors'] = neighbors

        if neighbors_cfg:
            all_bgp_neighbors.append(neighbors_cfg)

    return all_bgp_neighbors
def get_undefined_bgps(want, have, check_neighbors=None):
    """Return want entries (bgp_as/vrf_name pairs) missing from *have*.

    With check_neighbors truthy, an entry is also reported when the BGP
    instance exists but some wanted neighbors are absent; the returned dict
    then carries a 'neighbors' list of just the missing ones.
    """
    if not want:
        return []

    have = have or []
    missing = []
    for want_conf in want:
        want_bgp_as = want_conf['bgp_as']
        want_vrf = want_conf['vrf_name']
        have_conf = next(
            (conf for conf in have
             if conf['bgp_as'] == want_bgp_as and conf['vrf_name'] == want_vrf),
            None)
        if have_conf is None:
            missing.append({'bgp_as': want_bgp_as, 'vrf_name': want_vrf})
        elif check_neighbors:
            nbr_gap = get_undefined_neighbors(want_conf.get('neighbors', []),
                                              have_conf.get('neighbors', []))
            if nbr_gap:
                missing.append({'bgp_as': want_bgp_as,
                                'vrf_name': want_vrf,
                                'neighbors': nbr_gap})

    return missing


def get_undefined_neighbors(want, have):
    """Return {'neighbor': name} entries from *want* whose name is not in *have*."""
    if not want:
        return []
    have_names = {conf['neighbor'] for conf in (have or [])}
    return [{'neighbor': w['neighbor']} for w in want
            if w['neighbor'] not in have_names]


def validate_bgps(module, want, have):
    """Fail the module if *want* references BGP instances absent from *have*."""
    validate_bgp_resources(module, want, have)


def validate_bgp_neighbors(module, want, have):
    """Fail the module if *want* references BGP instances or neighbors absent from *have*."""
    validate_bgp_resources(module, want, have, check_neighbors=True)


def validate_bgp_resources(module, want, have, check_neighbors=None):
    """Shared validation helper for validate_bgps / validate_bgp_neighbors."""
    undefined_resources = get_undefined_bgps(want, have, check_neighbors)
    if undefined_resources:
        err = "Resource not found! {res}".format(res=undefined_resources)
        module.fail_json(msg=err, code=404)
def normalize_neighbors_interface_name(want, module):
    """Canonicalize each config's neighbor interface names in place."""
    if want:
        for conf in want:
            neighbors = conf.get('neighbors', None)
            if neighbors:
                normalize_interface_name(neighbors, module, 'neighbor')


# To create Loopback, VLAN interfaces
def build_interfaces_create_request(interface_name):
    """Build a REST PATCH request that creates *interface_name*.

    Fix: previously the JSON body was produced by rendering a jinja2 string
    template and re-parsing it with json.loads.  That pulled in an optional
    third-party dependency and interpolated the name into JSON text without
    escaping.  Building the dict directly yields the identical payload for
    all valid names and removes both issues.
    """
    payload = {
        "openconfig-interfaces:interfaces": {
            "interface": [
                {"name": interface_name, "config": {"name": interface_name}}
            ]
        }
    }
    return {"path": "data/openconfig-interfaces:interfaces",
            "method": "PATCH",
            "data": payload}
#
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# utils

from __future__ import absolute_import, division, print_function

__metaclass__ = type

import re
import json
import ast
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
    is_masklen,
    to_netmask,
    remove_empties
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
    to_request,
    edit_config
)
from ansible.module_utils.connection import ConnectionError

# Fallback test key appended when the caller supplies no 'config' test key.
DEFAULT_TEST_KEY = {'config': {'name': ''}}
GET = 'get'

# Module-level cache of the switch interface naming mode; lazily filled by
# validate_intf_naming_mode().
intf_naming_mode = ""


def get_diff(base_data, compare_with_data, test_keys=None, is_skeleton=None):
    """Return entries of *base_data* that are absent from or differ in *compare_with_data*.

    Both lists and dicts are accepted; two dicts are first wrapped into
    single-entry 'config' lists so the same list-diff machinery applies,
    and the result is unwrapped afterwards.
    """
    diff = []
    if is_skeleton is None:
        is_skeleton = False

    test_keys = normalize_testkeys(test_keys)

    if isinstance(base_data, list) and isinstance(compare_with_data, list):
        dict_diff = get_diff_dict({"config": base_data}, {"config": compare_with_data}, test_keys, is_skeleton)
        diff = dict_diff.get("config", [])

    else:
        new_base, new_compare = convert_dict_to_single_entry_list(base_data, compare_with_data, test_keys)
        diff = get_diff_dict(new_base, new_compare, test_keys, is_skeleton)
        if diff:
            diff = convert_single_entry_list_to_dict(diff)
        else:
            diff = {}

    return diff


def get_diff_dict(base_data, compare_with_data, test_keys=None, is_skeleton=None):
    """Recursive dict diff: keys in *base_data* missing or changed in *compare_with_data*.

    *test_keys* identifies which keys of list-of-dict members act as identity
    keys when matching list items.  With *is_skeleton* True, empty values are
    kept instead of being pruned via remove_empties.
    """
    if is_skeleton is None:
        is_skeleton = False

    if test_keys is None:
        test_keys = []

    if not base_data:
        return base_data

    planned_set = set(base_data.keys())
    discovered_set = set(compare_with_data.keys())
    intersect_set = planned_set.intersection(discovered_set)
    changed_dict = {}
    has_dict_item = None
    added_set = planned_set - intersect_set
    # Keys part of added are new and put into changed_dict
    if added_set:
        for key in added_set:
            if is_skeleton:
                changed_dict[key] = base_data[key]
            elif base_data[key] is not None:
                if isinstance(base_data[key], dict):
                    val_dict = remove_empties(base_data[key])
                    if val_dict:
                        changed_dict[key] = remove_empties(base_data[key])
                elif isinstance(base_data[key], list):
                    val_list = remove_empties_from_list(base_data[key])
                    if val_list:
                        changed_dict[key] = remove_empties_from_list(base_data[key])
                else:
                    changed_dict[key] = base_data[key]
    for key in intersect_set:
        has_dict_item = False
        value = base_data[key]
        if isinstance(value, list):
            p_list = base_data[key] if key in base_data else []
            d_list = compare_with_data[key] if key in compare_with_data else []
            # Identity keys for matching items of this particular list.
            keys_to_compare = next((test_key_item[key] for test_key_item in test_keys if key in test_key_item), None)
            changed_list = []
            if p_list and d_list:
                for p_list_item in p_list:
                    matched = False
                    has_diff = False
                    for d_list_item in d_list:
                        if (isinstance(p_list_item, dict) and isinstance(d_list_item, dict)):
                            if keys_to_compare:
                                key_matched_cnt = 0
                                test_keys_present_cnt = 0
                                common_keys = set(p_list_item).intersection(d_list_item)
                                for test_key in keys_to_compare:
                                    if test_key in common_keys:
                                        test_keys_present_cnt += 1
                                        if p_list_item[test_key] == d_list_item[test_key]:
                                            key_matched_cnt += 1
                                if key_matched_cnt and key_matched_cnt == test_keys_present_cnt:
                                    # Same identity: recurse, then re-attach the
                                    # identity keys to any sub-diff found.
                                    remaining_keys = [test_key_item for test_key_item in test_keys if key not in test_key_item]
                                    dict_diff = get_diff_dict(p_list_item, d_list_item, remaining_keys, is_skeleton)
                                    matched = True
                                    if dict_diff:
                                        has_diff = True
                                        for test_key in keys_to_compare:
                                            dict_diff.update({test_key: p_list_item[test_key]})
                                    break
                            else:
                                dict_diff = get_diff_dict(p_list_item, d_list_item, test_keys, is_skeleton)
                                if not dict_diff:
                                    matched = True
                                    break
                        else:
                            if p_list_item == d_list_item:
                                matched = True
                                break
                    if not matched:
                        if is_skeleton:
                            changed_list.append(p_list_item)
                        else:
                            if isinstance(p_list_item, dict):
                                val_dict = remove_empties(p_list_item)
                                if val_dict is not None:
                                    changed_list.append(val_dict)
                            elif isinstance(p_list_item, list):
                                val_list = remove_empties_from_list(p_list_item)
                                if val_list is not None:
                                    changed_list.append(val_list)
                            else:
                                if p_list_item is not None:
                                    changed_list.append(p_list_item)
                    elif has_diff and dict_diff:
                        changed_list.append(dict_diff)
                if changed_list:
                    changed_dict.update({key: changed_list})
            elif p_list and (not d_list):
                changed_dict[key] = p_list
        elif (isinstance(value, dict) and isinstance(compare_with_data[key], dict)):
            dict_diff = get_diff_dict(base_data[key], compare_with_data[key], test_keys, is_skeleton)
            if dict_diff:
                changed_dict[key] = dict_diff
        elif value is not None:
            if not is_skeleton:
                if compare_with_data[key] != base_data[key]:
                    changed_dict[key] = base_data[key]
    return changed_dict
def convert_dict_to_single_entry_list(base_data, compare_with_data, test_keys):
    """Wrap two plain dicts as single-entry 'config' lists for the list-diff machinery.

    If the 'config' test key is absent from base_data, both sides get wrapped
    with that key set to 'temp_key' and the payload under 'data', so the list
    matcher can pair them up.
    """
    # Find the test key declared for 'config', if any.
    config_testkey = None
    for item in test_keys:
        if 'config' in item:
            config_testkey = list(item['config'])[0]
        if config_testkey:
            break

    if config_testkey and base_data and config_testkey not in base_data:
        new_base = {'config': [{config_testkey: 'temp_key', 'data': base_data}]}
        new_compare = {'config': [{config_testkey: 'temp_key', 'data': compare_with_data}]}
    else:
        new_base = {'config': [base_data]}
        new_compare = {'config': [compare_with_data]}

    return new_base, new_compare


def convert_single_entry_list_to_dict(diff):
    """Inverse of convert_dict_to_single_entry_list: unwrap the diff back to a dict."""
    entry = diff['config'][0]
    return entry['data'] if 'data' in entry else entry


def normalize_testkeys(test_keys):
    """Ensure a 'config' test key entry exists (mutates and returns the list)."""
    if test_keys is None:
        test_keys = []
    if not any('config' in entry for entry in test_keys):
        test_keys.append(DEFAULT_TEST_KEY)
    return test_keys


def update_states(commands, state):
    """Return shallow copies of *commands* (dict or list of dicts) tagged with *state*."""
    tagged = []
    if not commands:
        return tagged
    if isinstance(commands, dict):
        entry = commands.copy()
        entry['state'] = state
        tagged.append(entry)
    elif isinstance(commands, list):
        for command in commands:
            entry = command.copy()
            entry['state'] = state
            tagged.append(entry)
    return tagged


def dict_to_set(sample_dict):
    # Generate a hashable set of (key, value) pairs from a dict for comparison.
    # NOTE(review): intentionally kept structurally identical to the original:
    # it MUTATES nested list values into tuples in place, and the nested-dict
    # branch extends 'li' inside the iteration loop — preserve as-is.
    test_dict = dict()
    if isinstance(sample_dict, dict):
        for k, v in iteritems(sample_dict):
            if v is not None:
                if isinstance(v, list):
                    if isinstance(v[0], dict):
                        li = []
                        for each in v:
                            for key, value in iteritems(each):
                                if isinstance(value, list):
                                    each[key] = tuple(value)
                            li.append(tuple(iteritems(each)))
                        v = tuple(li)
                    else:
                        v = tuple(v)
                elif isinstance(v, dict):
                    li = []
                    for key, value in iteritems(v):
                        if isinstance(value, list):
                            v[key] = tuple(value)
                        li.extend(tuple(iteritems(v)))
                    v = tuple(li)
                test_dict.update({k: v})
        return_set = set(tuple(iteritems(test_dict)))
    else:
        return_set = set(sample_dict)
    return return_set
def validate_ipv4(value, module):
    """Fail the module unless *value* looks like 'addr/masklen' with a valid IPv4 mask length."""
    if not value:
        return
    address = value.split("/")
    if len(address) != 2:
        module.fail_json(
            msg="address format is /, got invalid format {0}".format(value)
        )
    if not is_masklen(address[1]):
        module.fail_json(
            msg="invalid value for mask: {0}, mask should be in range 0-32".format(address[1])
        )


def validate_ipv6(value, module):
    """Fail the module unless *value* looks like 'addr/prefixlen' with prefixlen 0-128."""
    if not value:
        return
    address = value.split("/")
    if len(address) != 2:
        module.fail_json(
            msg="address format is /, got invalid format {0}".format(value)
        )
    elif not 0 <= int(address[1]) <= 128:
        module.fail_json(
            msg="invalid value for mask: {0}, mask should be in range 0-128".format(address[1])
        )


def validate_n_expand_ipv4(module, want):
    """Validate want['address'] and expand 'a.b.c.d/len' into 'a.b.c.d netmask' form.

    Input already containing a space is returned untouched.
    """
    ip_addr_want = want.get("address")
    if len(ip_addr_want.split(" ")) > 1:
        return ip_addr_want
    validate_ipv4(ip_addr_want, module)
    pieces = ip_addr_want.split("/")
    if len(pieces) == 2:
        ip_addr_want = "{0} {1}".format(pieces[0], to_netmask(pieces[1]))
    return ip_addr_want


def netmask_to_cidr(netmask):
    """Convert a dotted-quad netmask to a prefix-length string.

    NOTE(review): legacy heuristic preserved verbatim in behavior: all-zero
    masks return "32", masks made only of 255/0 octets return "24", the bit
    scan always compares against the third octet, and a partial first octet
    falls through returning None.
    """
    bit_values = [128, 64, 32, 16, 8, 4, 2, 1]
    octets = netmask.split(".")
    partial_octets = [o for o in octets if int(o) != 255 and int(o) != 0]
    if partial_octets:
        partial_index = octets.index(partial_octets[0])
    elif sum(map(int, octets)) == 0:
        return "32"
    else:
        return "24"
    accumulated = 0
    count = 0
    for bit in bit_values:
        if accumulated == int(octets[2]):
            if partial_index == 1:
                return str(8 + count)
            elif partial_index == 2:
                return str(8 * 2 + count)
            elif partial_index == 3:
                return str(8 * 3 + count)
            break
        accumulated += bit
        count += 1


def remove_empties_from_list(config_list):
    """remove_empties applied to every element of a list (empty/None input -> [])."""
    if not config_list:
        return []
    return [remove_empties(config) for config in config_list]
def get_device_interface_naming_mode(module):
    """Query DEVICE_METADATA on the switch for its interface naming mode.

    Returns 'native', 'standard', or '' when the metadata is unavailable.
    """
    intf_naming_mode = ""
    request = {"path": "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost", "method": GET}
    try:
        response = edit_config(module, to_request(module, request))
    except ConnectionError as exc:
        module.fail_json(msg=str(exc), code=exc.code)

    if 'sonic-device-metadata:DEVICE_METADATA_LIST' in response[0][1]:
        device_meta_data = response[0][1].get('sonic-device-metadata:DEVICE_METADATA_LIST', [])
        if device_meta_data:
            intf_naming_mode = device_meta_data[0].get('intf_naming_mode', 'native')

    return intf_naming_mode


# Fix: the original classes were '[e|E]', which also matched a literal '|'
# ('|' has no alternation meaning inside a character class); '[eE]' is the
# intended class.
STANDARD_ETH_REGEXP = r"[eE]th\s*\d+/\d+"
# NOTE(review): 'th*' means 't' followed by zero-or-more 'h', so "Ethernet0"
# only matches through its trailing "et0".  Looks unintended, but the overall
# accept/reject behavior is unchanged, so it is kept as-is.
NATIVE_ETH_REGEXP = r"[eE]th*\d+$"
NATIVE_MODE = "native"
STANDARD_MODE = "standard"


def find_intf_naming_mode(intf_name):
    """Classify an interface name: 'standard' (e.g. Eth1/1) or 'native' (e.g. Ethernet0)."""
    if re.search(STANDARD_ETH_REGEXP, intf_name):
        return STANDARD_MODE
    return NATIVE_MODE


def validate_intf_naming_mode(intf_name, module):
    """Fail the module if *intf_name* does not match the switch's naming mode.

    The switch mode is fetched once and cached in the module-global
    intf_naming_mode.
    """
    global intf_naming_mode
    if intf_naming_mode == "":
        intf_naming_mode = get_device_interface_naming_mode(module)

    if intf_naming_mode != "":
        ansible_intf_naming_mode = find_intf_naming_mode(intf_name)
        if intf_naming_mode != ansible_intf_naming_mode:
            err = "Interface naming mode configured on switch {naming_mode}, {intf_name} is not valid".format(naming_mode=intf_naming_mode, intf_name=intf_name)
            module.fail_json(msg=err, code=400)


def normalize_interface_name(configs, module, namekey=None):
    """Canonicalize configs[*][namekey] in place via get_normalize_interface_name."""
    if not namekey:
        namekey = 'name'

    if configs:
        for conf in configs:
            if conf.get(namekey, None):
                conf[namekey] = get_normalize_interface_name(conf[namekey], module)


def normalize_interface_name_list(configs, module):
    """Return a new list of canonicalized interface names."""
    norm_configs = []
    if configs:
        for conf in configs:
            norm_configs.append(get_normalize_interface_name(conf, module))

    return norm_configs
def get_normalize_interface_name(intf_name, module):
    """Return the canonical form of *intf_name* (e.g. 'po 10' -> 'PortChannel10').

    Ethernet names are additionally checked against the switch's naming mode
    (standard 'Eth1/1' vs native 'Ethernet0').  Unrecognized names are
    returned unchanged.
    """
    change_flag = False
    # remove any whitespace in the given string
    ret_intf_name = re.sub(r"\s+", "", intf_name, flags=re.UNICODE)
    ret_intf_name = ret_intf_name.capitalize()

    # search for the first numeric character (digit); the prefix before it is
    # the interface type, the rest is the interface id
    match = re.search(r"\d", ret_intf_name)
    if match:
        change_flag = True
        start_pos = match.start()
        name = ret_intf_name[0:start_pos]
        intf_id = ret_intf_name[start_pos:]

        # Interface naming mode affects only ethernet ports
        if name.startswith("Eth"):
            validate_intf_naming_mode(intf_name, module)

        if ret_intf_name.startswith("Management") or ret_intf_name.startswith("Mgmt"):
            # the management port is always eth0
            name = "eth"
            intf_id = "0"
        elif re.search(STANDARD_ETH_REGEXP, ret_intf_name):
            name = "Eth"
        elif re.search(NATIVE_ETH_REGEXP, ret_intf_name):
            name = "Ethernet"
        elif name.startswith("Po"):
            name = "PortChannel"
        elif name.startswith("Vlan"):
            name = "Vlan"
        elif name.startswith("Lo"):
            name = "Loopback"
        else:
            # unknown prefix: fall through and return the input unchanged
            change_flag = False

        ret_intf_name = name + intf_id

    if not change_flag:
        ret_intf_name = intf_name

    return ret_intf_name


def get_speed_from_breakout_mode(breakout_mode):
    """Map a breakout mode string (e.g. '4x25G') to its OC speed enum, or None."""
    speed = None
    speed_breakout_mode_map = {
        "4x10G": "SPEED_10GB", "1x100G": "SPEED_100GB", "1x40G": "SPEED_40GB", "4x25G": "SPEED_25GB", "2x50G": "SPEED_50GB",
        "1x400G": "SPEED_400GB", "4x100G": "SPEED_100GB", "4x50G": "SPEED_50GB", "2x100G": "SPEED_100GB", "2x200G": "SPEED_200GB"
    }
    if breakout_mode in speed_breakout_mode_map:
        speed = speed_breakout_mode_map[breakout_mode]
    return speed
def get_breakout_mode(module, name):
    """Return the breakout mode (e.g. '4x25G') configured on port *name*, or None.

    A 404 from the platform components endpoint is treated as "no breakout"
    rather than an error.
    """
    response = None
    mode = None
    component_name = name
    if "/" in name:
        # '/' must be percent-encoded inside a RESTCONF key
        component_name = name.replace("/", "%2f")
    url = "data/openconfig-platform:components/component=%s" % (component_name)
    request = [{"path": url, "method": GET}]
    try:
        response = edit_config(module, to_request(module, request))
    except ConnectionError as exc:
        try:
            # The error detail arrives as a repr-like string; coerce the
            # quoting so it parses as JSON and inspect the status code.
            json_obj = json.loads(str(exc).replace("'", '"'))
            if json_obj and type(json_obj) is dict and 404 == json_obj['code']:
                response = None
            else:
                module.fail_json(msg=str(exc), code=exc.code)
        except Exception as err:
            # Unparseable error payload: surface the original exception.
            module.fail_json(msg=str(exc), code=exc.code)

    if response and "openconfig-platform:component" in response[0][1]:
        raw_port_breakout = response[0][1]['openconfig-platform:component'][0]
        port_name = raw_port_breakout.get('name', None)
        port_data = raw_port_breakout.get('port', None)
        if port_name and port_data and 'openconfig-platform-port:breakout-mode' in port_data:
            if 'groups' in port_data['openconfig-platform-port:breakout-mode']:
                group = port_data['openconfig-platform-port:breakout-mode']['groups']['group'][0]
                if 'config' in group:
                    cfg = group.get('config', None)
                    breakout_speed = cfg.get('breakout-speed', None)
                    num_breakouts = cfg.get('num-breakouts', None)
                    if breakout_speed and num_breakouts:
                        # 'openconfig-if-ethernet:SPEED_100GB' -> '100G'
                        speed = breakout_speed.replace('openconfig-if-ethernet:SPEED_', '')
                        speed = speed.replace('GB', 'G')
                        mode = str(num_breakouts) + 'x' + speed
    return mode


def command_list_str_to_dict(module, warnings, cmd_list_in, exec_cmd=False):
    """Normalize a list of commands (dicts or dict-literal strings) to dicts.

    Strings that parse as dict literals via ast.literal_eval are converted;
    anything else is passed through unchanged.  In check mode with exec_cmd
    set, non-'show' commands are dropped with a warning instead of returned.
    """
    cmd_list_out = []
    for cmd in cmd_list_in:
        cmd_out = dict()
        nested_cmd_is_dict = False
        if isinstance(cmd, dict):
            cmd_out = cmd
        else:
            try:
                nest_dict = ast.literal_eval(cmd)
                nested_cmd_is_dict = isinstance(nest_dict, dict)
            except Exception:
                nested_cmd_is_dict = False

            if nested_cmd_is_dict:
                for key, value in nest_dict.items():
                    cmd_out[key] = value
            else:
                cmd_out = cmd

        # NOTE(review): when cmd_out remained a plain string, cmd_out['command']
        # below would raise TypeError; presumably exec_cmd callers always pass
        # dict-shaped commands — TODO confirm against callers.
        if exec_cmd and module.check_mode and not cmd_out['command'].startswith('show'):
            warnings.append(
                'Only show commands are supported when using check mode, not '
                'executing %s' % cmd_out['command']
            )
        else:
            cmd_list_out.append(cmd_out)

    return cmd_list_out
b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_aaa +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: sonic_aaa +version_added: 1.1.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Abirami N (@abirami-n) +short_description: Manage AAA and its parameters +description: + - This module is used for configuration management of aaa parameters on devices running Enterprise SONiC. 
+options: + config: + description: + - Specifies the aaa related configurations + type: dict + suboptions: + authentication: + description: + - Specifies the configurations required for aaa authentication + type: dict + suboptions: + data: + description: + - Specifies the data required for aaa authentication + type: dict + suboptions: + fail_through: + description: + - Specifies the state of failthrough + type: bool + local: + description: + - Enable or Disable local authentication + type: bool + group: + description: + - Specifies the method of aaa authentication + type: str + choices: + - ldap + - radius + - tacacs+ + + state: + description: + - Specifies the operation to be performed on the aaa parameters configured on the device. + - In case of merged, the input configuration will be merged with the existing aaa configuration on the device. + - In case of deleted the existing aaa configuration will be removed from the device. + default: merged + choices: ['merged', 'deleted'] + type: str +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +# do show aaa +# AAA Authentication Information +# --------------------------------------------------------- +# failthrough : True +# login-method : local + +- name: Delete aaa configurations + dellemc.enterprise_sonic.sonic_aaa: + config: + authentication: + data: + local: True + state: deleted + +# After state: +# ------------ +# +# do show aaa +# AAA Authentication Information +# --------------------------------------------------------- +# failthrough : True +# login-method : + + +# Using deleted +# +# Before state: +# ------------- +# +# do show aaa +# AAA Authentication Information +# --------------------------------------------------------- +# failthrough : True +# login-method : local + +- name: Delete aaa configurations + dellemc.enterprise_sonic.sonic_aaa: + config: + state: deleted + +# After state: +# ------------ +# +# do show aaa +# AAA Authentication Information +# 
--------------------------------------------------------- +# failthrough : +# login-method : + + +# Using merged +# +# Before state: +# ------------- +# +# do show aaa +# AAA Authentication Information +# --------------------------------------------------------- +# failthrough : False +# login-method : + +- name: Merge aaa configurations + dellemc.enterprise_sonic.sonic_aaa: + config: + authentication: + data: + local: true + fail_through: true + state: merged + +# After state: +# ------------ +# +# do show aaa +# AAA Authentication Information +# --------------------------------------------------------- +# failthrough : True +# login-method : local + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.aaa.aaa import AaaArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.aaa.aaa import Aaa + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=AaaArgs.argument_spec, + supports_check_mode=True) + + result = Aaa(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_api.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_api.py new file mode 100644 index 00000000..234603a0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_api.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +The module file for sonic_vlans +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_api +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Abirami N (@abirami-n) +short_description: Manages REST operations on devices running Enterprise SONiC +description: + - Manages REST operations on devices running Enterprise SONiC Distribution + by Dell Technologies. This module provides an implementation for working + with SONiC REST operations in a deterministic way. +options: + url: + description: + - The HTTP path of the request after 'restconf/'. 
+ type: path + required: true + body: + description: + - The body of the HTTP request/response to the web service which contains the payload. + type: raw + method: + description: + - The HTTP method of the request or response. Must be a valid method + accepted by the service that handles the request. + type: str + required: true + choices: ['GET', 'PUT', 'POST', 'PATCH', 'DELETE'] + status_code: + description: + - A list of valid, numeric, HTTP status codes that signifies the success of a request. + type: list + elements: int + required: true +""" +EXAMPLES = """ +- name: Checks that you can connect (GET) to a page and it returns a status 200 + dellemc.enterprise_sonic.sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet60 + method: "GET" + status_code: 200 + +- name: Appends data to an existing interface using PATCH and verifies if it returns status 204 + dellemc.enterprise_sonic.sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet60/config/description + method: "PATCH" + body: {"openconfig-interfaces:description": "Eth-60"} + status_code: 204 + +- name: Deletes an associated IP address using DELETE and verifies if it returns status 204 + dellemc.enterprise_sonic.sonic_api: + url: > + data/openconfig-interfaces:interfaces/interface=Ethernet64/subinterfaces/subinterface=0/ + openconfig-if-ip:ipv4/addresses/address=1.1.1.1/config/prefix-length + method: "DELETE" + status_code: 204 + +- name: Adds a VLAN network instance using PUT and verifies if it returns status 204 + dellemc.enterprise_sonic.sonic_api: + url: data/openconfig-network-instance:network-instances/network-instance=Vlan100/ + method: "PUT" + body: {"openconfig-network-instance:network-instance": [{"name": "Vlan100","config": {"name": "Vlan100"}}]} + status_code: 204 + +- name: Adds a prefix-set to a routing policy using POST and verifies if it returns 201 + dellemc.enterprise_sonic.sonic_api: + url: 
data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set=p1 + method: "POST" + body: {"openconfig-routing-policy:config": {"name": "p1","mode": "IPV4" }} + status_code: 201 + +""" +RETURN = """ +response: + description: The response at the network device end for the REST call which contains the status code. + returned: always + type: list + sample: {"response": [ 204,{""}]} +msg: + description: The HTTP error message from the request. + returned: HTTP Error + type: str +""" + +from ansible.module_utils.connection import ConnectionError + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import edit_config, to_request + + +def initiate_request(module): + """Get all the data available in chassis""" + url = module.params['url'] + body = module.params['body'] + method = module.params['method'] + if method == "GET" or method == "DELETE": + request = to_request(module, [{"path": url, "method": method}]) + elif method == "PATCH" or method == "PUT" or method == "POST": + request = to_request(module, [{"path": url, "method": method, "data": body}]) + + try: + response = edit_config(module, request) + except ConnectionError as exc: + module.fail_json(msg=to_text(exc)) + return response + + +def main(): + + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + argument_spec = dict( + url=dict(type='path', required=True), + body=dict(type='raw', required=False), + method=dict(type='str', choices=['GET', 'PUT', 'PATCH', 'DELETE', 'POST'], required=True), + status_code=dict(type='list', elements='int', required=True), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict( + changed=False, + ) + response = initiate_request(module) + response_code = response[0][0] + status_code = module.params['status_code'] + if 
response_code == int(status_code[0]) and response_code in (201, 204): + result.update({'changed': True}) + + result.update({ + 'response': response, + }) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py new file mode 100644 index 00000000..bc53ca40 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py @@ -0,0 +1,390 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_bgp +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_bgp +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Dhivya P (@dhivayp) +short_description: Manage global BGP and its parameters +description: + - This module provides configuration management of global BGP parameters on devices running Enterprise SONiC Distribution by Dell Technologies. +options: + config: + description: + - Specifies the BGP-related configuration. 
+ type: list + elements: dict + suboptions: + bgp_as: + description: + - Specifies the BGP autonomous system (AS) number to configure on the device. + type: str + required: true + vrf_name: + description: + - Specifies the VRF name. + type: str + default: 'default' + router_id: + description: + - Configures the BGP routing process router-id value. + type: str + log_neighbor_changes: + description: + - Enables/disables logging neighbor up/down and reset reason. + type: bool + max_med: + description: + - Configure max med and its parameters + type: dict + suboptions: + on_startup: + description: + - On startup time and max-med value + type: dict + suboptions: + timer: + description: + - Configures on startup time + type: int + med_val: + description: + - on startup med value + type: int + timers: + description: + - Adjust routing timers + type: dict + suboptions: + holdtime: + description: + - Configures hold-time + type: int + keepalive_interval: + description: + - Configures keepalive-interval + type: int + bestpath: + description: + - Configures the BGP best-path. + type: dict + suboptions: + as_path: + description: + - Configures the as-path values. + type: dict + suboptions: + confed: + description: + - Configures the confed values of as-path. + type: bool + ignore: + description: + - Configures the ignore values of as-path. + type: bool + multipath_relax: + description: + - Configures the multipath_relax values of as-path. + type: bool + multipath_relax_as_set: + description: + - Configures the multipath_relax_as_set values of as-path. + type: bool + compare_routerid: + description: + - Configures the compare_routerid. + type: bool + med: + description: + - Configures the med values. + type: dict + suboptions: + confed: + description: + - Configures the confed values of med. + type: bool + missing_as_worst: + description: + - Configures the missing_as_worst values of as-path. 
+ type: bool + always_compare_med: + description: + - Allows comparing meds from different neighbors if set to true + type: bool + state: + description: + - Specifies the operation to be performed on the BGP process that is configured on the device. + - In case of merged, the input configuration is merged with the existing BGP configuration on the device. + - In case of deleted, the existing BGP configuration is removed from the device. + default: merged + choices: ['merged', 'deleted'] + type: str +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#! +#router bgp 10 vrf VrfCheck1 +# router-id 10.2.2.32 +# log-neighbor-changes +#! +#router bgp 11 vrf VrfCheck2 +# log-neighbor-changes +# bestpath as-path ignore +# bestpath med missing-as-worst confed +# bestpath compare-routerid +#! +#router bgp 4 +# router-id 10.2.2.4 +# bestpath as-path ignore +# bestpath as-path confed +# bestpath med missing-as-worst confed +# bestpath compare-routerid +#! +# +- name: Delete BGP Global attributes + dellemc.enterprise_sonic.sonic_bgp: + config: + - bgp_as: 4 + router_id: 10.2.2.4 + log_neighbor_changes: False + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: False + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + - bgp_as: 10 + router_id: 10.2.2.32 + log_neighbor_changes: True + vrf_name: 'VrfCheck1' + - bgp_as: 11 + log_neighbor_changes: True + vrf_name: 'VrfCheck2' + bestpath: + as_path: + confed: False + ignore: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + state: deleted + + +# After state: +# ------------ +# +#! +#router bgp 10 vrf VrfCheck1 +# log-neighbor-changes +#! +#router bgp 11 vrf VrfCheck2 +# log-neighbor-changes +# bestpath compare-routerid +#! +#router bgp 4 +# log-neighbor-changes +# bestpath compare-routerid +#! + + +# Using deleted +# +# Before state: +# ------------- +# +#! 
+#router bgp 10 vrf VrfCheck1 +# router-id 10.2.2.32 +# log-neighbor-changes +#! +#router bgp 11 vrf VrfCheck2 +# log-neighbor-changes +# bestpath as-path ignore +# bestpath med missing-as-worst confed +# bestpath compare-routerid +#! +#router bgp 4 +# router-id 10.2.2.4 +# bestpath as-path ignore +# bestpath as-path confed +# bestpath med missing-as-worst confed +# bestpath compare-routerid +#! + +- name: Deletes all the bgp global configurations + dellemc.enterprise_sonic.sonic_bgp: + config: + state: deleted + +# After state: +# ------------ +# +#! +#! + + +# Using merged +# +# Before state: +# ------------- +# +#! +#router bgp 4 +# router-id 10.1.1.4 +#! +# +- name: Merges provided configuration with device configuration + dellemc.enterprise_sonic.sonic_bgp: + config: + - bgp_as: 4 + router_id: 10.2.2.4 + log_neighbor_changes: False + timers: + holdtime: 20 + keepalive_interval: 30 + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: False + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + always_compare_med: True + max_med: + on_startup: + timer: 667 + med_val: 7878 + - bgp_as: 10 + router_id: 10.2.2.32 + log_neighbor_changes: True + vrf_name: 'VrfCheck1' + - bgp_as: 11 + log_neighbor_changes: True + vrf_name: 'VrfCheck2' + bestpath: + as_path: + confed: False + ignore: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + state: merged +# +# After state: +# ------------ +# +#! +#router bgp 10 vrf VrfCheck1 +# router-id 10.2.2.32 +# log-neighbor-changes +#! +#router bgp 11 vrf VrfCheck2 +# log-neighbor-changes +# bestpath as-path ignore +# bestpath med missing-as-worst confed +# bestpath compare-routerid +#! 
+#router bgp 4 +# router-id 10.2.2.4 +# bestpath as-path ignore +# bestpath as-path confed +# bestpath med missing-as-worst confed +# bestpath compare-routerid +# always-compare-med +# max-med on-startup 667 7878 +# timers 20 30 +# +#! + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp.bgp import BgpArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp.bgp import Bgp + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=BgpArgs.argument_spec, + supports_check_mode=True) + + result = Bgp(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py new file mode 100644 index 00000000..6d55355c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py @@ -0,0 +1,414 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_bgp_af +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: sonic_bgp_af +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Manage global BGP address-family and its parameters +description: + - This module provides configuration management of global BGP_AF parameters on devices running Enterprise SONiC. + - bgp_as and vrf_name must be created in advance on the device. +options: + config: + description: + - Specifies the BGP_AF related configuration. + type: list + elements: dict + suboptions: + bgp_as: + description: + - Specifies the BGP autonomous system (AS) number which is already configured on the device. + type: str + required: true + vrf_name: + description: + - Specifies the VRF name which is already configured on the device. + type: str + default: 'default' + address_family: + description: + - Specifies BGP address family related configurations. + type: dict + suboptions: + afis: + description: + - List of address families, such as ipv4, ipv6, and l2vpn. + - afi and safi are required together. + type: list + elements: dict + suboptions: + afi: + description: + - Type of address family to configure. 
+ type: str + choices: + - ipv4 + - ipv6 + - l2vpn + required: True + safi: + description: + - Specifies the type of communication for the address family. + type: str + choices: + - unicast + - evpn + default: unicast + dampening: + description: + - Enable route flap dampening if set to true + type: bool + network: + description: + - Enable routing on an IP network for each prefix provided in the network + type: list + elements: str + redistribute: + description: + - Specifies the redistribute information from another routing protocol. + type: list + elements: dict + suboptions: + protocol: + description: + - Specifies the protocol for configuring redistribute information. + type: str + choices: ['ospf', 'static', 'connected'] + required: True + metric: + description: + - Specifies the metric for redistributed routes. + type: str + route_map: + description: + - Specifies the route map reference. + type: str + advertise_pip: + description: + - Enables advertise PIP + type: bool + advertise_pip_ip: + description: + - PIP IPv4 address + type: str + advertise_pip_peer_ip: + description: + - PIP peer IPv4 address + type: str + advertise_svi_ip: + description: + - Enables advertise SVI MACIP routes + type: bool + route_advertise_list: + description: + - List of advertise routes + type: list + elements: dict + suboptions: + advertise_afi: + required: True + type: str + choices: + - ipv4 + - ipv6 + description: + - Specifies the address family + route_map: + type: str + description: + - Specifies the route-map reference + advertise_default_gw: + description: + - Specifies the advertise default gateway flag. + type: bool + advertise_all_vni: + description: + - Specifies the advertise all vni flag. + type: bool + max_path: + description: + - Specifies the maximum paths of ibgp and ebgp count. + type: dict + suboptions: + ibgp: + description: + - Specifies the count of the ibgp multipaths count. 
+ type: int + ebgp: + description: + - Specifies the count of the ebgp multipaths count. + type: int + state: + description: + - Specifies the operation to be performed on the BGP_AF process configured on the device. + - In case of merged, the input configuration is merged with the existing BGP_AF configuration on the device. + - In case of deleted, the existing BGP_AF configuration is removed from the device. + default: merged + choices: ['merged', 'deleted'] + type: str +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# ! +# address-family ipv4 unicast +# maximum-paths 1 +# maximum-paths ibgp 1 +# dampening +# ! +# address-family ipv6 unicast +# redistribute connected route-map bb metric 21 +# redistribute ospf route-map aa metric 27 +# redistribute static route-map bb metric 26 +# maximum-paths 4 +# maximum-paths ibgp 5 +# ! +# address-family l2vpn evpn +# advertise-svi-ip +# advertise ipv6 unicast route-map aa +# advertise-pip ip 1.1.1.1 peer-ip 2.2.2.2 +#! +# +- name: Delete BGP Address family configuration from the device + dellemc.enterprise_sonic.sonic_bgp_af: + config: + - bgp_as: 51 + address_family: + afis: + - afi: l2vpn + safi: evpn + advertise_pip: True + advertise_pip_ip: "1.1.1.1" + advertise_pip_peer_ip: "2.2.2.2" + advertise_svi_ip: True + advertise_all_vni: False + advertise_default_gw: False + route_advertise_list: + - advertise_afi: ipv6 + route_map: aa + - afi: ipv4 + safi: unicast + - afi: ipv6 + safi: unicast + max_path: + ebgp: 2 + ibgp: 5 + redistribute: + - metric: "21" + protocol: connected + route_map: bb + - metric: "27" + protocol: ospf + route_map: aa + - metric: "26" + protocol: static + route_map: bb + state: deleted + +# After state: +# ------------ +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# ! +# address-family ipv6 unicast +# ! 
+# address-family l2vpn evpn +# +# Using deleted +# +# Before state: +# ------------- +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# ! +# address-family ipv6 unicast +# ! +# address-family l2vpn evpn +# +- name: Delete All BGP address family configurations + dellemc.enterprise_sonic.sonic_bgp_af: + config: + state: deleted + + +# After state: +# ------------ +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# +# Using merged +# +# Before state: +# ------------- +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# ! +# address-family l2vpn evpn +# +- name: Merge provided BGP address family configuration on the device. + dellemc.enterprise_sonic.sonic_bgp_af: + config: + - bgp_as: 51 + address_family: + afis: + - afi: l2vpn + safi: evpn + advertise_pip: True + advertise_pip_ip: "3.3.3.3" + advertise_pip_peer_ip: "4.4.4.4" + advertise_svi_ip: True + advertise_all_vni: False + advertise_default_gw: False + route_advertise_list: + - advertise_afi: ipv4 + route_map: bb + - afi: ipv4 + safi: unicast + network: + - 2.2.2.2/16 + - 192.168.10.1/32 + dampening: True + - afi: ipv6 + safi: unicast + max_path: + ebgp: 4 + ibgp: 5 + redistribute: + - metric: "21" + protocol: connected + route_map: bb + - metric: "27" + protocol: ospf + route_map: aa + - metric: "26" + protocol: static + route_map: bb + state: merged +# After state: +# ------------ +# +#do show running-configuration bgp +#! +#router bgp 51 +# router-id 111.2.2.41 +# timers 60 180 +# ! +# address-family ipv4 unicast +# network 2.2.2.2/16 +# network 192.168.10.1/32 +# dampening +# ! +# address-family ipv6 unicast +# redistribute connected route-map bb metric 21 +# redistribute ospf route-map aa metric 27 +# redistribute static route-map bb metric 26 +# maximum-paths 4 +# maximum-paths ibgp 5 +# ! 
+# address-family l2vpn evpn +# advertise-svi-ip +# advertise ipv4 unicast route-map bb +# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4 +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_af.bgp_af import Bgp_afArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_af.bgp_af import Bgp_af + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Bgp_afArgs.argument_spec, + supports_check_mode=True) + + result = Bgp_af(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py new file mode 100644 index 00000000..bd2ff74a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_bgp_as_paths +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_bgp_as_paths +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage BGP autonomous system path (or as-path-list) and its parameters +description: + - This module provides configuration management of BGP bgp_as_paths for devices + running Enterprise SONiC Distribution by Dell Technologies. +author: Kumaraguru Narayanan (@nkumaraguru) +options: + config: + description: A list of 'bgp_as_paths' configurations. + type: list + elements: dict + suboptions: + name: + required: True + type: str + description: + - Name of as-path-list. + members: + required: False + type: list + elements: str + description: + - Members of this BGP as-path; regular expression string can be provided. + permit: + required: False + type: bool + description: + - Permits or denies this as path. + state: + description: + - The state of the configuration after module completion. 
+ type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: +# action: permit +# members: 808.*,909.* + +- name: Delete BGP as path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + - name: test + members: + - 909.* + permit: true + state: deleted + +# After state: +# ------------ +# +# show bgp as-path-access-list +# AS path list test: +# action: +# members: 808.* + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: +# action: permit +# members: 808.*,909.* +# AS path list test1: +# action: deny +# members: 608.*,709.* + +- name: Deletes BGP as-path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + - name: test + members: + state: deleted + +# After state: +# ------------ +# +# show bgp as-path-access-list +# AS path list test1: +# action: deny +# members: 608.*,709.* + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: +# action: permit +# members: 808.*,909.* + +- name: Deletes BGP as-path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + state: deleted + +# After state: +# ------------ +# +# show bgp as-path-access-list +# (No bgp as-path-access-list configuration present) + + +# Using merged + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: + +- name: Adds 909.* to test as-path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + - name: test + members: + - 909.* + permit: true + state: merged + +# After state: +# ------------ +# +# show bgp as-path-access-list +# AS path list test: +# action: permit +# members: 909.* + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. 
+ returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_as_paths.bgp_as_paths import Bgp_as_pathsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_as_paths.bgp_as_paths import Bgp_as_paths + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Bgp_as_pathsArgs.argument_spec, + supports_check_mode=True) + + result = Bgp_as_paths(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py new file mode 100644 index 00000000..08c8dcc7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. 
+# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_bgp_communities +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: sonic_bgp_communities +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage BGP community and its parameters +description: + - This module provides configuration management of BGP bgp_communities for device + running Enterprise SONiC Distribution by Dell Technologies. +author: Kumaraguru Narayanan (@nkumaraguru) +options: + config: + description: A list of 'bgp_communities' configurations. + type: list + elements: dict + suboptions: + name: + required: True + type: str + description: + - Name of the BGP communitylist. + type: + type: str + description: + - Whether it is a standard or expanded community-list entry. + required: False + choices: + - standard + - expanded + default: standard + permit: + required: False + type: bool + description: + - Permits or denies this community. + aann: + required: False + type: str + description: + - Community number aa:nn format 0..65535:0..65535; applicable for standard BGP community type. + local_as: + required: False + type: bool + description: + - Do not send outside local AS (well-known community); applicable for standard BGP community type. + no_advertise: + required: False + type: bool + description: + - Do not advertise to any peer (well-known community); applicable for standard BGP community type. + no_export: + required: False + type: bool + description: + - Do not export to next AS (well-known community); applicable for standard BGP community type. 
+ no_peer: + required: False + type: bool + description: + - Do not export to next AS (well-known community); applicable for standard BGP community type. + members: + required: False + type: dict + suboptions: + regex: + type: list + elements: str + required: False + description: + - Members of this BGP community list. Regular expression string can be given here. Applicable for expanded BGP community type. + description: + - Members of this BGP community list. + match: + required: False + type: str + description: + - Matches any/all of the members. + choices: + - ALL + - ANY + default: ANY + state: + description: + - The state of the configuration after module completion. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted + +# Before state: +# ------------- +# +# show bgp community-list +# Standard community list test: match: ANY +# 101 +# 201 +# Standard community list test1: match: ANY +# 301 + +- name: Deletes BGP community member + dellemc.enterprise_sonic.sonic_bgp_communities: + config: + - name: test + members: + regex: + - 201 + state: deleted + +# After state: +# ------------ +# +# show bgp community-list +# Standard community list test: match: ANY +# 101 +# Standard community list test1: match: ANY +# 301 + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp community-list +# Standard community list test: match: ANY +# 101 +# Expanded community list test1: match: ANY +# 201 + +- name: Deletes a single BGP community + dellemc.enterprise_sonic.sonic_bgp_communities: + config: + - name: test + members: + state: deleted + +# After state: +# ------------ +# +# show bgp community-list +# Expanded community list test1: match: ANY +# 201 + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp community-list +# Standard community list test: match: ANY +# 101 +# Expanded community list test1: match: ANY +# 201 + +- name: Delete All BGP communities + 
dellemc.enterprise_sonic.sonic_bgp_communities: + config: + state: deleted + +# After state: +# ------------ +# +# show bgp community-list +# + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp community-list +# Standard community list test: match: ANY +# 101 +# Expanded community list test1: match: ANY +# 201 + +- name: Deletes all members in a single BGP community + dellemc.enterprise_sonic.sonic_bgp_communities: + config: + - name: test + members: + regex: + state: deleted + +# After state: +# ------------ +# +# show bgp community-list +# Expanded community list test: match: ANY +# Expanded community list test1: match: ANY +# 201 + + +# Using merged + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: + +- name: Adds 909.* to test as-path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + - name: test + members: + - 909.* + state: merged + +# After state: +# ------------ +# +# show bgp as-path-access-list +# AS path list test: +# members: 909.* + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration that is returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration that is returned is always in the same format + of the parameters above. +commands: + description: The set of commands that are pushed to the remote device. 
+  returned: always
+  type: list
+  sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_communities.bgp_communities import Bgp_communitiesArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_communities.bgp_communities import Bgp_communities
+
+
+def main():
+    """
+    Main entry point for module execution
+
+    :returns: the result from module invocation
+    """
+    module = AnsibleModule(argument_spec=Bgp_communitiesArgs.argument_spec,
+                           supports_check_mode=True)
+
+    result = Bgp_communities(module).execute_module()
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py
new file mode 100644
index 00000000..c2af0c48
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+#                WARNING                    #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_bgp_ext_communities
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_bgp_ext_communities
+version_added: 1.0.0
+notes:
+- Tested against Enterprise SONiC Distribution by Dell Technologies.
+- Supports C(check_mode).
+short_description: Manage BGP extended community-list and its parameters
+description:
+  - This module provides configuration management of BGP extcommunity-list for devices running
+    Enterprise SONiC Distribution by Dell Technologies.
+author: Kumaraguru Narayanan (@nkumaraguru)
+options:
+  config:
+    description: A list of 'bgp_extcommunity_list' configurations.
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        required: True
+        type: str
+        description:
+        - Name of the BGP ext communitylist.
+      type:
+        type: str
+        description:
+        - Whether it is a standard or expanded ext community_list entry.
+        required: False
+        choices:
+        - standard
+        - expanded
+        default: standard
+      permit:
+        required: False
+        type: bool
+        description:
+        - Permits or denies this community.
+      members:
+        required: False
+        type: dict
+        suboptions:
+          regex:
+            type: list
+            elements: str
+            required: False
+            description:
+            - Members of this BGP ext community list. Regular expression string can be given here. Applicable for expanded ext BGP community type.
+          route_target:
+            type: list
+            elements: str
+            required: False
+            description:
+            - Members of this BGP ext community list. The format of route_target is in either 0..65535:0..65535 or A.B.C.D:[1..65535] format.
+          route_origin:
+            type: list
+            elements: str
+            required: False
+            description:
+            - Members of this BGP ext community list. The format of route_origin is in either 0..65535:0..65535 or A.B.C.D:[1..65535] format.
+        description:
+        - Members of this BGP ext community list.
+      match:
+        required: False
+        type: str
+        description:
+        - Matches any/all of the members.
+ choices: + - all + - any + default: any + state: + description: + - The state of the configuration after module completion. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted + +# Before state: +# ------------- +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# rt:101:101 +# rt:201:201 + +- name: Deletes a BGP ext community member + dellemc.enterprise_sonic.sonic_bgp_ext_communities: + config: + - name: test + members: + regex: + - 201:201 + state: deleted + +# After state: +# ------------ +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# rt:101:101 +# + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# 101 +# Expanded extended community list test1: match: ANY +# 201 + +- name: Deletes a single BGP extended community + dellemc.enterprise_sonic.sonic_bgp_ext_communities: + config: + - name: test1 + members: + state: deleted + +# After state: +# ------------ +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# 101 +# + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# 101 +# Expanded extended community list test1: match: ANY +# 201 + +- name: Deletes all BGP extended communities + dellemc.enterprise_sonic.sonic_bgp_ext_communities: + config: + state: deleted + +# After state: +# ------------ +# +# show bgp ext-community-list +# + + +# Using deleted + +# Before state: +# ------------- +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# 101 +# Expanded extended community list test1: match: ANY +# 201 + +- name: Deletes all members in a single BGP extended community + dellemc.enterprise_sonic.sonic_bgp_ext_communities: + config: + - name: test1 + members: + regex: + state: deleted 
+ +# After state: +# ------------ +# +# show bgp ext-community-list +# Standard extended community list test: match: ANY +# 101 +# Expanded extended community list test1: match: ANY +# + + +# Using merged + +# Before state: +# ------------- +# +# show bgp as-path-access-list +# AS path list test: + +- name: Adds 909.* to test as-path list + dellemc.enterprise_sonic.sonic_bgp_as_paths: + config: + - name: test + members: + - 909.* + state: merged + +# After state: +# ------------ +# +# show bgp as-path-access-list +# AS path list test: +# members: 909.* + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_ext_communities.bgp_ext_communities import ( + Bgp_ext_communitiesArgs, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_ext_communities.bgp_ext_communities import Bgp_ext_communities + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Bgp_ext_communitiesArgs.argument_spec, + supports_check_mode=True) + + result = Bgp_ext_communities(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py new file mode 100644 index 00000000..19aeb6fc --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py @@ -0,0 +1,1112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_bgp_neighbors +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_bgp_neighbors +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage a BGP neighbor and its parameters +description: + - This module provides configuration management of global BGP_NEIGHBORS parameters on devices running Enterprise SONiC. + - bgp_as and vrf_name must be created on the device in advance. +author: Abirami N (@abirami-n) +options: + config: + description: Specifies the BGP neighbors related configuration. + type: list + elements: dict + suboptions: + bgp_as: + description: + - Specifies the BGP autonomous system (AS) number which is already configured on the device. + type: str + required: True + vrf_name: + description: + - Specifies the VRF name which is already configured on the device. + default: default + type: str + peer_group: + description: Specifies the list of peer groups. + type: list + elements: dict + suboptions: + name: + description: Name of the peer group. + type: str + required: True + remote_as: + description: + - Remote AS of the BGP peer group to configure. + - peer_as and peer_type are mutually exclusive. + type: dict + suboptions: + peer_as: + description: + - Specifies remote AS number. + - The range is from 1 to 4294967295. + type: int + peer_type: + description: + - Specifies the type of BGP peer. + type: str + choices: + - internal + - external + bfd: + description: + - Enables or disables BFD. + type: dict + suboptions: + enabled: + description: + - Enables BFD liveliness check for a BGP peer. + type: bool + check_failure: + description: + - Link dataplane status with control plane. + type: bool + profile: + description: + - BFD Profile name. 
+                type: str
+          advertisement_interval:
+            description:
+              - Specifies the minimum interval between sending BGP routing updates.
+              - The range is from 0 to 600.
+            type: int
+          timers:
+            description:
+              - Specifies BGP peer group timer related configurations.
+            type: dict
+            suboptions:
+              keepalive:
+                description:
+                  - Frequency with which the device sends keepalive messages to its peer, in seconds.
+                  - The range is from 0 to 65535.
+                type: int
+              holdtime:
+                description:
+                  - Interval after not receiving a keepalive message that Enterprise SONiC declares a peer dead, in seconds.
+                  - The range is from 0 to 65535.
+                type: int
+              connect_retry:
+                description:
+                  - Time interval in seconds between attempts to establish a session with the peer.
+                  - The range is from 1 to 65535.
+                type: int
+          capability:
+            description:
+              - Specifies capability attributes to this peer group.
+            type: dict
+            suboptions:
+              dynamic:
+                description:
+                  - Enables or disables dynamic capability to this peer group.
+                type: bool
+              extended_nexthop:
+                description:
+                  - Enables or disables advertise extended next-hop capability to the peer.
+                type: bool
+          auth_pwd:
+            description:
+              - Configuration for peer group authentication password.
+            type: dict
+            suboptions:
+              pwd:
+                description:
+                  - Authentication password for the peer group.
+                type: str
+                required: True
+              encrypted:
+                description:
+                  - Indicates whether the password is encrypted text.
+                type: bool
+                default: False
+          pg_description:
+            description:
+              - A textual description of the peer group.
+            type: str
+          disable_connected_check:
+            description:
+              - Disables EBGP connected route check.
+            type: bool
+          dont_negotiate_capability:
+            description:
+              - Disables capability negotiation.
+            type: bool
+          ebgp_multihop:
+            description:
+              - Allow EBGP peers not on directly connected networks.
+            type: dict
+            suboptions:
+              enabled:
+                description:
+                  - Enables the referenced group or peers to be indirectly connected.
+ type: bool + default: False + multihop_ttl: + description: + - Time-to-live value to use when packets are sent to the referenced group or peers and ebgp-multihop is enabled. + type: int + enforce_first_as: + description: + - Enforces the first AS for EBGP routes. + type: bool + enforce_multihop: + description: + - Enforces EBGP multihop performance for peer. + type: bool + local_address: + description: + - Set the local IP address to use for the session when sending BGP update messages. + type: str + local_as: + description: + - Specifies local autonomous system number. + type: dict + suboptions: + as: + description: + - Local autonomous system number. + type: int + required: True + no_prepend: + description: + - Do not prepend the local-as number in AS-Path advertisements. + type: bool + replace_as: + description: + - Replace the configured AS Number with the local-as number in AS-Path advertisements. + type: bool + override_capability: + description: + - Override capability negotiation result. + type: bool + passive: + description: + - Do not send open messages to this peer. + type: bool + default: False + shutdown_msg: + description: + - Add a shutdown message. + type: str + solo: + description: + - Indicates that routes advertised by the peer should not be reflected back to the peer. + type: bool + strict_capability_match: + description: + - Enables strict capability negotiation match. + type: bool + ttl_security: + description: + - Enforces only the peers that are specified number of hops away will be allowed to become peers. + type: int + address_family: + description: + - Holds of list of address families associated to the peergroup. + type: dict + suboptions: + afis: + description: + - List of address families with afi, safi, activate and allowas-in parameters. + - afi and safi are required together. + type: list + elements: dict + suboptions: + afi: + description: + - Holds afi mode. 
+ type: str + choices: + - ipv4 + - ipv6 + - l2vpn + safi: + description: + - Holds safi mode. + type: str + choices: + - unicast + - evpn + activate: + description: + - Enable or disable activate. + type: bool + allowas_in: + description: + - Holds AS value. + - The origin and value are mutually exclusive. + type: dict + suboptions: + origin: + description: + - Set AS as the origin. + type: bool + value: + description: + - Holds AS number in the range 1-10. + type: int + ip_afi: + description: + - Common configuration attributes for IPv4 and IPv6 unicast address families. + type: dict + suboptions: + default_policy_name: + description: + - Specifies routing policy definition. + type: str + send_default_route: + description: + - Enable or disable sending of default-route to the peer. + type: bool + default: False + prefix_limit: + description: + - Specifies prefix limit attributes. + type: dict + suboptions: + max_prefixes: + description: + - Maximum number of prefixes that will be accepted from the peer. + type: int + prevent_teardown: + description: + - Enable or disable teardown of BGP session when maximum prefix limit is exceeded. + type: bool + default: False + warning_threshold: + description: + - Threshold on number of prefixes that can be received from a peer before generation of warning messages. + - Expressed as a percentage of max-prefixes. + type: int + restart_timer: + description: + - Time interval in seconds after which the BGP session is re-established after being torn down. + type: int + prefix_list_in: + description: + - Inbound route filtering policy for a peer. + type: str + prefix_list_out: + description: + - Outbound route filtering policy for a peer. + type: str + neighbors: + description: Specifies BGP neighbor-related configurations. + type: list + elements: dict + suboptions: + neighbor: + description: + - Neighbor router address. + type: str + required: True + remote_as: + description: + - Remote AS of the BGP neighbor to configure. 
+ - peer_as and peer_type are mutually exclusive. + type: dict + suboptions: + peer_as: + description: + - Specifies remote AS number. + - The range is from 1 to 4294967295. + type: int + peer_type: + description: + - Specifies the type of BGP peer. + type: str + choices: + - internal + - external + bfd: + description: + - Enables or disables BFD. + type: dict + suboptions: + enabled: + description: + - Enables BFD liveliness check for a BGP neighbor. + type: bool + check_failure: + description: + - Link dataplane status with control plane. + type: bool + profile: + description: + - BFD Profile name. + type: str + advertisement_interval: + description: + - Specifies the minimum interval between sending BGP routing updates. + - The range is from 0 to 600. + type: int + peer_group: + description: + - The name of the peer group that the neighbor is a member of. + type: str + timers: + description: + - Specifies BGP neighbor timer-related configurations. + type: dict + suboptions: + keepalive: + description: + - Frequency with which the device sends keepalive messages to its peer, in seconds. + - The range is from 0 to 65535. + type: int + holdtime: + description: + - Interval after not receiving a keepalive message that SONiC declares a peer dead, in seconds. + - The range is from 0 to 65535. + type: int + connect_retry: + description: + - Time interval in seconds between attempts to establish a session with the peer. + - The range is from 1 to 65535. + type: int + capability: + description: + - Specifies capability attributes to this neighbor. + type: dict + suboptions: + dynamic: + description: + - Enables or disables dynamic capability to this neighbor. + type: bool + extended_nexthop: + description: + - Enables or disables advertise extended next-hop capability to the peer. + type: bool + auth_pwd: + description: + - Configuration for neighbor group authentication password. 
+            type: dict
+            suboptions:
+              pwd:
+                description:
+                  - Authentication password for the neighbor group.
+                type: str
+                required: True
+              encrypted:
+                description:
+                  - Indicates whether the password is encrypted text.
+                type: bool
+                default: False
+          nbr_description:
+            description:
+              - A textual description of the neighbor.
+            type: str
+          disable_connected_check:
+            description:
+              - Disables EBGP connected route check.
+            type: bool
+          dont_negotiate_capability:
+            description:
+              - Disables capability negotiation.
+            type: bool
+          ebgp_multihop:
+            description:
+              - Allow EBGP neighbors not on directly connected networks.
+            type: dict
+            suboptions:
+              enabled:
+                description:
+                  - Enables the referenced group or neighbors to be indirectly connected.
+                type: bool
+                default: False
+              multihop_ttl:
+                description:
+                  - Time-to-live value to use when packets are sent to the referenced group or neighbors and ebgp-multihop is enabled.
+                type: int
+          enforce_first_as:
+            description:
+              - Enforces the first AS for EBGP routes.
+            type: bool
+          enforce_multihop:
+            description:
+              - Enforces EBGP multihop performance for neighbor.
+            type: bool
+          local_address:
+            description:
+              - Set the local IP address to use for the session when sending BGP update messages.
+            type: str
+          local_as:
+            description:
+              - Specifies local autonomous system number.
+            type: dict
+            suboptions:
+              as:
+                description:
+                  - Local autonomous system number.
+                type: int
+                required: True
+              no_prepend:
+                description:
+                  - Do not prepend the local-as number in AS-Path advertisements.
+                type: bool
+              replace_as:
+                description:
+                  - Replace the configured AS Number with the local-as number in AS-Path advertisements.
+                type: bool
+          override_capability:
+            description:
+              - Override capability negotiation result.
+            type: bool
+          passive:
+            description:
+              - Do not send open messages to this neighbor.
+            type: bool
+            default: False
+          port:
+            description:
+              - Neighbor's BGP port.
+            type: int
+          shutdown_msg:
+            description:
+              - Add a shutdown message.
+ type: str + solo: + description: + - Indicates that routes advertised by the peer should not be reflected back to the peer. + type: bool + strict_capability_match: + description: + - Enables strict capability negotiation match. + type: bool + ttl_security: + description: + - Enforces only the neighbors that are specified number of hops away will be allowed to become neighbors. + type: int + v6only: + description: + - Enables BGP with v6 link-local only. + type: bool + + state: + description: + - Specifies the operation to be performed on the BGP process that is configured on the device. + - In case of merged, the input configuration is merged with the existing BGP configuration on the device. + - In case of deleted, the existing BGP configuration is removed from the device. + default: merged + type: str + choices: + - merged + - deleted +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +# ! +# neighbor interface Eth1/3 +#! +#router bgp 11 +# network import-check +# timers 60 180 +# ! +# neighbor 192.168.1.4 +# ! +# peer-group SP1 +# bfd +# capability dynamic +# ! +# peer-group SP2 +# ! +# +- name: Deletes all BGP neighbors + dellemc.enterprise_sonic.sonic_bgp_neighbors: + config: + state: deleted + +# +# After state: +# ------------- +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +#! +#router bgp 11 +# network import-check +# timers 60 180 +# ! +# +# Using merged +# +# Before state: +# ------------ +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +#! +#router bgp 11 +# network import-check +# timers 60 180 +# ! 
+ +- name: "Adds sonic_bgp_neighbors" + dellemc.enterprise_sonic.sonic_bgp_neighbors: + config: + - bgp_as: 51 + neighbors: + - neighbor: Eth1/2 + auth_pwd: + pwd: 'pw123' + encrypted: false + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: 'Ethernet4' + local_as: + as: 2 + no_prepend: true + replace_as: true + nbr_description: "description 1" + override_capability: true + passive: true + port: 3 + shutdown_msg: 'msg1' + solo: true + - neighbor: 1.1.1.1 + disable_connected_check: true + ttl_security: 5 + - bgp_as: 51 + vrf_name: VrfReg1 + peer_group: + - name: SPINE + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + capability: + dynamic: true + extended_nexthop: true + auth_pwd: + pwd: 'U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M=' + encrypted: true + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: 'Ethernet4' + local_as: + as: 2 + no_prepend: true + replace_as: true + pg_description: 'description 1' + override_capability: true + passive: true + solo: true + remote_as: + peer_as: 4 + - name: SPINE1 + disable_connected_check: true + shutdown_msg: "msg1" + strict_capability_match: true + timers: + keepalive: 30 + holdtime: 15 + connect_retry: 25 + ttl_security: 5 + address_family: + afis: + - afi: ipv4 + safi: unicast + activate: true + allowas_in: + origin: true + - afi: ipv6 + safi: unicast + activate: true + allowas_in: + value: 5 + neighbors: + - neighbor: Eth1/3 + remote_as: + peer_as: 10 + peer_group: SPINE + advertisement_interval: 15 + timers: + keepalive: 30 + holdtime: 15 + connect_retry: 25 + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + capability: + dynamic: true + extended_nexthop: true + auth_pwd: + pwd: 'U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=' + encrypted: true + nbr_description: 'description 2' + 
strict_capability_match: true + v6only: true + - neighbor: 192.168.1.4 + state: merged +# +# After state: +# ------------ +#! +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +# ! +# peer-group SPINE1 +# timers 15 30 +# timers connect 25 +# shutdown message msg1 +# disable-connected-check +# strict-capability-match +# ttl-security hops 5 +# ! +# peer-group SPINE +# description "description 1" +# ebgp-multihop 1 +# remote-as 4 +# bfd check-control-plane-failure profile "profile 1" +# update-source interface Ethernet4 +# capability dynamic +# capability extended-nexthop +# dont-capability-negotiate +# enforce-first-as +# enforce-multihop +# local-as 2 no-prepend replace-as +# override-capability +# passive +# password U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M= encrypted +# solo +# address-family ipv4 unicast +# activate +# allowas-in origin +# send-community both +# ! +# address-family ipv6 unicast +# activate +# allowas-in 5 +# send-community both +# ! +# neighbor interface Eth1/3 +# description "description 2" +# peer-group SPINE +# remote-as 10 +# timers 15 30 +# timers connect 25 +# bfd check-control-plane-failure profile "profile 1" +# advertisement-interval 15 +# capability extended-nexthop +# capability dynamic +# v6only +# password U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg= encrypted +# strict-capability-match +# ! +# neighbor 192.168.1.4 +#! 
+# router bgp 51 +# timers 60 180 +# neighbor interface Eth1/2 +# description "description 1" +# shutdown message msg1 +# ebgp-multihop 1 +# remote-as external +# update-source interface Ethernet4 +# dont-capability-negotiate +# enforce-first-as +# enforce-multihop +# local-as 2 no-prepend replace-as +# override-capability +# passive +# password U2FsdGVkX1+bxMf9TKOhaXRNNaHmywiEVDF2lJ2c000= encrypted +# port 3 +# solo +# neighbor 1.1.1.1 +# disable-connected-check +# ttl-security hops 5 +#router bgp 11 +# network import-check +# timers 60 180 +# +# Using deleted +# +# Before state: +# ------------ +#! +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +# ! +# peer-group SPINE +# bfd +# remote-as 4 +# ! +# neighbor interface Eth1/3 +# peer-group SPINE +# remote-as 10 +# timers 15 30 +# advertisement-interval 15 +# bfd +# capability extended-nexthop +# capability dynamic +# ! +# neighbor 192.168.1.4 +#! +#router bgp 11 +# network import-check +# timers 60 18 +# ! +# peer-group SP +# ! +# neighbor interface Eth1/3 +# +- name: "Deletes sonic_bgp_neighbors and peer-groups specific to vrfname" + dellemc.enterprise_sonic.sonic_bgp_neighbors: + config: + - bgp_as: 51 + vrf_name: VrfReg1 + state: deleted + +# After state: +# ------------ +#! +#router bgp 11 vrf VrfCheck2 +# network import-check +# timers 60 180 +#! +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +# ! +#router bgp 11 +# network import-check +# timers 60 18 +# ! +# peer-group SP +# ! +# neighbor interface Eth1/3 +# +# Using deleted +# +# Before state: +# ------------- +# +#router bgp 51 vrf VrfReg1 +# network import-check +# timers 60 180 +# ! +# peer-group SPINE +# bfd +# remote-as 4 +# ! +# neighbor interface Eth1/3 +# peer-group SPINE +# remote-as 10 +# timers 15 30 +# advertisement-interval 15 +# bfd +# capability extended-nexthop +# capability dynamic +# ! +# neighbor 192.168.1.4 +# ! 
+ +- name: "Deletes specific sonic_bgp_neighbors" + dellemc.enterprise_sonic.sonic_bgp_neighbors: + config: + - bgp_as: 51 + neighbors: + - neighbor: Eth1/2 + auth_pwd: + pwd: 'pw123' + encrypted: false + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: 'Ethernet4' + local_as: + as: 2 + no_prepend: true + replace_as: true + nbr_description: 'description 1' + override_capability: true + passive: true + port: 3 + shutdown_msg: 'msg1' + solo: true + - neighbor: 1.1.1.1 + disable_connected_check: true + ttl_security: 5 + - bgp_as: 51 + vrf_name: VrfReg1 + peer_group: + - name: SPINE + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + capability: + dynamic: true + extended_nexthop: true + auth_pwd: + pwd: 'U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M=' + encrypted: true + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: 'Ethernet4' + local_as: + as: 2 + no_prepend: true + replace_as: true + pg_description: 'description 1' + override_capability: true + passive: true + solo: true + remote_as: + peer_as: 4 + - name: SPINE1 + disable_connected_check: true + shutdown_msg: "msg1" + strict_capability_match: true + timers: + keepalive: 30 + holdtime: 15 + connect_retry: 25 + ttl_security: 5 + neighbors: + - neighbor: Eth1/3 + remote_as: + peer_as: 10 + peer_group: SPINE + advertisement_interval: 15 + timers: + keepalive: 30 + holdtime: 15 + connect_retry: 25 + bfd: + check_failure: true + enabled: true + profile: 'profile 1' + capability: + dynamic: true + extended_nexthop: true + auth_pwd: + pwd: 'U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=' + encrypted: true + nbr_description: 'description 2' + strict_capability_match: true + v6only: true + - neighbor: 192.168.1.4 + state: deleted +# +# After state: +# ------------- +# +#router bgp 51 vrf VrfReg1 +# network 
import-check +# timers 60 180 +# ! +# peer-group SPINE1 +# ! +# peer-group SPINE +# ! +# neighbor interface Eth1/3 +# ! +# neighbor interface Eth1/2 +# neighbor 1.1.1.1 +# +# Using merged +# +# Before state: +# ------------- +# +# sonic# show running-configuration bgp peer-group vrf default +# (No bgp peer-group configuration present) + +- name: "Configure BGP peer-group prefix-list attributes" + dellemc.enterprise_sonic.sonic_bgp_neighbors: + config: + - bgp_as: 51 + peer_group: + - name: SPINE + address_family: + afis: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: true + prefix_limit: + max_prefixes: 1 + prevent_teardown: true + warning_threshold: 80 + prefix_list_in: p1 + prefix_list_out: p2 + state: merged + +# After state: +# ------------ +# +# sonic# show running-configuration bgp peer-group vrf default +# ! +# peer-group SPINE +# ! +# address-family ipv4 unicast +# default-originate route-map rmap_reg1 +# prefix-list p1 in +# prefix-list p2 out +# send-community both +# maximum-prefix 1 80 warning-only +# +# Using deleted +# +# Before state: +# ------------- +# +# sonic# show running-configuration bgp peer-group vrf default +# ! +# peer-group SPINE +# ! 
+# address-family ipv6 unicast
+#  default-originate route-map rmap_reg2
+#  prefix-list p1 in
+#  prefix-list p2 out
+#  send-community both
+#  maximum-prefix 5 90 restart 2
+
+- name: "Delete BGP peer-group prefix-list attributes"
+  dellemc.enterprise_sonic.sonic_bgp_neighbors:
+    config:
+      - bgp_as: 51
+        peer_group:
+          - name: SPINE
+            address_family:
+              afis:
+                - afi: ipv6
+                  safi: unicast
+                  ip_afi:
+                    default_policy_name: rmap_reg2
+                    send_default_route: true
+                  prefix_limit:
+                    max_prefixes: 5
+                    warning_threshold: 90
+                    restart_timer: 2
+                  prefix_list_in: p1
+                  prefix_list_out: p2
+    state: deleted
+
+# sonic# show running-configuration bgp peer-group vrf default
+# (No bgp peer-group configuration present)
+"""
+RETURN = """
+before:
+  description: The configuration prior to the model invocation.
+  returned: always
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+after:
+  description: The resulting configuration model invocation.
+  returned: when changed
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+commands:
+  description: The set of commands pushed to the remote device.
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_neighbors.bgp_neighbors import Bgp_neighborsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_neighbors.bgp_neighbors import Bgp_neighbors + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Bgp_neighborsArgs.argument_spec, + supports_check_mode=True) + + result = Bgp_neighbors(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py new file mode 100644 index 00000000..10400cfe --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py @@ -0,0 +1,451 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_bgp_neighbors_af +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: sonic_bgp_neighbors_af +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Manage the BGP neighbor address-family and its parameters +description: + - This module provides configuration management of BGP neighbors address-family parameters on devices running Enterprise SONiC. + - bgp_as, vrf_name and neighbors need be created in advance on the device. +options: + config: + description: + - Specifies the BGP neighbors address-family related configuration. + type: list + elements: dict + suboptions: + bgp_as: + description: + - Specifies the BGP autonomous system (AS) number which is already configured on the device. + type: str + required: true + vrf_name: + description: + - Specifies the VRF name which is already configured on the device. + type: str + default: 'default' + neighbors: + description: + - Specifies BGP neighbor related configurations in address-family configuration mode. + type: list + elements: dict + suboptions: + neighbor: + description: + - Neighbor router address which is already configured on the device. + type: str + required: True + address_family: + description: + - Specifies BGP address-family related configurations. + - afi and safi are required together. + type: list + elements: dict + suboptions: + afi: + description: + - Type of address-family to configure. + type: str + choices: + - ipv4 + - ipv6 + - l2vpn + required: True + safi: + description: + - Specifies the type of cast for the address-family. + type: str + choices: + - unicast + - evpn + default: unicast + activate: + description: + - Enables the address-family for this neighbor. 
+ type: bool + allowas_in: + description: + - Specifies the allowas in values. + type: dict + suboptions: + value: + description: + - Specifies the value of the allowas in. + type: int + origin: + description: + - Specifies the origin value. + type: bool + ip_afi: + description: + - Common configuration attributes for IPv4 and IPv6 unicast address families. + type: dict + suboptions: + default_policy_name: + description: + - Specifies routing policy definition. + type: str + send_default_route: + description: + - Enable or disable sending of default-route to the neighbor. + type: bool + default: False + prefix_limit: + description: + - Specifies prefix limit attributes. + type: dict + suboptions: + max_prefixes: + description: + - Maximum number of prefixes that will be accepted from the neighbor. + type: int + prevent_teardown: + description: + - Enable or disable teardown of BGP session when maximum prefix limit is exceeded. + type: bool + default: False + warning_threshold: + description: + - Threshold on number of prefixes that can be received from a neighbor before generation of warning messages. + - Expressed as a percentage of max-prefixes. + type: int + restart_timer: + description: + - Time interval in seconds after which the BGP session is re-established after being torn down. + type: int + prefix_list_in: + description: + - Inbound route filtering policy for a neighbor. + type: str + prefix_list_out: + description: + - Outbound route filtering policy for a neighbor. + type: str + route_map: + description: + - Specifies the route-map. + type: list + elements: dict + suboptions: + name: + description: + - Specifies the name of the route-map. + type: str + direction: + description: + - Specifies the direction of the route-map. + type: str + route_reflector_client: + description: + - Specifies a neighbor as a route-reflector client. + type: bool + route_server_client: + description: + - Specifies a neighbor as a route-server client. 
+ type: bool + state: + description: + - Specifies the operation to be performed on the BGP process that is configured on the device. + - In case of merged, the input configuration is merged with the existing BGP configuration on the device. + - In case of deleted, the existing BGP configuration is removed from the device. + default: merged + type: str + choices: ['merged', 'deleted'] +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#! +#router bgp 4 +# ! +# neighbor interface Eth1/3 +# ! +# address-family ipv4 unicast +# activate +# allowas-in 4 +# route-map aa in +# route-map aa out +# route-reflector-client +# route-server-client +# send-community both +#! +# +- name: Deletes neighbors address-family with specific values + dellemc.enterprise_sonic.sonic_bgp_neighbors_af: + config: + - bgp_as: 4 + neighbors: + - neighbor: Eth1/3 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 4 + route_map: + - name: aa + direction: in + - name: aa + direction: out + route_reflector_client: true + route_server_client: true + state: deleted + +# After state: +# ------------ +#! +#router bgp 4 +# ! +# neighbor interface Eth1/3 +# ! +# address-family ipv4 unicast +# send-community both +#! + + +# Using deleted +# +# Before state: +# ------------- +# +#! +#router bgp 4 +# ! +# neighbor interface Eth1/3 +# ! +# address-family ipv4 unicast +# activate +# allowas-in 4 +# route-map aa in +# route-map aa out +# route-reflector-client +# route-server-client +# send-community both +#! +# neighbor interface Eth1/5 +# ! +# address-family ipv4 unicast +# activate +# allowas-in origin +# send-community both +#! +# +- name: Deletes neighbors address-family with specific values + dellemc.enterprise_sonic.sonic_bgp_neighbors_af: + config: + state: deleted + +# After state: +# ------------ +#! +#router bgp 4 +#! + + +# Using deleted +# +# Before state: +# ------------- +# +#! +#router bgp 4 +# ! +# neighbor interface Eth1/3 +#! 
+# +- name: Merges neighbors address-family with specific values + dellemc.enterprise_sonic.sonic_bgp_neighbors_af: + config: + - bgp_as: 4 + neighbors: + - neighbor: Eth1/3 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 4 + route_map: + - name: aa + direction: in + - name: aa + direction: out + route_reflector_client: true + route_server_client: true + state: merged + +# After state: +# ------------ +#! +#router bgp 4 +# ! +# neighbor interface Eth1/3 +# ! +# address-family ipv4 unicast +# activate +# allowas-in 4 +# route-map aa in +# route-map aa out +# route-reflector-client +# route-server-client +# send-community both +#! + + +# Using merged +# +# Before state: +# ------------- +# +# sonic# show running-configuration bgp neighbor vrf default 1.1.1.1 +# (No bgp neighbor configuration present) +- name: "Configure BGP neighbor prefix-list attributes" + dellemc.enterprise_sonic.sonic_bgp_neighbors_af: + config: + - bgp_as: 51 + neighbors: + - neighbor: 1.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: true + prefix_limit: + max_prefixes: 1 + prevent_teardown: true + warning_threshold: 80 + prefix_list_in: p1 + prefix_list_out: p2 + state: merged +# After state: +# ------------ +# +# sonic# show running-configuration bgp neighbor vrf default 1.1.1.1 +# ! +# neighbor 1.1.1.1 +# ! +# address-family ipv4 unicast +# default-originate route-map rmap_reg1 +# prefix-list p1 in +# prefix-list p2 out +# send-community both +# maximum-prefix 1 80 warning-only + + +# Using deleted +# +# Before state: +# ------------- +# +# sonic# show running-configuration bgp neighbor vrf default 1.1.1.1 +# ! +# neighbor 1.1.1.1 +# ! 
+#  address-family ipv6 unicast
+#   default-originate route-map rmap_reg2
+#   prefix-list p1 in
+#   prefix-list p2 out
+#   send-community both
+#   maximum-prefix 5 90 restart 2
+- name: "Delete BGP neighbor prefix-list attributes"
+  dellemc.enterprise_sonic.sonic_bgp_neighbors_af:
+    config:
+      - bgp_as: 51
+        neighbors:
+          - neighbor: 1.1.1.1
+            address_family:
+              - afi: ipv6
+                safi: unicast
+                ip_afi:
+                  default_policy_name: rmap_reg2
+                  send_default_route: true
+                prefix_limit:
+                  max_prefixes: 5
+                  warning_threshold: 90
+                  restart_timer: 2
+                prefix_list_in: p1
+                prefix_list_out: p2
+    state: deleted
+# sonic# show running-configuration bgp neighbor vrf default 1.1.1.1
+# (No bgp neighbor configuration present)
+"""
+RETURN = """
+before:
+  description: The configuration prior to the model invocation.
+  returned: always
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+after:
+  description: The resulting configuration model invocation.
+  returned: when changed
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+commands:
+  description: The set of commands pushed to the remote device.
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp_neighbors_af.bgp_neighbors_af import Bgp_neighbors_afArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_neighbors_af.bgp_neighbors_af import Bgp_neighbors_af + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Bgp_neighbors_afArgs.argument_spec, + supports_check_mode=True) + + result = Bgp_neighbors_af(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_command.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_command.py new file mode 100644 index 00000000..b0ce8781 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_command.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_command +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Dhivya P (@dhivayp) +short_description: Runs commands on devices running Enterprise SONiC +description: + - Runs commands on remote devices running Enterprise SONiC Distribution + by Dell Technologies. Sends arbitrary commands to an Enterprise SONiC node and + returns the results that are read from the device. 
This module includes an + argument that causes the module to wait for a specific condition + before returning or time out if the condition is not met. + - This module does not support running commands in configuration mode. + To configure SONiC devices, use M(dellemc.enterprise_sonic.sonic_config). +options: + commands: + description: + - List of commands to send to the remote Enterprise SONiC devices over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. If a command sent to the + device requires answering a prompt, it is possible to pass + a dict containing I(command), I(answer) and I(prompt). + Common answers are 'yes' or "\\r" (carriage return, must be + double quotes). See examples. + type: list + elements: str + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task waits for each condition to be true + before moving forward. If the conditional is not true + within the configured number of retries, the task fails. + See examples. + type: list + elements: str + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + type: str + default: all + choices: [ 'all', 'any' ] + retries: + description: + - Specifies the number of retries a command should be run + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + type: int + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. 
If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + type: int + default: 1 +""" + +EXAMPLES = """ + - name: Runs show version on remote devices + dellemc.enterprise_sonic.sonic_command: + commands: show version + + - name: Runs show version and checks to see if output contains 'Dell' + dellemc.enterprise_sonic.sonic_command: + commands: show version + wait_for: result[0] contains Dell + + - name: Runs multiple commands on remote nodes + dellemc.enterprise_sonic.sonic_command: + commands: + - show version + - show interface + + - name: Runs multiple commands and evaluate the output + dellemc.enterprise_sonic.sonic_command: + commands: + - 'show version' + - 'show system' + wait_for: + - result[0] contains Dell + - result[1] contains Hostname + + - name: Runs commands that require answering a prompt + dellemc.enterprise_sonic.sonic_command: + commands: + - command: 'reload' + prompt: '[confirm yes/no]: ?$' + answer: 'no' +""" + +RETURN = """ +stdout: + description: The set of responses from the commands. + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list. + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed. + returned: failed + type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments. 
+ returned: always + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import ( + Conditional, +) +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ( + EntityCollection, + to_lines, +) +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import run_commands +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import command_list_str_to_dict + + +def transform_commands_dict(module, commands_dict): + transform = EntityCollection( + module, + dict( + command=dict(key=True), + output=dict(), + prompt=dict(type="list"), + answer=dict(type="list"), + newline=dict(type="bool", default=True), + sendonly=dict(type="bool", default=False), + check_all=dict(type="bool", default=False), + ), + ) + + return transform(commands_dict) + + +def parse_commands(module, warnings): + commands_dict = command_list_str_to_dict(module, warnings, module.params["commands"]) + commands = transform_commands_dict(module, commands_dict) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True, elements="str"), + + wait_for=dict(type='list', elements="str"), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + result = {'changed': False} + + warnings = list() +# check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + try: + conditionals = [Conditional(c) for c in wait_for] + 
except AttributeError as exc: + module.fail_json(msg=to_text(exc)) + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied.' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py new file mode 100644 index 00000000..dd054419 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_config +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Abirami N (@abirami-n) +short_description: Manages configuration sections on devices running Enterprise SONiC +description: + - Manages configuration sections of Enterprise SONiC Distribution + by Dell Technologies. SONiC configurations use a simple block indent + file syntax for segmenting configuration into sections. 
This module + provides an implementation for working with SONiC configuration + sections in a deterministic way. +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-configuration. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device configuration parser. This argument is mutually exclusive + with I(src). + type: list + elements: str + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + type: list + elements: str + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host, or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines). + type: path + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + type: list + elements: str + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before), this + allows the playbook designer to append a set of commands to be + executed after the command set. 
+ type: list + elements: str + save: + description: + - The C(save) argument instructs the module to save the running- + configuration to the startup-configuration at the conclusion of + the module running. If check mode is specified, this argument + is ignored. + type: bool + default: 'no' + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device configuration. + If match is set to I(line), commands are matched line by line. + If match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. If match is set to I(none), the + module does not attempt to compare the source configuration with + the running-configuration on the remote device. + type: str + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module how to perform a configuration + on the device. If the replace argument is set to I(line), then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block), then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + type: str + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When you set this argument to + I(merge), the configuration changes merge with the current + device running-configuration. When you set this argument to I(check), + the configuration updates are determined but not configured + on the remote device. 
+ type: str + default: merge + choices: ['merge', 'check'] + config: + description: + - The module, by default, connects to the remote device and + retrieves the current running-configuration to use as a base for + comparing against the contents of source. There are times when + it is not desirable to have the task get the current + running-configuration for every task in a playbook. The I(config) + argument allows the implementer to pass in the configuration to + use as the base configuration for comparison. + type: str + backup: + description: + - This argument causes the module to create a full backup of + the current C(running-configuration) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + backup_options: + description: + - This is a dictionary object containing configurable options related to backup file path. + The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set + to I(no) this option is ignored. + suboptions: + filename: + description: + - The filename to be used to store the backup configuration. If the filename + is not given, it is generated based on the hostname, current time, and date + in the format defined by _config.@. + type: str + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file is stored. If the directory does not exist it is first + created, and the filename is either the value of C(filename) or default filename + as described in C(filename) options description. If the path value is not given, + an I(backup) directory is created in the current working directory + and backup configuration is copied in C(filename) within the I(backup) directory. 
+ type: path + type: dict +""" + +EXAMPLES = """ +- dellemc.enterprise_sonic.sonic_config: + lines: ['username {{ user_name }} password {{ user_password }} role {{ user_role }}'] + +- dellemc.enterprise_sonic.sonic_config: + lines: + - description 'SONiC' + parents: ['interface Eth1/10'] + +- dellemc.enterprise_sonic.sonic_config: + lines: + - seq 2 permit udp any any + - seq 3 deny icmp any any + parents: ['ip access-list test'] + before: ['no ip access-list test'] + +""" + +RETURN = """ +updates: + description: The set of commands that is pushed to the remote device. + returned: always + type: list + sample: ['username foo password foo role admin', 'router bgp 1', 'router-id 1.1.1.1'] +commands: + description: The set of commands that is pushed to the remote device. + returned: always + type: list + sample: ['username foo password foo role admin', 'router bgp 1', 'router-id 1.1.1.1'] +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. + returned: When not check_mode. 
+ type: bool + sample: True +""" + +from ansible.module_utils.connection import ConnectionError + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import get_config, get_sublevel_config +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import edit_config, run_commands +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import command_list_str_to_dict +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + commands = module.params['lines'][0] + if (isinstance(commands, dict)) and (isinstance((commands['command']), list)): + candidate.add(commands['command'], parents=parents) + elif (isinstance(commands, dict)) and (isinstance((commands['command']), str)): + candidate.add([commands['command']], parents=parents) + else: + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + return contents + + +def main(): + + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + + argument_spec = dict( + lines=dict(aliases=['commands'], type='list', elements="str"), + parents=dict(type='list', elements="str"), + + src=dict(type='path'), + + before=dict(type='list', elements="str"), + after=dict(type='list', elements="str"), + save=dict(type='bool', default=False), + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + 
update=dict(choices=['merge', 'check'], default='merge'), + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec) + + ) + + mutually_exclusive = [('lines', 'src')] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + parents = module.params['parents'] or list() + match = module.params['match'] + replace = module.params['replace'] + + warnings = list() +# check_args(module, warnings) + + result = dict(changed=False, saved=False, warnings=warnings) + if module.params['backup']: + if not module.check_mode: + result['__backup__'] = get_config(module) + + commands = list() + candidate = get_candidate(module) + if any((module.params['lines'], module.params['src'])): + if match != 'none': + config = get_running_config(module) + if parents: + contents = get_sublevel_config(config, module) + config = NetworkConfig(contents=contents, indent=1) + else: + config = NetworkConfig(contents=config, indent=1) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + + configobjs = candidate.items + if configobjs: + commands = dumps(configobjs, 'commands') + if ((isinstance((module.params['lines']), list)) and + (isinstance((module.params['lines'][0]), dict)) and + (set(['prompt', 'answer']).issubset(module.params['lines'][0]))): + + cmd = {'command': commands, + 'prompt': module.params['lines'][0]['prompt'], + 'answer': module.params['lines'][0]['answer']} + commands = [cmd] + else: + commands = commands.split('\n') + cmd_list_out = command_list_str_to_dict(module, warnings, commands) + if cmd_list_out and cmd_list_out != []: + commands = cmd_list_out + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + try: + edit_config(module, commands) + except ConnectionError 
as exc: + module.fail_json(msg=to_text(exc)) + + result['changed'] = True + result['commands'] = commands + result['updates'] = commands + + if module.params['save']: + result['changed'] = True + if not module.check_mode: + cmd = {r'command': ' write memory'} + run_commands(module, [cmd]) + result['saved'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py new file mode 100644 index 00000000..f13e9def --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +The module file for sonic_facts +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_facts +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Collects facts on devices running Enterprise SONiC +description: + - Collects facts from devices running Enterprise SONiC Distribution by + Dell Technologies. This module places the facts gathered in the fact tree + keyed by the respective resource name. The facts module always collects + a base set of facts from the device and can enable or disable collection + of additional facts. +author: +- Mohamed Javeed (@javeedf) +- Abirami N (@abirami-n) +options: + gather_subset: + description: + - When supplied, this argument restricts the facts collected + to a given subset. Possible values for this argument include + all, min, hardware, config, legacy, and interfaces. Can specify a + list of values to include a larger subset. 
Values can also be used + with an initial '!' to specify that a specific subset should + not be collected. + required: false + type: list + elements: str + default: '!config' + gather_network_resources: + description: + - When supplied, this argument restricts the facts collected + to a given subset. Possible values for this argument include + all and the resources like 'all', 'interfaces', 'vlans', 'lag_interfaces', 'l2_interfaces', 'l3_interfaces'. + Can specify a list of values to include a larger subset. Values + can also be used with an initial '!' to specify that a + specific subset should not be collected. + required: false + type: list + elements: str + choices: + - all + - vlans + - interfaces + - l2_interfaces + - l3_interfaces + - lag_interfaces + - bgp + - bgp_af + - bgp_neighbors + - bgp_neighbors_af + - bgp_as_paths + - bgp_communities + - bgp_ext_communities + - mclag + - prefix_lists + - vrfs + - vxlans + - users + - system + - port_breakout + - aaa + - tacacs_server + - radius_server + - static_routes + - ntp +""" + +EXAMPLES = """ +- name: Gather all facts + dellemc.enterprise_sonic.sonic_facts: + gather_subset: all + gather_network_resources: all +- name: Collects VLAN and interfaces facts + dellemc.enterprise_sonic.sonic_facts: + gather_subset: + - min + gather_network_resources: + - vlans + - interfaces +- name: Do not collects VLAN and interfaces facts + dellemc.enterprise_sonic.sonic_facts: + gather_network_resources: + - "!vlans" + - "!interfaces" +- name: Collects VLAN and minimal default facts + dellemc.enterprise_sonic.sonic_facts: + gather_subset: min + gather_network_resources: vlans +- name: Collect lag_interfaces and minimal default facts + dellemc.enterprise_sonic.sonic_facts: + gather_subset: min + gather_network_resources: lag_interfaces +""" + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.facts.facts import 
FactsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts + + +def main(): + """ + Main entry point for module execution + :returns: ansible_facts + """ + module = AnsibleModule(argument_spec=FactsArgs.argument_spec, + supports_check_mode=True) + warnings = ['default value for `gather_subset` ' + 'will be changed to `min` from `!config` v2.11 onwards'] + + result = Facts(module).get_facts() + + ansible_facts, additional_warnings = result + warnings.extend(additional_warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py new file mode 100644 index 00000000..0cd6a189 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_interfaces +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. 
+- Supports C(check_mode). +short_description: Configure Interface attributes on interfaces such as, Eth, LAG, VLAN, and loopback. + (create a loopback interface if it does not exist.) +description: Configure Interface attributes such as, MTU, admin statu, and so on, on interfaces + such as, Eth, LAG, VLAN, and loopback. (create a loopback interface if it does not exist.) +author: Niraimadaiselvam M(@niraimadaiselvamm) +options: + config: + description: A list of interface configurations. + type: list + elements: dict + suboptions: + name: + type: str + description: The name of the interface, for example, 'Eth1/15'. + required: true + description: + type: str + description: + - Description about the interface. + enabled: + description: + - Administrative state of the interface. + type: bool + mtu: + description: + - MTU of the interface. + type: int + state: + description: + - The state the configuration should be left in. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ +#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - down 100000 9100 +#Eth1/3 - down 1000 5000 +#Eth1/5 - down 100000 9100 +# +- name: Configures interfaces + dellemc.enterprise_sonic.sonic_interfaces: + config: + name: Eth1/3 + state: deleted +# +# After state: +# ------------- +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ +#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - down 100000 9100 +#Eth1/3 - up 100000 
9100 +#Eth1/5 - down 100000 9100 +# +# +# Using deleted +# +# Before state: +# ------------- +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ +#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - down 100000 9100 +#Eth1/3 - down 1000 9100 +#Eth1/5 - down 100000 9100 +# + +- name: Configures interfaces + dellemc.enterprise_sonic.sonic_interfaces: + config: + state: deleted + +# +# After state: +# ------------- +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ +#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - up 100000 9100 +#Eth1/3 - up 100000 9100 +#Eth1/5 - up 100000 9100 +# +# +# Using merged +# +# Before state: +# ------------- +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ +#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - down 100000 9100 +#Eth1/3 - down 1000 9100 +# +- name: Configures interfaces + dellemc.enterprise_sonic.sonic_interfaces: + config: + - name: Eth1/3 + description: 'Ethernet Twelve' + - name: Eth1/5 + description: 'Ethernet Sixteen' + enable: True + mtu: 3500 + state: merged +# +# +# After state: +# ------------ +# +# show interface status | no-more +#------------------------------------------------------------------------------------------ +#Name Description Admin Oper Speed MTU +#------------------------------------------------------------------------------------------ 
+#Eth1/1 - up 100000 9100 +#Eth1/2 - up 100000 9100 +#Eth1/3 - down 100000 9100 +#Eth1/4 - down 1000 9100 +#Eth1/5 - down 100000 3500 +# +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.interfaces.interfaces import InterfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.interfaces.interfaces import Interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=InterfacesArgs.argument_spec, + supports_check_mode=True) + + result = Interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py new file mode 100644 index 00000000..34a8ff72 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_l2_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_l2_interfaces +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Configure interface-to-VLAN association that is based on access or trunk mode +description: Manages Layer 2 interface attributes of Enterprise SONiC Distribution by Dell Technologies. +author: Niraimadaiselvam M(@niraimadaiselvamm) +options: + config: + description: A list of Layer 2 interface configurations. + type: list + elements: dict + suboptions: + name: + type: str + description: Full name of the interface, for example, 'Eth1/26'. + required: true + trunk: + type: dict + description: Configures trunking parameters on an interface. + suboptions: + allowed_vlans: + description: Specifies list of allowed VLANs of trunk mode on the interface. + type: list + elements: dict + suboptions: + vlan: + type: int + description: Configures the specified VLAN in trunk mode. + access: + type: dict + description: Configures access mode characteristics of the interface. + suboptions: + vlan: + type: int + description: Configures the specified VLAN in access mode. + state: + type: str + description: The state that the configuration should be left in. 
+ choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive A Eth1/3 +#11 Inactive T Eth1/3 +#12 Inactive A Eth1/4 +#13 Inactive T Eth1/4 +#14 Inactive A Eth1/5 +#15 Inactive T Eth1/5 +# +- name: Configures switch port of interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + - name: Eth1/3 + - name: Eth1/4 + state: deleted +# +# After state: +# ------------ +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#11 Inactive +#12 Inactive +#13 Inactive +#14 Inactive A Eth1/5 +#15 Inactive T Eth1/5 +# +# +# Using deleted +# +# Before state: +# ------------- +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive A Eth1/3 +#11 Inactive T Eth1/3 +#12 Inactive A Eth1/4 +#13 Inactive T Eth1/4 +#14 Inactive A Eth1/5 +#15 Inactive T Eth1/5 +# +- name: Configures switch port of interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + state: deleted +# +# After state: +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#11 Inactive +#12 Inactive +#13 Inactive +#14 Inactive +#15 Inactive +# +# +# Using merged +# +# Before state: +# ------------- +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#11 Inactive T Eth1/7 +#12 Inactive T Eth1/7 +# +- name: Configures switch port of interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + - name: Eth1/3 + access: + vlan: 10 + state: merged +# +# After state: +# ------------ +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive A Eth1/3 +#11 Inactive T Eth1/7 +#12 Inactive T Eth1/7 +# +# +# Using merged +# +# Before state: +# ------------- +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive A Eth1/3 +# +- name: 
Configures switch port of interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + - name: Eth1/3 + trunk: + allowed_vlans: + - vlan: 11 + - vlan: 12 + state: merged +# +# After state: +# ------------ +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive A Eth1/3 +#11 Inactive T Eth1/7 +#12 Inactive T Eth1/7 +# +# +# Using merged +# +# Before state: +# ------------- +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#11 Inactive +#12 Inactive A Eth1/4 +#13 Inactive T Eth1/4 +#14 Inactive A Eth1/5 +#15 Inactive T Eth1/5 +# +- name: Configures switch port of interfaces + dellemc.enterprise_sonic.sonic_l2_interfaces: + config: + - name: Eth1/3 + access: + vlan: 12 + trunk: + allowed_vlans: + - vlan: 13 + - vlan: 14 + state: merged +# +# After state: +# ------------ +# +#do show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#11 Inactive +#12 Inactive A Eth1/3 +# A Eth1/4 +#13 Inactive T Eth1/3 +# T Eth1/4 +#14 Inactive A Eth1/3 +# A Eth1/5 +#15 Inactive T Eth1/5 +# +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l2_interfaces.l2_interfaces import L2_interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=L2_interfacesArgs.argument_spec, + supports_check_mode=True) + + result = L2_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py new file mode 100644 index 00000000..e796897a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py @@ -0,0 +1,375 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_l3_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_l3_interfaces +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Configure the IPv4 and IPv6 parameters on Interfaces such as, Eth, LAG, VLAN, and loopback +description: + - Configures Layer 3 interface settings on devices running Enterprise SONiC + Distribution by Dell Technologies. This module provides configuration management + of IPv4 and IPv6 parameters on Ethernet interfaces of devices running Enterprise SONiC. +author: Kumaraguru Narayanan (@nkumaraguru) +options: + config: + description: A list of l3_interfaces configurations. + type: list + elements: dict + suboptions: + name: + required: True + type: str + description: + - Full name of the interface, for example, Eth1/3. + ipv4: + description: + - ipv4 configurations to be set for the Layer 3 interface mentioned in name option. + type: dict + suboptions: + addresses: + description: + - List of IPv4 addresses to be set. + type: list + elements: dict + suboptions: + address: + description: + - IPv4 address to be set in the format / + for example, 192.0.2.1/24. + type: str + secondary: + description: + - secondary flag of the ip address. + type: bool + default: 'False' + anycast_addresses: + description: + - List of IPv4 addresses to be set for anycast. + type: list + elements: str + ipv6: + description: + - ipv6 configurations to be set for the Layer 3 interface mentioned in name option. + type: dict + suboptions: + addresses: + description: + - List of IPv6 addresses to be set. + type: list + elements: dict + suboptions: + address: + description: + - IPv6 address to be set in the address format is / + for example, 2001:db8:2201:1::1/64. 
+ type: str + enabled: + description: + - enabled flag of the ipv6. + type: bool + state: + description: + - The state that the configuration should be left in. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ + +# Using deleted +# +# Before state: +# ------------- +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! +#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 83.1.1.1/16 +# ip address 84.1.1.1/16 secondary +# ipv6 address 83::1/16 +# ipv6 address 84::1/16 +# ipv6 enable +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 91.1.1.1/16 +# ip address 92.1.1.1/16 secondary +# ipv6 address 90::1/16 +# ipv6 address 91::1/16 +# ipv6 address 92::1/16 +# ipv6 address 93::1/16 +#! +#interface Vlan501 +# ip anycast-address 11.12.13.14/12 +# ip anycast-address 1.2.3.4/22 +#! +# +# +- name: delete one l3 interface. + dellemc.enterprise_sonic.sonic_l3_interfaces: + config: + - name: Ethernet20 + ipv4: + addresses: + - address: 83.1.1.1/16 + - address: 84.1.1.1/16 + - name: Ethernet24 + ipv6: + enabled: true + addresses: + - address: 91::1/16 + - name: Vlan501 + ipv4: + anycast_addresses: + - 11.12.13.14/12 + state: deleted + +# After state: +# ------------ +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! +#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +# ipv6 address 83::1/16 +# ipv6 address 84::1/16 +# ipv6 enable +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 91.1.1.1/16 +# ip address 92.1.1.1/16 secondary +# ipv6 address 90::1/16 +# ipv6 address 92::1/16 +# ipv6 address 93::1/16 +#! +#interface Vlan501 +# ip anycast-address 1.2.3.4/22 +#! +# +# Using deleted +# +# Before state: +# ------------- +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! 
+#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 83.1.1.1/16 +# ip address 84.1.1.1/16 secondary +# ipv6 address 83::1/16 +# ipv6 address 84::1/16 +# ipv6 enable +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 91.1.1.1/16 +# ipv6 address 90::1/16 +# ipv6 address 91::1/16 +# ipv6 address 92::1/16 +# ipv6 address 93::1/16 +#! +#interface Vlan501 +# ip anycast-address 11.12.13.14/12 +# ip anycast-address 1.2.3.4/22 +#! +# +# +- name: delete all l3 interface + dellemc.enterprise_sonic.sonic_l3_interfaces: + config: + state: deleted +# +# After state: +# ------------ +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! +#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +#! +#interface Vlan501 +#! +# +# Using merged +# +# Before state: +# ------------- +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! +#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +#! +#interface Vlan501 +# ip anycast-address 1.2.3.4/22 +#! +# +- name: Add l3 interface configurations + dellemc.enterprise_sonic.sonic_l3_interfaces: + config: + - name: Ethernet20 + ipv4: + addresses: + - address: 83.1.1.1/16 + - address: 84.1.1.1/16 + secondary: True + ipv6: + enabled: true + addresses: + - address: 83::1/16 + - address: 84::1/16 + secondary: True + - name: Ethernet24 + ipv4: + addresses: + - address: 91.1.1.1/16 + ipv6: + addresses: + - address: 90::1/16 + - address: 91::1/16 + - address: 92::1/16 + - address: 93::1/16 + - name: Vlan501 + ipv4: + anycast_addresses: + - 11.12.13.14/12 + state: merged +# +# After state: +# ------------ +# +#rno-dctor-1ar01c01sw02# show running-configuration interface +#! 
+#interface Ethernet20 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 83.1.1.1/16 +# ip address 84.1.1.1/16 secondary +# ipv6 address 83::1/16 +# ipv6 address 84::1/16 +# ipv6 enable +#! +#interface Ethernet24 +# mtu 9100 +# speed 100000 +# shutdown +# ip address 91.1.1.1/16 +# ipv6 address 90::1/16 +# ipv6 address 91::1/16 +# ipv6 address 92::1/16 +# ipv6 address 93::1/16 +#! +#interface Vlan501 +# ip anycast-address 1.2.3.4/22 +# ip anycast-address 11.12.13.14/12 +#! +# +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l3_interfaces.l3_interfaces import L3_interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=L3_interfacesArgs.argument_spec, + supports_check_mode=True) + + result = L3_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py new file mode 100644 index 00000000..630db798 --- 
/dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_lag_interfaces +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_lag_interfaces +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage link aggregation group (LAG) interface parameters +description: + - This module manages attributes of link aggregation group (LAG) interfaces of + devices running Enterprise SONiC Distribution by Dell Technologies. +author: Abirami N (@abirami-n) + +options: + config: + description: A list of LAG configurations. + type: list + elements: dict + suboptions: + name: + description: + - ID of the LAG. + type: str + required: True + members: + description: + - The list of interfaces that are part of the group. + type: dict + suboptions: + interfaces: + description: The list of interfaces that are part of the group. + type: list + elements: dict + suboptions: + member: + description: + - The interface name. + type: str + mode: + description: + - Specifies mode of the port-channel while creation. 
+ type: str + choices: + - static + - lacp + state: + description: + - The state that the configuration should be left in. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using merged +# +# Before state: +# ------------- +# +# interface Eth1/10 +# mtu 9100 +# speed 100000 +# no shutdown +# ! +# interface Eth1/15 +# channel-group 12 +# mtu 9100 +# speed 100000 +# no shutdown +# +- name: Merges provided configuration with device configuration + dellemc.enterprise_sonic.sonic_lag_interfaces: + config: + - name: PortChannel10 + members: + interfaces: + - member: Eth1/10 + state: merged +# +# After state: +# ------------ +# +# interface Eth1/10 +# channel-group 10 +# mtu 9100 +# speed 100000 +# no shutdown +# ! +# interface Eth1/15 +# channel-group 12 +# mtu 9100 +# speed 100000 +# no shutdown +# +# Using deleted +# +# Before state: +# ------------- +# interface PortChannel10 +# ! +# interface Eth1/10 +# channel-group 10 +# mtu 9100 +# speed 100000 +# no shutdown +# +- name: Deletes LAG attributes of a given interface, This does not delete the port-channel itself + dellemc.enterprise_sonic.sonic_lag_interfaces: + config: + - name: PortChannel10 + members: + interfaces: + state: deleted +# +# After state: +# ------------ +# interface PortChannel10 +# ! +# interface Eth1/10 +# mtu 9100 +# speed 100000 +# no shutdown +# +# Using deleted +# +# Before state: +# ------------- +# interface PortChannel 10 +# ! +# interface PortChannel 12 +# ! +# interface Eth1/10 +# channel-group 10 +# mtu 9100 +# speed 100000 +# no shutdown +# ! +# interface Eth1/15 +# channel-group 12 +# mtu 9100 +# speed 100000 +# no shutdown +# +- name: Deletes all LAGs and LAG attributes of all interfaces + dellemc.enterprise_sonic.sonic_lag_interfaces: + config: + state: deleted +# +# After state: +# ------------- +# +# interface Eth1/10 +# mtu 9100 +# speed 100000 +# no shutdown +# ! 
+# interface Eth1/15 +# mtu 9100 +# speed 100000 +# no shutdown +# +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration that is returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.lag_interfaces.lag_interfaces import Lag_interfacesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.lag_interfaces.lag_interfaces import Lag_interfaces + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Lag_interfacesArgs.argument_spec, + supports_check_mode=True) + + result = Lag_interfaces(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py new file mode 100644 index 00000000..28d3dbb5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py @@ -0,0 +1,516 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_mclag +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_mclag +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage multi chassis link aggregation groups domain (MCLAG) and its parameters +description: + - Manage multi chassis link aggregation groups domain (MCLAG) and its parameters +author: Abirami N (@abirami-n) + +options: + config: + description: Dict of mclag domain configurations. + type: dict + suboptions: + domain_id: + description: + - ID of the mclag domain (MCLAG domain). + type: int + required: True + peer_address: + description: + - The IPV4 peer-ip for corresponding MCLAG. + type: str + source_address: + description: + - The IPV4 source-ip for corresponding MCLAG. + type: str + peer_link: + description: + - Peer-link for corresponding MCLAG. + type: str + system_mac: + description: + - Mac address of MCLAG. + type: str + keepalive: + description: + - MCLAG session keepalive-interval in secs. + type: int + session_timeout: + description: + - MCLAG session timeout value in secs. + type: int + unique_ip: + description: Holds Vlan dictionary for mclag unique ip. 
+ suboptions: + vlans: + description: + - Holds list of VLANs for which a separate IP address is enabled for Layer 3 protocol support over MCLAG. + type: list + elements: dict + suboptions: + vlan: + description: Holds a VLAN ID. + type: str + type: dict + members: + description: Holds portchannels dictionary for an MCLAG domain. + suboptions: + portchannels: + description: + - Holds a list of portchannels for configuring as an MCLAG interface. + type: list + elements: dict + suboptions: + lag: + description: Holds a PortChannel ID. + type: str + type: dict + state: + description: + - The state that the configuration should be left in. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using merged +# +# Before state: +# ------------- +# +# sonic# show mclag brief +# MCLAG Not Configured +# +- name: Merge provided configuration with device configuration + dellemc.enterprise_sonic.sonic_mclag: + config: + domain_id: 1 + peer_address: 1.1.1.1 + source_address: 2.2.2.2 + peer_link: 'Portchannel1' + keepalive: 1 + session_timeout: 3 + unique_ip: + vlans: + - vlan: Vlan4 + members: + portchannels: + - lag: PortChannel10 + state: merged +# +# After state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 2.2.2.2 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 1 secs +# Session Timeout : 3 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:1 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ...
+# } +# +# +# Using merged +# +# Before state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 2.2.2.2 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 1 secs +# Session Timeout : 3 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:1 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ... +# } +# +# +- name: Merge device configuration with the provided configuration + dellemc.enterprise_sonic.sonic_mclag: + config: + domain_id: 1 + source_address: 3.3.3.3 + keepalive: 10 + session_timeout: 30 + unique_ip: + vlans: + - vlan: Vlan5 + members: + portchannels: + - lag: PortChannel12 + state: merged +# +# After state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 3.3.3.3 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 10 secs +# Session Timeout : 30 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:2 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# PortChannel12 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# }, +# "Vlan5": { +# "unique_ip": "enable" +# } +# }, +# ... 
+# } +# +# +# Using deleted +# +# Before state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 3.3.3.3 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 10 secs +# Session Timeout : 30 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:1 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ... +# } +# +- name: Delete device configuration based on the provided configuration + dellemc.enterprise_sonic.sonic_mclag: + config: + domain_id: 1 + source_address: 3.3.3.3 + keepalive: 10 + members: + portchannels: + - lag: PortChannel10 + state: deleted +# +# After state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 1 secs +# Session Timeout : 15 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:0 +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ... 
+# } +# +# +# +# Using deleted +# +# Before state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 3.3.3.3 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 10 secs +# Session Timeout : 30 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:1 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ... +# } +# +- name: Delete all device configuration + dellemc.enterprise_sonic.sonic_mclag: + config: + state: deleted +# +# After state: +# ------------ +# +# sonic# show mclag brief +# MCLAG Not Configured +# +# admin@sonic:~$ show runningconfiguration all | grep MCLAG_UNIQUE_IP +# admin@sonic:~$ +# +# +# Using deleted +# +# Before state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : 3.3.3.3 +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 10 secs +# Session Timeout : 30 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:2 +#----------------------------------------------------------- +# MLAG Interface Local/Remote Status +#----------------------------------------------------------- +# PortChannel10 down/down +# PortChannel12 down/down +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ...
+# } +- name: Delete device configuration based on the provided configuration + dellemc.enterprise_sonic.sonic_mclag: + config: + domain_id: 1 + source_address: 3.3.3.3 + keepalive: 10 + members: + portchannels: + - lag: PortChannel10 + state: deleted +# +# After state: +# ------------ +# +# sonic# show mclag brief +# +# Domain ID : 1 +# Role : standby +# Session Status : down +# Peer Link Status : down +# Source Address : +# Peer Address : 1.1.1.1 +# Peer Link : PortChannel1 +# Keepalive Interval : 1 secs +# Session Timeout : 15 secs +# System Mac : 20:04:0f:37:bd:c9 +# +# +# Number of MLAG Interfaces:0 +# +# admin@sonic:~$ show runningconfiguration all +# { +# ... +# "MCLAG_UNIQUE_IP": { +# "Vlan4": { +# "unique_ip": "enable" +# } +# }, +# ... +# } +# +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device.
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.mclag.mclag import MclagArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.mclag.mclag import Mclag + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=MclagArgs.argument_spec, + supports_check_mode=True) + + result = Mclag(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py new file mode 100644 index 00000000..87db8bb0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py @@ -0,0 +1,360 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_ntp +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: sonic_ntp +version_added: 2.0.0 +short_description: Manage NTP configuration on SONiC. 
+description: + - This module provides configuration management of NTP for devices running SONiC. +author: "M. Zhang (@mingjunzhang2019)" +options: + config: + description: + - Specifies NTP related configurations. + type: dict + suboptions: + source_interfaces: + type: list + elements: str + description: + - List of names of NTP source interfaces. + enable_ntp_auth: + type: bool + description: + - Enable or disable NTP authentication. + trusted_keys: + type: list + elements: int + description: + - List of trusted NTP authentication keys. + vrf: + type: str + description: + - VRF name on which NTP is enabled. + servers: + type: list + elements: dict + description: + - List of NTP servers. + suboptions: + address: + type: str + description: + - IPv4/IPv6 address or host name of NTP server. + required: true + key_id: + type: int + description: + - NTP authentication key used by server. + - Key_id can not be deleted. + minpoll: + type: int + description: + - Minimum poll interval to poll NTP server. + - minpoll can not be deleted. + maxpoll: + type: int + description: + - Maximum poll interval to poll NTP server. + - maxpoll can not be deleted. + ntp_keys: + type: list + elements: dict + description: + - List of NTP authentication keys. + suboptions: + key_id: + type: int + description: + - NTP authentication key identifier. + required: true + key_type: + type: str + description: + - NTP authentication key type. + - key_type can not be deleted. + - When "state" is "merged", "key_type" is required. + choices: + - NTP_AUTH_SHA1 + - NTP_AUTH_MD5 + - NTP_AUTH_SHA2_256 + key_value: + type: str + description: + - NTP authentication key value. + - key_value can not be deleted. + - When "state" is "merged", "key_value" is required. + encrypted: + type: bool + description: + - NTP authentication key_value is encrypted. + - encrypted can not be deleted. + - When "state" is "merged", "encrypted" is required. 
+ + state: + description: + - The state of the configuration after module completion. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#sonic# show ntp server +#---------------------------------------------------------------------- +#NTP Servers minpoll maxpoll Authentication key ID +#---------------------------------------------------------------------- +#10.11.0.1 6 10 +#10.11.0.2 5 9 +#dell.com 6 9 +#dell.org 7 10 +# +- name: Delete NTP server configuration + ntp: + config: + servers: + - address: 10.11.0.2 + - address: dell.org + state: deleted + +# After state: +# ------------ +# +#sonic# show ntp server +#---------------------------------------------------------------------- +#NTP Servers minpoll maxpoll Authentication key ID +#---------------------------------------------------------------------- +#10.11.0.1 6 10 +#dell.com 6 9 +# +# +# Using deleted +# +# Before state: +# ------------- +# +#sonic# show ntp global +#---------------------------------------------- +#NTP Global Configuration +#---------------------------------------------- +#NTP source-interfaces: Ethernet0, Ethernet4, Ethernet8, Ethernet16 +# +- name: Delete NTP source-interface configuration + ntp: + config: + source_interfaces: + - Ethernet8 + - Ethernet16 + state: deleted + +# After state: +# ------------ +# +#sonic# show ntp global +#---------------------------------------------- +#NTP Global Configuration +#---------------------------------------------- +#NTP source-interfaces: Ethernet0, Ethernet4 +# +# +# Using deleted +# +# Before state: +# ------------- +# +#sonic# show running-configuration | grep ntp +#ntp authentication-key 8 sha1 U2FsdGVkX1/NpJrdOeyMeUHEkSohY6azY9VwbAqXRTY= encrypted +#ntp authentication-key 10 md5 U2FsdGVkX1/Gxds/5pscCvIKbVngGaKka4SQineS51Y= encrypted +#ntp authentication-key 20 sha2-256 U2FsdGVkX1/eAzKj1teKhYWD7tnzOsYOijGeFAT0rKM= encrypted +# +- name: Delete NTP key 
configuration + ntp: + config: + ntp_keys: + - key_id: 10 + - key_id: 20 + state: deleted +# +# After state: +# ------------ +# +#sonic# show running-configuration | grep ntp +#ntp authentication-key 8 sha1 U2FsdGVkX1/NpJrdOeyMeUHEkSohY6azY9VwbAqXRTY= encrypted +# +# +# Using merged +# +# Before state: +# ------------- +# +#sonic# show ntp server +#---------------------------------------------------------------------- +#NTP Servers minpoll maxpoll Authentication key ID +#---------------------------------------------------------------------- +#10.11.0.1 6 10 +#dell.com 6 9 +# +- name: Merge NTP server configuration + ntp: + config: + servers: + - address: 10.11.0.2 + minpoll: 5 + - address: dell.org + minpoll: 7 + maxpoll: 10 + state: merged + +# After state: +# ------------ +# +#sonic# show ntp server +#---------------------------------------------------------------------- +#NTP Servers minpoll maxpoll Authentication key ID +#---------------------------------------------------------------------- +#10.11.0.1 6 10 +#10.11.0.2 5 9 +#dell.com 6 9 +#dell.org 7 10 +# +# +# Using merged +# +# Before state: +# ------------- +# +#sonic# show ntp global +#---------------------------------------------- +#NTP Global Configuration +#---------------------------------------------- +#NTP source-interfaces: Ethernet0, Ethernet4 +# +- name: Merge NTP source-interface configuration + ntp: + config: + source_interfaces: + - Ethernet8 + - Ethernet16 + state: merged + +# After state: +# ------------ +# +#sonic# show ntp global +#---------------------------------------------- +#NTP Global Configuration +#---------------------------------------------- +#NTP source-interfaces: Ethernet0, Ethernet4, Ethernet8, Ethernet16 +# +# +# Using merged +# +# Before state: +# ------------- +# +#sonic# show running-configuration | grep ntp +#ntp authentication-key 8 sha1 U2FsdGVkX1/NpJrdOeyMeUHEkSohY6azY9VwbAqXRTY= encrypted +# +- name: Merge NTP key configuration + ntp: + config: + ntp_keys: + - 
key_id: 10 + key_type: NTP_AUTH_MD5 + key_value: dellemc10 + encrypted: false + - key_id: 20 + key_type: NTP_AUTH_SHA2_256 + key_value: dellemc20 + encrypted: false + state: merged +# +# After state: +# ------------ +# +#sonic# show running-configuration | grep ntp +#ntp authentication-key 8 sha1 U2FsdGVkX1/NpJrdOeyMeUHEkSohY6azY9VwbAqXRTY= encrypted +#ntp authentication-key 10 md5 U2FsdGVkX1/Gxds/5pscCvIKbVngGaKka4SQineS51Y= encrypted +#ntp authentication-key 20 sha2-256 U2FsdGVkX1/eAzKj1teKhYWD7tnzOsYOijGeFAT0rKM= encrypted +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.ntp.ntp import NtpArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.ntp.ntp import Ntp + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=NtpArgs.argument_spec, + supports_check_mode=True) + + result = Ntp(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py new file mode 100644 index 00000000..66ea0047 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_port_breakout +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_port_breakout +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Configure port breakout settings on physical interfaces +description: + - This module provides configuration management of port breakout parameters on devices running Enterprise SONiC. +options: + config: + description: + - Specifies the port breakout related configuration. + type: list + elements: dict + suboptions: + name: + description: + - Specifies the name of the port breakout. + type: str + required: true + mode: + description: + - Specifies the mode of the port breakout. + type: str + choices: + - 1x100G + - 1x400G + - 1x40G + - 2x100G + - 2x200G + - 2x50G + - 4x100G + - 4x10G + - 4x25G + - 4x50G + state: + description: + - Specifies the operation to be performed on the port breakout configured on the device. + - In case of merged, the input mode configuration will be merged with the existing port breakout configuration on the device. + - In case of deleted the existing port breakout mode configuration will be removed from the device. 
+ default: merged + choices: ['merged', 'deleted'] + type: str +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 4x10G Completed Eth1/1/1 +# Eth1/1/2 +# Eth1/1/3 +# Eth1/1/4 +#1/11 1x100G Completed Eth1/11 +# + +- name: Merge users configurations + dellemc.enterprise_sonic.sonic_port_breakout: + config: + - name: 1/11 + mode: 1x100G + state: deleted + +# After state: +# ------------ +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 4x10G Completed Eth1/1/1 +# Eth1/1/2 +# Eth1/1/3 +# Eth1/1/4 +#1/11 Default Completed Ethernet40 + + +# Using deleted +# +# Before state: +# ------------- +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 4x10G Completed Eth1/1/1 +# Eth1/1/2 +# Eth1/1/3 +# Eth1/1/4 +#1/11 1x100G Completed Eth1/11 +# +- name: Merge users configurations + dellemc.enterprise_sonic.sonic_port_breakout: + config: + state: deleted + + +# After state: +# ------------ +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 Default Completed Ethernet0 +#1/11 Default Completed Ethernet40 + + +# Using merged +# +# Before state: +# ------------- +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 4x10G Completed Eth1/1/1 +# Eth1/1/2 +# Eth1/1/3 +# Eth1/1/4 +# +- name: Merge users configurations + dellemc.enterprise_sonic.sonic_port_breakout: + config: + - name: 1/11 + mode: 
1x100G + state: merged + + +# After state: +# ------------ +# +#do show interface breakout +#----------------------------------------------- +#Port Breakout Mode Status Interfaces +#----------------------------------------------- +#1/1 4x10G Completed Eth1/1/1 +# Eth1/1/2 +# Eth1/1/3 +# Eth1/1/4 +#1/11 1x100G Completed Eth1/11 + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.port_breakout.port_breakout import Port_breakoutArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.port_breakout.port_breakout import Port_breakout + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Port_breakoutArgs.argument_spec, + supports_check_mode=True) + + result = Port_breakout(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py new file mode 100644 index 00000000..5a734e8b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py @@ -0,0 +1,423 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_prefix_lists +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_prefix_lists +version_added: "2.0.0" +author: Kerry Meyer (@kerry-meyer) +short_description: prefix list configuration handling for SONiC +description: + - This module provides configuration management for prefix list parameters on devices running SONiC. 
+options: + config: + description: + - Specifies a list of prefix set configuration dictionaries + type: list + elements: dict + suboptions: + name: + description: + - Name of a prefix set (a list of prefix entries) + type: str + required: true + afi: + description: + - Specifies the Address Family for addresses in the prefix list entries + type: str + choices: ["ipv4", "ipv6"] + default: "ipv4" + prefixes: + description: + - A list of prefix entries + type: list + elements: dict + suboptions: + sequence: + description: + - Precedence for this prefix entry (unique within the prefix list) + type: int + required: true + action: + description: + - Action to be taken for addresses matching this prefix entry + type: str + required: true + choices: ["permit", "deny"] + prefix: + description: + - IPv4 or IPv6 prefix in A.B.C.D/LEN or A:B::C:D/LEN format + type: str + required: true + ge: + description: Minimum prefix length to be matched + type: int + le: + description: Maximum prefix length to be matched + type: int + state: + description: + - Specifies the type of configuration update to be performed on the device. + - For "merged", merge specified attributes with existing configured attributes. + - For "deleted", delete the specified attributes from exiting configuration. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using "merged" state to create initial configuration +# +# Before state: +# ------------- +# +# sonic# show running-configuration ip prefix-list +# sonic# +# (No configuration present) +# +# ------------- +# +- name: Merge initial prefix-list configuration + dellemc.enterprise_sonic.sonic_prefix_lists: + config: + - name: pfx1 + afi: "ipv4" + prefixes: + - sequence: 10 + prefix: "1.2.3.4/24" + action: "permit" + ge: 26 + le: 30 + state: merged + +# After state: +# ------------ +# +# sonic# show running-configuration ip prefix-list +# ! 
+# ip prefix-list pfx1 seq 10 permit 1.2.3.4/24 ge 26 le 30 +# ------------ +# +# *************************************************************** +# Using "merged" state to update and add configuration +# +# Before state: +# ------------ +# +# sonic# show running-configuration ip prefix-list +# ! +# ip prefix-list pfx1 seq 10 permit 1.2.3.4/24 ge 26 le 30 +# +# sonic# show running-configuration ipv6 prefix-list +# sonic# +# (no IPv6 prefix-list configuration present) +# +# ------------ +# +- name: Merge additional prefix-list configuration + dellemc.enterprise_sonic.sonic_prefix_lists: + config: + - name: pfx1 + afi: "ipv4" + prefixes: + - sequence: 20 + action: "deny" + prefix: "1.2.3.12/26" + - sequence: 30 + action: "permit" + prefix: "7.8.9.0/24" + - name: pfx6 + afi: "ipv6" + prefixes: + - sequence: 25 + action: "permit" + prefix: "40::300/124" + state: merged + +# After state: +# ------------ +# +# sonic# show running-configuration ip prefix-list +# ! +# ip prefix-list pfx1 seq 10 permit 1.2.3.4/24 ge 26 le 30 +# ip prefix-list pfx1 seq 20 deny 1.2.3.12/26 +# ip prefix-list pfx1 seq 30 permit 7.8.9.0/24 +# +# sonic# show running-configuration ipv6 prefix-list +# ! +# ipv6 prefix-list pfx6 seq 25 permit 40::300/124 +# +# *************************************************************** +# Using "deleted" state to remove configuration +# +# Before state: +# ------------ +# +# sonic# show running-configuration ip prefix-list +# ! +# ip prefix-list pfx1 seq 10 permit 1.2.3.4/24 ge 26 le 30 +# ip prefix-list pfx1 seq 20 deny 1.2.3.12/26 +# ip prefix-list pfx1 seq 30 permit 7.8.9.0/24 +# +# sonic# show running-configuration ipv6 prefix-list +# ! 
+# ipv6 prefix-list pfx6 seq 25 permit 40::300/124 +# +# ------------ +# +- name: Delete selected prefix-list configuration + dellemc.enterprise_sonic.sonic_prefix_lists: + config: + - name: pfx1 + afi: "ipv4" + prefixes: + - sequence: 10 + prefix: "1.2.3.4/24" + action: "permit" + ge: 26 + le: 30 + - sequence: 20 + action: "deny" + prefix: "1.2.3.12/26" + - name: pfx6 + afi: "ipv6" + prefixes: + - sequence: 25 + action: "permit" + prefix: "40::300/124" + state: deleted + +# After state: +# ------------ +# +# sonic# show running-configuration ip prefix-list +# ! +# ip prefix-list pfx1 seq 30 permit 7.8.9.0/24 +# +# sonic# show running-configuration ipv6 prefix-list +# sonic# +# (no IPv6 prefix-list configuration present) +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + +# "before": [ +# { +# "afi": "ipv6", +# "name": "pf4", +# "prefixes": [ +# { +# "action": "permit", +# "ge": null, +# "le": null, +# "prefix": "50:60::/64", +# "sequence": 40 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf3", +# "prefixes": [ +# { +# "action": "deny", +# "ge": null, +# "le": 27, +# "prefix": "1.2.3.128/25", +# "sequence": 30 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf2", +# "prefixes": [ +# { +# "action": "permit", +# "ge": 27, +# "le": 29, +# "prefix": "10.20.30.128/25", +# "sequence": 50 +# }, +# { +# "action": "deny", +# "ge": 26, +# "le": null, +# "prefix": "10.20.30.0/24", +# "sequence": 20 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf1", +# "prefixes": [ +# { +# "action": "deny", +# "ge": 25, +# "le": 27, +# "prefix": "1.2.3.0/24", +# "sequence": 10 +# } +# ] +# } +# ] + +after: + description: The resulting configuration model invocation. 
+ returned: when changed + type: list + sample: > + +# "after": [ +# { +# "afi": "ipv4", +# "name": "pf5", +# "prefixes": [ +# { +# "action": "permit", +# "ge": null, +# "le": null, +# "prefix": "15.25.35.0/24", +# "sequence": 15 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf1", +# "prefixes": [ +# { +# "action": "deny", +# "ge": 25, +# "le": 27, +# "prefix": "1.2.3.0/24", +# "sequence": 10 +# } +# ] +# }, +# { +# "afi": "ipv6", +# "name": "pf4", +# "prefixes": [ +# { +# "action": "permit", +# "ge": null, +# "le": null, +# "prefix": "50:60::/64", +# "sequence": 40 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf3", +# "prefixes": [ +# { +# "action": "deny", +# "ge": null, +# "le": 27, +# "prefix": "1.2.3.128/25", +# "sequence": 30 +# } +# ] +# }, +# { +# "afi": "ipv4", +# "name": "pf2", +# "prefixes": [ +# { +# "action": "permit", +# "ge": 27, +# "le": 29, +# "prefix": "10.20.30.128/25", +# "sequence": 50 +# }, +# { +# "action": "deny", +# "ge": 26, +# "le": null, +# "prefix": "10.20.30.0/24", +# "sequence": 20 +# } +# ] +# } +# ] + +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: + +# "commands": [ +# { +# "afi": "ipv4", +# "name": "pf5", +# "prefixes": [ +# { +# "action": "permit", +# "prefix": "15.25.35.0/24", +# "sequence": 15 +# } +# ], +# "state": "merged" +# } +# ], +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.prefix_lists.prefix_lists import Prefix_listsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.prefix_lists.prefix_lists import Prefix_lists + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Prefix_listsArgs.argument_spec, + supports_check_mode=True) + + result = Prefix_lists(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py new file mode 100644 index 00000000..1df4aff6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_radius_server +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_radius_server +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Manage RADIUS server and its parameters +description: + - This module provides configuration management of radius server parameters on devices running Enterprise SONiC. +options: + config: + description: + - Specifies the radius server related configuration. + type: dict + suboptions: + auth_type: + description: + - Specifies the authentication type of the radius server. + type: str + choices: + - pap + - chap + - mschapv2 + default: pap + key: + description: + - Specifies the key of the radius server. + type: str + nas_ip: + description: + - Specifies the network access server of the radius server. + type: str + statistics: + description: + - Specifies the statistics flag of the radius server. + type: bool + timeout: + description: + - Specifies the timeout of the radius server. + type: int + retransmit: + description: + - Specifies the re-transmit value of the radius server. + type: int + servers: + description: + - Specifies the servers list of the radius server. + type: dict + suboptions: + host: + description: + - Specifies the host details of the radius servers list. + type: list + elements: dict + suboptions: + name: + description: + - Specifies the name of the radius server host. + type: str + auth_type: + description: + - Specifies the authentication type of the radius server host. + type: str + choices: + - pap + - chap + - mschapv2 + key: + description: + - Specifies the key of the radius server host. + type: str + priority: + description: + - Specifies the priority of the radius server host. 
+ type: int
+ port:
+ description:
+ - Specifies the port of the radius server host.
+ type: int
+ timeout:
+ description:
+ - Specifies the timeout of the radius server host.
+ type: int
+ retransmit:
+ description:
+ - Specifies the retransmit of the radius server host.
+ type: int
+ source_interface:
+ description:
+ - Specifies the source interface of the radius server host.
+ type: str
+ vrf:
+ description:
+ - Specifies the vrf of the radius server host.
+ type: str
+ state:
+ description:
+ - Specifies the operation to be performed on the radius server configured on the device.
+ - In case of merged, the input mode configuration will be merged with the existing radius server configuration on the device.
+ - In case of deleted the existing radius server mode configuration will be removed from the device.
+ default: merged
+ choices: ['merged', 'deleted']
+ type: str
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show radius-server
+#---------------------------------------------------------
+#RADIUS Global Configuration
+#---------------------------------------------------------
+#nas-ip-addr: 1.2.3.4
+#statistics : True
+#timeout : 10
+#auth-type : chap
+#key : chap
+#retransmit : 3
+#--------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI
+#--------------------------------------------------------------------------------
+#localhost mschapv2 local 52 2 20 2 mgmt Ethernet12
+#myhost chap local 53 3 23 3 mgmt Ethernet24
+#---------------------------------------------------------
+#RADIUS Statistics
+#---------------------------------------------------------
+#
+
+- name: Delete radius configurations
+ dellemc.enterprise_sonic.sonic_radius_server:
+ config:
+ auth_type: chap
+ nas_ip: 1.2.3.4
+ statistics: true
+ timeout: 10
+ servers:
+ host:
+ - name: localhost
+ state: deleted
+
+# After state:
+# ------------
+#sonic(config)# do show radius-server +#--------------------------------------------------------- +#RADIUS Global Configuration +#--------------------------------------------------------- +#timeout : 5 +#auth-type : pap +#key : chap +#retransmit : 3 +#-------------------------------------------------------------------------------- +#HOST AUTH-TYPE KEY AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI +#-------------------------------------------------------------------------------- +#myhost chap local 53 3 23 3 mgmt Ethernet24 + + +# Using deleted +# +# Before state: +# ------------- +# +#sonic(config)# do show radius-server +#--------------------------------------------------------- +#RADIUS Global Configuration +#--------------------------------------------------------- +#nas-ip-addr: 1.2.3.4 +#statistics : True +#timeout : 10 +#auth-type : chap +#key : chap +#retransmit : 3 +#-------------------------------------------------------------------------------- +#HOST AUTH-TYPE KEY AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI +#-------------------------------------------------------------------------------- +#localhost mschapv2 local 52 2 20 2 mgmt Ethernet12 +#myhost chap local 53 3 23 3 mgmt Ethernet24 +#--------------------------------------------------------- +#RADIUS Statistics +#--------------------------------------------------------- +# +- name: Merge radius configurations + dellemc.enterprise_sonic.sonic_radius_server: + config: + state: deleted + +# After state: +# ------------ +#sonic(config)# do show radius-server +#--------------------------------------------------------- +#RADIUS Global Configuration +#--------------------------------------------------------- +#timeout : 5 +#auth-type : pap + + +# Using merged +# +# Before state: +# ------------- +# +#sonic(config)# do show radius-server +#--------------------------------------------------------- +#RADIUS Global Configuration +#--------------------------------------------------------- +# +- name: Merge radius 
configurations + dellemc.enterprise_sonic.sonic_radius_server: + config: + auth_type: chap + key: chap + nas_ip: 1.2.3.4 + statistics: true + timeout: 10 + retransmit: 3 + servers: + host: + - name: localhost + auth_type: mschapv2 + key: local + priority: 2 + port: 52 + retransmit: 2 + timeout: 20 + source_interface: Eth 12 + vrf: mgmt + state: merged + +# After state: +# ------------ +# +#sonic(config)# do show radius-server +#--------------------------------------------------------- +#RADIUS Global Configuration +#--------------------------------------------------------- +#nas-ip-addr: 1.2.3.4 +#statistics : True +#timeout : 10 +#auth-type : chap +#key : chap +#retransmit : 3 +#-------------------------------------------------------------------------------- +#HOST AUTH-TYPE KEY AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI +#-------------------------------------------------------------------------------- +#localhost mschapv2 local 52 2 20 2 mgmt Ethernet12 +#--------------------------------------------------------- +#RADIUS Statistics +#--------------------------------------------------------- + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.radius_server.radius_server import Radius_serverArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.radius_server.radius_server import Radius_server
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Radius_serverArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Radius_server(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py
new file mode 100644
index 00000000..7a528cdf
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+# +############################################# + +""" +The module file for sonic_static_routes +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_static_routes +version_added: 2.0.0 +short_description: Manage static routes configuration on SONiC +description: + - This module provides configuration management of static routes for devices running SONiC +author: "Shade Talabi (@stalabi1)" +options: + config: + type: list + elements: dict + description: + - Manages 'static_routes' configurations + suboptions: + vrf_name: + required: True + type: str + description: + - Name of the configured VRF on the device. + static_list: + type: list + elements: dict + description: + - A list of 'static_routes' configurations. + suboptions: + prefix: + required: True + type: str + description: + - Destination prefix for the static route, either IPv4 or IPv6. + next_hops: + type: list + elements: dict + description: + - A list of next-hops to be utilised for the static route being specified. + suboptions: + index: + required: True + type: dict + description: + - An identifier utilised to uniquely reference the next-hop. + suboptions: + blackhole: + type: bool + default: False + description: + - Indicates that packets matching this route should be discarded. + interface: + type: str + description: + - The reference to a base interface. + nexthop_vrf: + type: str + description: + - Name of the next-hop network instance for leaked routes. + next_hop: + type: str + description: + - The next-hop that is to be used for the static route. + metric: + type: int + description: + - Specifies the preference of the next-hop entry when it is injected into the RIB. + track: + type: int + description: + - The IP SLA track ID for static route. + tag: + type: int + description: + - The tag value for the static route. + state: + description: + - The state of the configuration after module completion. 
+ type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ + +# Using merged +# +# Before State: +# ------------- +# +# sonic# show running-configuration | grep "ip route" +# (No "ip route" configuration present) + + - name: Merge static routes configurations + dellemc.enterprise_sonic.sonic_static_routes: + config: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: 'Ethernet4' + metric: 1 + tag: 2 + track: 3 + - index: + next_hop: '3.0.0.0' + metric: 2 + tag: 4 + track: 8 + - vrf_name: '{{vrf_1}}' + static_list: + - prefix: '3.0.0.0/8' + next_hops: + - index: + interface: 'eth0' + nexthop_vrf: '{{vrf_2}}' + next_hop: '4.0.0.0' + metric: 4 + tag: 5 + track: 6 + - index: + blackhole: True + metric: 10 + tag: 20 + track: 30 + state: merged + +# After State: +# ------------ +# +# sonic# show running-configuration | grep "ip route" +# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2 +# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1 +# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4 +# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 20 track 30 10 +# +# +# Modifying previous merge + + - name: Modify static routes configurations + dellemc.enterprise_sonic.sonic_static_routes: + config: + - vrf_name: '{{vrf_1}}' + static_list: + - prefix: '3.0.0.0/8' + next_hops: + - index: + blackhole: True + metric: 11 + tag: 22 + track: 33 + state: merged + +# After State: +# ------------ +# +# sonic# show running-configuration | grep "ip route" +# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2 +# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1 +# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4 +# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 22 track 33 11 + + +# Using deleted +# +# Before State: +# ------------- +# +# sonic# show running-configuration | grep "ip route" +# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 
8 2 +# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1 +# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4 +# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 22 track 33 11 + + - name: Delete static routes configurations + dellemc.enterprise_sonic.sonic_static_routes: + config: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: 'Ethernet4' + - vrf_name: '{{vrf_1}}' + state: deleted + +# After State: +# ------------ +# +# sonic# show running-configuration | grep "ip route" +# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2 + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.static_routes.static_routes import Static_routesArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.static_routes.static_routes import Static_routes + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Static_routesArgs.argument_spec, + supports_check_mode=True) + + result = Static_routes(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py new file mode 100644 index 00000000..efb285a1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_system +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_system +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Abirami N (@abirami-n) +short_description: Configure system parameters +description: + - This module is used for configuration management of global system parameters on devices running Enterprise SONiC. +options: + config: + description: + - Specifies the system related configurations + type: dict + suboptions: + hostname: + description: + - Specifies the hostname of the SONiC device + type: str + interface_naming: + description: + - Specifies the type of interface-naming in device + type: str + choices: + - standard + - native + anycast_address: + description: + - Specifies different types of anycast address that can be configured on the device + type: dict + suboptions: + ipv4: + description: + - Enable or disable ipv4 anycast-address + type: bool + ipv6: + description: + - Enable or disable ipv6 anycast-address + type: bool + mac_address: + description: + - Specifies the mac anycast-address + type: str + state: + description: + - Specifies the operation to be performed on the system parameters configured on the device. + - In case of merged, the input configuration will be merged with the existing system configuration on the device. + - In case of deleted the existing system configuration will be removed from the device. + default: merged + choices: ['merged', 'deleted'] + type: str +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +#! +#SONIC(config)#do show running-configuration +#! 
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+#ipv6 anycast-address enable
+#interface-naming standard
+
+- name: Delete provided configuration from device configuration
+ dellemc.enterprise_sonic.sonic_system:
+ config:
+ hostname: SONIC
+ interface_naming: standard
+ anycast_address:
+ ipv6: true
+ state: deleted
+
+# After state:
+# ------------
+#!
+#sonic(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+
+
+# Using deleted
+#
+# Before state:
+# -------------
+#!
+#SONIC(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+#ipv6 anycast-address enable
+#interface-naming standard
+
+- name: Delete all system related configs in device configuration
+ dellemc.enterprise_sonic.sonic_system:
+ config:
+ state: deleted
+
+# After state:
+# ------------
+#!
+#sonic(config)#do show running-configuration
+#!
+
+
+# Using merged
+#
+# Before state:
+# -------------
+#!
+#sonic(config)#do show running-configuration
+#!
+
+- name: Merge provided configuration with device configuration
+ dellemc.enterprise_sonic.sonic_system:
+ config:
+ hostname: SONIC
+ interface_naming: standard
+ anycast_address:
+ ipv6: true
+ ipv4: true
+ mac_address: aa:bb:cc:dd:ee:ff
+ state: merged
+
+# After state:
+# ------------
+#!
+#SONIC(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+#ipv6 anycast-address enable
+#interface-naming standard
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.system.system import SystemArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.system.system import System + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=SystemArgs.argument_spec, + supports_check_mode=True) + + result = System(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py new file mode 100644 index 00000000..3295e11b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_tacacs_server +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_tacacs_server +version_added: 1.1.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Manage TACACS server and its parameters +description: + - This module provides configuration management of tacacs server parameters on devices running Enterprise SONiC. +options: + config: + description: + - Specifies the tacacs server related configuration. + type: dict + suboptions: + auth_type: + description: + - Specifies the authentication type of the tacacs server. + type: str + choices: + - pap + - chap + - mschap + - login + default: pap + key: + description: + - Specifies the key of the tacacs server. + type: str + timeout: + description: + - Specifies the timeout of the tacacs server. + type: int + source_interface: + description: + - Specifies the source interface of the tacacs server. + type: str + servers: + description: + - Specifies the servers list of the tacacs server. + type: dict + suboptions: + host: + description: + - Specifies the host details of the tacacs servers list. + type: list + elements: dict + suboptions: + name: + description: + - Specifies the name of the tacacs server host. + type: str + auth_type: + description: + - Specifies the authentication type of the tacacs server host. + type: str + choices: + - pap + - chap + - mschap + - login + default: pap + key: + description: + - Specifies the key of the tacacs server host. + type: str + priority: + description: + - Specifies the priority of the tacacs server host. + type: int + default: 1 + port: + description: + - Specifies the port of the tacacs server host. 
+ type: int
+ default: 49
+ timeout:
+ description:
+ - Specifies the timeout of the tacacs server host.
+ type: int
+ default: 5
+ vrf:
+ description:
+ - Specifies the vrf of the tacacs server host.
+ type: str
+ default: default
+ state:
+ description:
+ - Specifies the operation to be performed on the tacacs server configured on the device.
+ - In case of merged, the input mode configuration will be merged with the existing tacacs server configuration on the device.
+ - In case of deleted the existing tacacs server mode configuration will be removed from the device.
+ default: merged
+ choices: ['merged', 'deleted']
+ type: str
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : login
+#key : login
+#------------------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY PORT PRIORITY TIMEOUT VRF
+#------------------------------------------------------------------------------------------------
+#1.2.3.4 pap ***** 50 2 10 mgmt
+#localhost pap 49 1 5 default
+#
+
+- name: Delete tacacs configurations
+ dellemc.enterprise_sonic.sonic_tacacs_server:
+ config:
+ auth_type: login
+ key: login
+ source_interface: Ethernet 12
+ timeout: 10
+ servers:
+ host:
+ - name: 1.2.3.4
+ state: deleted
+
+# After state:
+# ------------
+#
+#do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#timeout : 5
+#auth-type : pap
+#------------------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY PORT PRIORITY TIMEOUT VRF
+#------------------------------------------------------------------------------------------------
+#localhost pap 49 1 5 default
+
+
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : login
+#key : login
+#------------------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY PORT PRIORITY TIMEOUT VRF
+#------------------------------------------------------------------------------------------------
+#1.2.3.4 pap ***** 50 2 10 mgmt
+#localhost pap 49 1 5 default
+#
+
+- name: Delete tacacs configurations
+ dellemc.enterprise_sonic.sonic_tacacs_server:
+ config:
+ state: deleted
+
+# After state:
+# ------------
+#
+#do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#timeout : 5
+#auth-type : pap
+
+
+# Using merged
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#
+- name: Merge tacacs configurations
+ dellemc.enterprise_sonic.sonic_tacacs_server:
+ config:
+ auth_type: pap
+ key: pap
+ source_interface: Ethernet 12
+ timeout: 10
+ servers:
+ host:
+ - name: 1.2.3.4
+ auth_type: pap
+ key: 1234
+ state: merged
+
+# After state:
+# ------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : pap
+#key : pap
+#------------------------------------------------------------------------------------------------ +#HOST AUTH-TYPE KEY PORT PRIORITY TIMEOUT VRF +#------------------------------------------------------------------------------------------------ +#1.2.3.4 pap 1234 49 1 5 default + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.tacacs_server.tacacs_server import Tacacs_serverArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.tacacs_server.tacacs_server import Tacacs_server + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=Tacacs_serverArgs.argument_spec, + supports_check_mode=True) + + result = Tacacs_server(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py new file mode 100644 index 00000000..7f0855a9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2019 Red Hat +# GNU General Public License v3.0+ 
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. +# +############################################# + +""" +The module file for sonic_users +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_users +version_added: 1.1.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Niraimadaiselvam M (@niraimadaiselvamm) +short_description: Manage users and its parameters +description: + - This module provides configuration management of users parameters on devices running Enterprise SONiC. +options: + config: + description: + - Specifies the users related configuration. + type: list + elements: dict + suboptions: + name: + description: + - Specifies the name of the user. + type: str + required: true + role: + description: + - Specifies the role of the user. + type: str + choices: + - admin + - operator + password: + description: + - Specifies the password of the user. + type: str + update_password: + description: + - Specifies the update password flag. + - In case of always, password will be updated every time. + - In case of on_create, password will be updated only when user is created. + type: str + choices: + - always + - on_create + default: always + state: + description: + - Specifies the operation to be performed on the users configured on the device. + - In case of merged, the input configuration will be merged with the existing users configuration on the device. 
+    - In case of deleted, the existing users configuration will be removed from the device.
+    default: merged
+    choices: ['merged', 'deleted']
+    type: str
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#do show running-configuration
+#!
+#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
+#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
+#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator
+#
+- name: Delete users configurations
+  dellemc.enterprise_sonic.sonic_users:
+    config:
+      - name: sysoperator
+    state: deleted
+# After state:
+# ------------
+#
+#do show running-configuration
+#!
+#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
+#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
+
+
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#do show running-configuration
+#!
+#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
+#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
+#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator
+#
+- name: Delete all users configurations except admin
+  dellemc.enterprise_sonic.sonic_users:
+    config:
+    state: deleted
+
+# After state:
+# ------------
+#
+#do show running-configuration
+#!
+#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin + + +# Using merged +# +# Before state: +# ------------- +# +#do show running-configuration +#! +#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin +# +- name: Merge users configurations + dellemc.enterprise_sonic.sonic_users: + config: + - name: sysadmin + role: admin + password: admin + update_password: always + - name: sysoperator + role: operator + password: operator + update_password: always + state: merged + +# After state: +# ------------ +#! +#do show running-configuration +#! +#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin +#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin +#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned will always be in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.users.users import UsersArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.users.users import Users + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=UsersArgs.argument_spec, + supports_check_mode=True) + + result = Users(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py new file mode 100644 index 00000000..cfd536c7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_vlans +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_vlans +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +author: Mohamed Javeed (@javeedf) +short_description: Manage VLAN and its parameters +description: + - This module provides configuration management of VLANs parameters + on devices running Enterprise SONiC Distribution by Dell Technologies. +options: + config: + description: A dictionary of VLAN options. + type: list + elements: dict + suboptions: + vlan_id: + description: + - ID of the VLAN + - Range is 1 to 4094 + type: int + required: true + description: + description: + - Description about the VLAN. + type: str + state: + description: + - The state that the configuration should be left in. + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using merged + +# Before state: +# ------------- +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#30 Inactive +# +#sonic# +# + + +- name: Merges given VLAN attributes with the device configuration + dellemc.enterprise_sonic.sonic_vlans: + config: + - vlan_id: 10 + description: "Internal" + state: merged + +# After state: +# ------------ +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#30 Inactive +# +#sonic# +# +#sonic# show interface Vlan 10 +#Description: Internal +#Vlan10 is up +#Mode of IPV4 address assignment: not-set +#Mode of IPV6 address assignment: not-set +#IP MTU 6000 bytes +#sonic# +# + + +# Using deleted + +# Before state: +# ------------- +# +#sonic# show interface Vlan 70 +#Description: Internal +#Vlan70 is up +#Mode of IPV4 address assignment: not-set +#Mode of IPV6 address assignment: not-set +#IP MTU 6000 bytes + +- 
name: Deletes attributes of the given VLANs + dellemc.enterprise_sonic.sonic_vlans: + config: + - vlan_id: 70 + description: "Internal" + state: deleted + +# After state: +# ------------ +# +#sonic# show interface Vlan 70 +#Vlan70 is up +#Mode of IPV4 address assignment: not-set +#Mode of IPV6 address assignment: not-set +#IP MTU 6000 bytes + +# Before state: +# ------------- +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#20 Inactive +# +#sonic# + +- name: Deletes attributes of the given VLANs + dellemc.enterprise_sonic.sonic_vlans: + config: + - vlan_id: 20 + state: deleted + +# After state: +# ------------ +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +# +#sonic# + + +# Using deleted + +# Before state: +# ------------- +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +#10 Inactive +#20 Inactive +#30 Inactive +# +#sonic# + +- name: Deletes all the VLANs on the switch + dellemc.enterprise_sonic.sonic_vlans: + config: + state: deleted + +# After state: +# ------------ +# +#sonic# show Vlan +#Q: A - Access (Untagged), T - Tagged +#NUM Status Q Ports +# +#sonic# + + +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration that is returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +commands: + description: The set of commands pushed to the remote device. 
+  returned: always
+  type: list
+  sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vlans.vlans import VlansArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vlans.vlans import Vlans
+
+
+def main():
+    """
+    Main entry point for module execution
+
+    :returns: the result from module invocation
+    """
+    module = AnsibleModule(argument_spec=VlansArgs.argument_spec,
+                           supports_check_mode=True)
+
+    result = Vlans(module).execute_module()
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py
new file mode 100644
index 00000000..4c881aee
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+#                WARNING                    #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+# +############################################# + +""" +The module file for sonic_vrfs +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_vrfs +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage VRFs and associate VRFs to interfaces such as, Eth, LAG, VLAN, and loopback +description: Manages VRF and VRF interface attributes in Enterprise SONiC Distribution by Dell Technologies. +author: Abirami N (@abirami-n) +options: + config: + description: A list of VRF configurations. + type: list + elements: dict + suboptions: + name: + type: str + description: The name of the VRF interface. + required: true + members: + type: dict + description: Holds a dictionary mapping of list of interfaces linked to a VRF interface. + suboptions: + interfaces: + type: list + elements: dict + description: List of interface names that are linked to a specific VRF interface. + suboptions: + name: + type: str + description: The name of the physical interface. + state: + description: "The state of the configuration after module completion." 
+ type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +#show ip vrf +#VRF-NAME INTERFACES +#---------------------------------------------------------------- +#Vrfcheck1 +#Vrfcheck2 +#Vrfcheck3 Eth1/3 +# Eth1/14 +# Eth1/16 +# Eth1/17 +#Vrfcheck4 Eth1/5 +# Eth1/6 +# +- name: Configuring vrf deleted state + dellemc.enterprise_sonic.sonic_vrfs: + config: + - name: Vrfcheck4 + members: + interfaces: + - name: Eth1/6 + - name: Vrfcheck3 + members: + interfaces: + - name: Eth1/3 + - name: Eth1/14 + state: deleted +# +# After state: +# ------------ +# +#show ip vrf +#VRF-NAME INTERFACES +#---------------------------------------------------------------- +#Vrfcheck1 +#Vrfcheck2 +#Vrfcheck3 Eth1/16 +# Eth1/17 +#Vrfcheck4 Eth1/5 +# +# +# Using merged +# +# Before state: +# ------------- +# +#show ip vrf +#VRF-NAME INTERFACES +#---------------------------------------------------------------- +#Vrfcheck1 +#Vrfcheck2 +#Vrfcheck3 Eth1/16 +# Eth1/17 +#Vrfcheck4 +# +- name: Configuring vrf merged state + dellemc.enterprise_sonic.sonic_vrfs: + config: + - name: Vrfcheck4 + members: + interfaces: + - name: Eth1/5 + - name: Eth1/6 + - name: Vrfcheck3 + members: + interfaces: + - name: Eth1/3 + - name: Eth1/14 + state: merged +# +# After state: +# ------------ +# +#show ip vrf +#VRF-NAME INTERFACES +#---------------------------------------------------------------- +#Vrfcheck1 +#Vrfcheck2 +#Vrfcheck3 Eth1/3 +# Eth1/14 +# Eth1/16 +# Eth1/17 +#Vrfcheck4 Eth1/5 +# Eth1/6 +# +""" +RETURN = """ +before: + description: The configuration prior to the model invocation. + returned: always + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. +after: + description: The resulting configuration model invocation. + returned: when changed + type: list + sample: > + The configuration returned is always in the same format + of the parameters above. 
+commands: + description: The set of commands pushed to the remote device. + returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vrfs.vrfs import VrfsArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vrfs.vrfs import Vrfs + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=VrfsArgs.argument_spec, + supports_check_mode=True) + + result = Vrfs(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py new file mode 100644 index 00000000..e6613ba2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +############################################# +# WARNING # +############################################# +# +# This file is auto generated by the resource +# module builder playbook. +# +# Do not edit this file manually. +# +# Changes to this file will be over written +# by the resource module builder. +# +# Changes should be made in the model used to +# generate this file or in the resource module +# builder template. 
+# +############################################# + +""" +The module file for sonic_vxlans +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: sonic_vxlans +version_added: 1.0.0 +notes: +- Tested against Enterprise SONiC Distribution by Dell Technologies. +- Supports C(check_mode). +short_description: Manage VxLAN EVPN and its parameters +description: 'Manages interface attributes of Enterprise SONiC interfaces.' +author: Niraimadaiselvam M (@niraimadaiselvamm) +options: + config: + description: + - A list of VxLAN configurations. + - source_ip and evpn_nvo are required together. + type: list + elements: dict + suboptions: + name: + type: str + description: 'The name of the VxLAN.' + required: true + evpn_nvo: + type: str + description: 'EVPN nvo name' + source_ip: + description: 'The source IP address of the VTEP.' + type: str + primary_ip: + description: 'The vtep mclag primary ip address for this node' + type: str + vlan_map: + description: 'The list of VNI map of VLAN.' + type: list + elements: dict + suboptions: + vni: + type: int + description: 'Specifies the VNI ID.' + required: true + vlan: + type: int + description: 'VLAN ID for VNI VLAN map.' + vrf_map: + description: 'list of VNI map of VRF.' + type: list + elements: dict + suboptions: + vni: + type: int + description: 'Specifies the VNI ID.' + required: true + vrf: + type: str + description: 'VRF name for VNI VRF map.' + state: + description: 'The state of the configuration after module completion.' + type: str + choices: + - merged + - deleted + default: merged +""" +EXAMPLES = """ +# Using deleted +# +# Before state: +# ------------- +# +# do show running-configuration +# +#interface vxlan vteptest1 +# source-ip 1.1.1.1 +# primary-ip 2.2.2.2 +# map vni 101 vlan 11 +# map vni 102 vlan 12 +# map vni 101 vrf Vrfcheck1 +# map vni 102 vrf Vrfcheck2 +#! 
+#
+- name: "Test vxlans deleted state 01"
+  dellemc.enterprise_sonic.sonic_vxlans:
+    config:
+      - name: vteptest1
+        source_ip: 1.1.1.1
+        vlan_map:
+          - vni: 101
+            vlan: 11
+        vrf_map:
+          - vni: 101
+            vrf: Vrfcheck1
+    state: deleted
+#
+# After state:
+# ------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest1
+# source-ip 1.1.1.1
+# map vni 102 vlan 12
+# map vni 102 vrf Vrfcheck2
+#!
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest1
+# source-ip 1.1.1.1
+# map vni 102 vlan 12
+# map vni 102 vrf Vrfcheck2
+#!
+#
+- name: "Test vxlans deleted state 02"
+  dellemc.enterprise_sonic.sonic_vxlans:
+    config:
+    state: deleted
+#
+# After state:
+# ------------
+#
+# do show running-configuration
+#
+#!
+#
+# Using merged
+#
+# Before state:
+# -------------
+#
+# do show running-configuration
+#
+#!
+#
+- name: "Test vxlans merged state 01"
+  dellemc.enterprise_sonic.sonic_vxlans:
+    config:
+      - name: vteptest1
+        source_ip: 1.1.1.1
+        primary_ip: 2.2.2.2
+        evpn_nvo: nvo1
+        vlan_map:
+          - vni: 101
+            vlan: 11
+          - vni: 102
+            vlan: 12
+        vrf_map:
+          - vni: 101
+            vrf: Vrfcheck1
+          - vni: 102
+            vrf: Vrfcheck2
+    state: merged
+#
+# After state:
+# ------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest1
+# source-ip 1.1.1.1
+# primary-ip 2.2.2.2
+# map vni 101 vlan 11
+# map vni 102 vlan 12
+# map vni 101 vrf Vrfcheck1
+# map vni 102 vrf Vrfcheck2
+#!
+#
+"""
+RETURN = """
+before:
+  description: The configuration prior to the model invocation.
+  returned: always
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+after:
+  description: The resulting configuration model invocation.
+  returned: when changed
+  type: list
+  sample: >
+    The configuration returned is always in the same format
+    of the parameters above.
+commands:
+  description: The set of commands that are pushed to the remote device.
+ returned: always + type: list + sample: ['command 1', 'command 2', 'command 3'] +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vxlans.vxlans import VxlansArgs +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vxlans.vxlans import Vxlans + + +def main(): + """ + Main entry point for module execution + + :returns: the result form module invocation + """ + module = AnsibleModule(argument_spec=VxlansArgs.argument_spec, + supports_check_mode=True) + + result = Vxlans(module).execute_module() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/terminal/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/terminal/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/terminal/sonic.py b/ansible_collections/dellemc/enterprise_sonic/plugins/terminal/sonic.py new file mode 100644 index 00000000..20665736 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/plugins/terminal/sonic.py @@ -0,0 +1,73 @@ +# +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Copyright (c) 2020 Dell Inc. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.terminal import TerminalBase + +DOCUMENTATION = """ +short_description: Terminal plugin module for sonic CLI modules +version_added: 1.0.0 +""" + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"), + re.compile(br"\$ ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"Syntax error:"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + def on_open_shell(self): + try: + if self._get_prompt().endswith(b'$ '): + self._exec_cli_command(b'sonic-cli') + self._exec_cli_command(b'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to open sonic cli') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if prompt.endswith(b'#'): + self._exec_cli_command(b'exit') diff --git a/ansible_collections/dellemc/enterprise_sonic/rebuild.sh b/ansible_collections/dellemc/enterprise_sonic/rebuild.sh new file mode 100755 index 00000000..0e17e52b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/rebuild.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +namespace=$(grep -w "namespace" galaxy.yml | awk '{print $2}') +name=$(grep -w "name" galaxy.yml | awk '{print $2}') +version=$(grep -w "version" galaxy.yml | awk '{print $2}') +collection_file="$namespace-$name-$version.tar.gz" +#echo 
"$collection_file" + +rm -f /root/ansible_log.log +rm -rf /root/.ansible/collections/ansible_collections/dellemc/enterprise_sonic +rm "$collection_file" +ansible-galaxy collection build + +ansible-galaxy collection install "$collection_file" --force #-with-deps + +# ansible-playbook -i playbooks/common_examples/hosts playbooks/common_examples/sonic_l3_interfaces.yaml -vvvv +# ansible-playbook -i playbooks/common_examples/hosts playbooks/common_examples/sonic_l3_interfaces_config.yaml -vvvv +# ansible-playbook -i playbooks/common_examples/hosts playbooks/common_examples/sonic_l3_interfaces_test.yaml -vvvv +# ansible-playbook -i playbooks/common_examples/hosts -vvvv playbooks/common_examples/sonic_bgp_extcommunities.yaml -vvvv +# ansible-playbook -i playbooks/common_examples/hosts -vvvv playbooks/common_examples/sonic_bgp_extcommunities_config.yaml -vvv +# ansible-playbook -i playbooks/common_examples/hosts -vvvv playbooks/common_examples/test.yml -vvvv +# ansible-playbook -i playbooks/common_examples/hosts -vvvv playbooks/common_examples/sonic_bgp_as_paths_config.yaml -vvvv diff --git a/ansible_collections/dellemc/enterprise_sonic/requirements.txt b/ansible_collections/dellemc/enterprise_sonic/requirements.txt new file mode 100644 index 00000000..d7d0f2e2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/requirements.txt @@ -0,0 +1,2 @@ +paramiko>=2.7 +jinja2>=2.8 \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts b/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts new file mode 100644 index 00000000..b8ec3e04 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts @@ -0,0 +1,13 @@ +sonic1 ansible_host=100.94.81.17 ansible_user=admin ansible_password=admin +sonic2 ansible_host=100.94.81.19 ansible_user=admin ansible_password=admin +#sonic2 ansible_user=admin ansible_password=admin + +[datacenter] +sonic1 +sonic2 + +[datacenter:vars] 
+ansible_network_os=dellemc.enterprise_sonic.sonic +ansible_python_interpreter=/usr/bin/python3 +ansible_httpapi_use_ssl=true +ansible_httpapi_validate_certs=false diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/image-upgrade.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/image-upgrade.yaml new file mode 100644 index 00000000..0891c1e6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/image-upgrade.yaml @@ -0,0 +1,31 @@ +--- +- name: "Test SONiC CLI" + hosts: datacenter + gather_facts: no + connection: ssh + vars: + build_number: 120 + tasks: + - name: Download Image from build server + get_url: + #url: "http://10.14.1.208/work/sonic_archive1/dell_sonic_3.1.x_share/{{ build_number }}/sonic-broadcom.bin" + #url: http://devopsweb.force10networks.com/tftpboot/SONIC/dell_sonic_3.1.x_share/last_good/sonic-broadcom.bin + url: http://10.14.1.69/tftpboot/SONIC/dell_sonic_3.1.x_share/{{ build_number }}/sonic-broadcom.bin + dest: /tmp/sonic-broadcom-{{ build_number }}.bin + mode: '0777' + - name: Install the downloaded image + become: true + command: sonic_installer install /tmp/sonic-broadcom-{{ build_number }}.bin -y + register: output + - name: wait for 3 seconds + pause: + seconds: 3 + - name: Unconditionally reboot the machine with all defaults + become: true + reboot: + - name: "Wait for system to get ready" + command: "show system status" + register: result + until: result.stdout.find("System is ready") >= 0 + retries: 15 + delay: 15 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml new file mode 100644 index 00000000..93dd8544 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml @@ -0,0 +1,66 @@ +--- +idempotnet_condition: "{{ 'Passed' if (idempotent_task_output.failed == false 
and + idempotent_task_output.commands == empty and + idempotent_task_output.changed == false) + else 'Failed' + }}" +action_condition: "{{ 'Passed' if (action_task_output.failed == false and + action_task_output.commands != empty and + action_task_output.changed == true) + else 'Failed' + }}" +cli_test_condition: "{{ 'Passed' if(cli_tests_output.failed == false and + cli_tests_output.changed == false + ) + else 'Failed' + }}" + +single_run_condition: "{{ 'Passed' if(single_run_task_output.failed == false and + single_run_task_output.commands != empty and + single_run_task_output.changed == true + ) + else 'Failed' + }}" + +single_run_idem_condition: "{{ 'Passed' if (single_run_task_output.failed == false and + single_run_task_output.commands == empty and + single_run_task_output.changed == false) + else 'Failed' + }}" + +REPORT_DIR: "/var/www/html/ansible/regression" +empty: [] + +module_name1: debug + +std_name: STANDARD +native_name: NATIVE + +interface_mode: STANDARD + +default_interface_cli_std: default interface range Eth 1/5-1/10 +default_interface_cli_native: default interface range Ethernet20-40 + +default_interface_cli: + - "{{ default_interface_cli_std if std_name in interface_mode else default_interface_cli_native }}" + +native_eth1: Ethernet20 +native_eth2: Ethernet24 +native_eth3: Ethernet28 +native_eth4: Ethernet32 +native_eth5: Ethernet36 +native_eth6: Ethernet40 + +std_eth1: Eth1/5 +std_eth2: Eth1/6 +std_eth3: Eth1/7 +std_eth4: Eth1/8 +std_eth5: Eth1/9 +std_eth6: Eth1/10 + +interface1: "{{ std_eth1 if std_name in interface_mode else native_eth1 }}" +interface2: "{{ std_eth2 if std_name in interface_mode else native_eth2 }}" +interface3: "{{ std_eth3 if std_name in interface_mode else native_eth3 }}" +interface4: "{{ std_eth4 if std_name in interface_mode else native_eth4 }}" +interface5: "{{ std_eth5 if std_name in interface_mode else native_eth5 }}" +interface6: "{{ std_eth6 if std_name in interface_mode else native_eth6 }}" diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/meta/main.yaml new file mode 100644 index 00000000..a84afac6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/meta/main.yaml @@ -0,0 +1,3 @@ +--- +collections: + - dellemc.enterprise_sonic \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/action.facts.report.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/action.facts.report.yaml new file mode 100644 index 00000000..8c8ee5e5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/action.facts.report.yaml @@ -0,0 +1,10 @@ +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.1': { + 'status': action_condition, + 'module_stderr': action_task_output.module_stderr | default(action_task_output.msg | default('No Error')), + 'before': action_task_output.before | default('Not defined'), + 'after': action_task_output.after | default('Not defined'), + 'commands': action_task_output.commands | default('Not defined'), + 'configs': item.input | default('Not defined'), + }}}, recursive=True) }}" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.contains.test.facts.report.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.contains.test.facts.report.yaml new file mode 100644 index 00000000..66e263e6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.contains.test.facts.report.yaml @@ -0,0 +1,11 @@ +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.3': { + 'status': 
cli_contains_condition, + 'module_stderr': cli_tests_output.module_stderr | default(cli_tests_output.msg | default('No Error')), + 'commands': cli_tests_output.commands | default('Not defined'), + 'configs': item.input | default('Not defined'), + 'msg': cli_tests_output.msg | default('Not defined'), + }}}, recursive=True) }}" + # no_log: true + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.test.facts.report.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.test.facts.report.yaml new file mode 100644 index 00000000..b8165d1b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli.test.facts.report.yaml @@ -0,0 +1,11 @@ +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.3': { + 'status': cli_test_condition, + 'module_stderr': cli_tests_output.module_stderr | default(cli_tests_output.msg | default('No Error')), + 'commands': cli_tests_output.commands | default('Not defined'), + 'configs': item.input | default('Not defined'), + 'msg': cli_tests_output.msg | default('Not defined'), + }}}, recursive=True) }}" + # no_log: true + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli_tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli_tasks_template.yaml new file mode 100644 index 00000000..4c5fa8e7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/cli_tasks_template.yaml @@ -0,0 +1,14 @@ +- debug: msg="{{ base_cfg_path + item.name }}.cfg" + +- name: "Push CLI for validation" + vars: + ansible_connection: network_cli + sonic_config: + src: "{{ base_cfg_path + item.name }}.cfg" + register: cli_tests_output + ignore_errors: yes + +- debug: 
var=cli_tests_output +- import_role: + name: common + tasks_from: cli.test.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/idempotent.facts.report.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/idempotent.facts.report.yaml new file mode 100644 index 00000000..adeec696 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/idempotent.facts.report.yaml @@ -0,0 +1,12 @@ +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.2': { + 'status': idempotnet_condition, + 'module_stderr': idempotent_task_output.module_stderr | default(idempotent_task_output.msg | default('No Error')), + 'before': idempotent_task_output.before | default('Not defined'), + 'after': idempotent_task_output.after | default('Not defined'), + 'commands': idempotent_task_output.commands | default('Not defined'), + 'configs': item.input | default('Not defined'), + 'msg': idempotent_task_output.msg | default('Not defined'), + }}}, recursive=True) }}" + # no_log: true \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/main.yml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/main.yml @@ -0,0 +1 @@ + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/single.run.facts.report.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/single.run.facts.report.yaml new file mode 100644 index 00000000..e950b010 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/tasks/single.run.facts.report.yaml @@ -0,0 +1,10 @@ +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.1': { + 'status': single_run_task_status, + 'module_stderr': single_run_task_output.module_stderr | default(single_run_task_output.msg | default('No Error')), + 'before': single_run_task_output.before | default('Not defined'), + 'after': single_run_task_output.after | default('Not defined'), + 'commands': single_run_task_output.commands | default('Not defined'), + 'configs': item.input | default('Not defined'), + }}}, recursive=True) }}" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template.j2 b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template.j2 new file mode 100644 index 00000000..9ad1384b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template.j2 @@ -0,0 +1,14 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_{{module_name}}: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- debug: var=action_task_output + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.1': action_condition }}, recursive=True) }}" + #no_log: true + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template1.j2 b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template1.j2 new file mode 100644 index 00000000..1afb4859 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/templates/task_template1.j2 @@ -0,0 +1,14 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_{{module_name}}: 
+ config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- debug: var=action_task_output + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.1': action_condition }}, recursive=True) }}" + #no_log: true + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml new file mode 100644 index 00000000..291f615e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml @@ -0,0 +1,54 @@ +--- +ansible_connection: httpapi +module_name: aaa +tests: + - name: test_case_01 + description: aaa properties + state: merged + input: + authentication: + data: + fail_through: true + group: tacacs+ + local: true + + - name: test_case_02 + description: Update created aaa properties + state: merged + input: + authentication: + data: + fail_through: false + + - name: test_case_03 + description: Update aaa properties - change group + state: merged + input: + authentication: + data: + fail_through: true + group: radius + local: true + + - name: test_case_04 + description: Delete aaa properties + state: deleted + input: + authentication: + data: + group: radius + + - name: test_case_05 + description: aaa properties + state: merged + input: + authentication: + data: + fail_through: true + group: radius + local: true + +test_delete_all: + - name: del_all_test_case_01 + description: Delete aaa properties + state: deleted diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/meta/main.yaml new file mode 100644 index 00000000..d0ceaf6f --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/main.yml new file mode 100644 index 00000000..fdcaa9a4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/main.yml @@ -0,0 +1,17 @@ +- debug: msg="sonic_aaa Test started ..." + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "test_delete_all {{ module_name }} stated ..." + include_tasks: tasks_template_del.yaml + loop: "{{ test_delete_all }}" + when: test_delete_all is defined + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/preparation_tests.yaml new file mode 100644 index 00000000..e8a964f3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old radius server configurations + sonic_aaa: + config: {} + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template.yaml new file mode 100644 index 00000000..3b9f6b98 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_aaa: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_aaa: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..b50cc26c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_aaa: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_aaa: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/defaults/main.yml new file mode 100644 index 00000000..2e553a03 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/defaults/main.yml @@ -0,0 +1,6 @@ +--- +ansible_connection: httpapi + +preparations_tests: + init_prefix: + - "no ip 
prefix-list p1" \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/invalid.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/invalid.yaml new file mode 100644 index 00000000..8e7f1513 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/invalid.yaml @@ -0,0 +1,28 @@ +--- +- name: "Test sonic_api with invalid payload" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet64/config/description + method: "PATCH" + status_code: 204 + body: {"openconfig-interfaces:descriptio": "hi "} + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- name: "Test sonic_api with invalid url" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet64/config/ + method: "PATCH" + status_code: 204 + body: {"openconfig-interfaces:description": "hi "} + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/main.yaml new file mode 100644 index 00000000..8e9893d8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/main.yaml @@ -0,0 +1,10 @@ 
+--- +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- include_tasks: test_get.yaml +- include_tasks: test_patch.yaml +- include_tasks: test_post.yaml +- include_tasks: test_put.yaml +- include_tasks: test_delete.yaml +- include_tasks: invalid.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/patch.txt b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/patch.txt new file mode 100644 index 00000000..cacbc086 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/patch.txt @@ -0,0 +1,8 @@ +{"openconfig-if-ip:ipv4": { + "addresses": { + "address": [ + { + "ip": "1.1.1.1", + "config": {"ip": "1.1.1.1", "prefix-length": 24} + }]}} + } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/preparation_tests.yaml new file mode 100644 index 00000000..a9be2b6d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/preparation_tests.yaml @@ -0,0 +1,6 @@ +- name: "remove prefix" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_prefix }}" + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_delete.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_delete.yaml new file mode 100644 index 00000000..6b646cd5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_delete.yaml @@ -0,0 +1,12 @@ +--- + - name: "Test delete_api and check whether it returns code 204" + sonic_api: + url: 
data/openconfig-interfaces:interfaces/interface=Ethernet64/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses/address=1.1.1.1/config/prefix-length + method: "DELETE" + status_code: 204 + register: result + + - assert: + that: + - "result.changed == true" + - "204 in result.response[0]" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_get.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_get.yaml new file mode 100644 index 00000000..b705840b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_get.yaml @@ -0,0 +1,11 @@ +--- +- name: "check whether get_api returns code 200" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet8 + method: "GET" + status_code: 200 + register: result +- assert: + that: + - "result.changed == false" + - "200 in result.response[0]" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_patch.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_patch.yaml new file mode 100644 index 00000000..0e779612 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_patch.yaml @@ -0,0 +1,13 @@ +--- + - name: "Test patch_api and check whether code 204 is returned" + sonic_api: + url: data/openconfig-interfaces:interfaces/interface=Ethernet64/config/description + method: "PATCH" + status_code: 204 + body: {"openconfig-interfaces:description": "hi "} + register: result + + - assert: + that: + - "result.changed == true" + - "204 in result.response[0]" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_post.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_post.yaml new file mode 100644 index 00000000..50383888 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_post.yaml @@ -0,0 +1,27 @@ +--- + - name: "Test post_api and check whether code 201 is returned" + sonic_api: + url: data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set=p1 + method: "POST" + body: {"openconfig-routing-policy:config": {"name": "p1", "mode": "IPV4" }} + status_code: 201 + register: result + + - assert: + that: + - "result.changed == true" + - "201 in result.response[0]" + + - name: "Test post_api to create same prefix-set and check whether play is failed" + sonic_api: + url: data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set=p1 + method: "POST" + body: {"openconfig-routing-policy:config": {"name": "p1", "mode": "IPV4" }} + status_code: 201 + register: result + ignore_errors: yes + + - assert: + that: + - "result.failed == true" + - "result.msg is defined" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_put.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_put.yaml new file mode 100644 index 00000000..4b0a7abe --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_api/tasks/test_put.yaml @@ -0,0 +1,13 @@ +--- + - name: "Test put_api and check whether code 204 is returned" + sonic_api: + url: data/openconfig-network-instance:network-instances/network-instance=Vlan100 + method: "PUT" + body: {"openconfig-network-instance:network-instance": [{"name": "Vlan100", "config": {"name": "Vlan100"}}]} + status_code: 204 + register: result + + - assert: + that: + - "result.changed == true" + - "204 in result.response[0]" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml new file mode 100644 index 00000000..0eb7a6cb --- 
/dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml @@ -0,0 +1,250 @@ +--- +ansible_connection: httpapi +module_name: bgp + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +bgp_as_1: 51 +bgp_as_2: 52 +bgp_as_3: 53 + +preparations_tests: + init_vrf: + - "ip vrf {{vrf_1}}" + - "ip vrf {{vrf_2}}" + +tests_cli: + - name: cli_test_case_01 + description: creates bestpath BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.4 + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 667 + med_val: 7878 + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.5 + vrf_name: "{{vrf_1}}" + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 889 + med_val: 8854 + +tests: + - name: test_case_01 + description: creates BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.4 + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.5 + vrf_name: "{{vrf_1}}" + - name: test_case_02 + description: Updates BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.30 + log_neighbor_changes: True + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.31 + vrf_name: "{{vrf_1}}" + log_neighbor_changes: True + - name: test_case_03 + description: Deletes BGP properties + state: deleted + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.30 + log_neighbor_changes: True + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.31 + vrf_name: "{{vrf_1}}" + log_neighbor_changes: True + - name: test_case_04 + description: creates bestpath BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.4 + bestpath: + as_path: + 
confed: True + ignore: True + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 889 + med_val: 8854 + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.5 + vrf_name: "{{vrf_1}}" + bestpath: + as_path: + confed: True + ignore: True + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + confed: True + missing_as_worst: True + max_med: + on_startup: + timer: 556 + med_val: 5567 + - name: test_case_05 + description: Update bestpath BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.51 + bestpath: + as_path: + confed: False + ignore: False + compare_routerid: False + med: + confed: False + max_med: + on_startup: + timer: 776 + med_val: 7768 + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.52 + vrf_name: "{{vrf_1}}" + bestpath: + as_path: + multipath_relax: False + multipath_relax_as_set: False + compare_routerid: False + med: + missing_as_worst: False + max_med: + on_startup: + timer: 445 + med_val: 4458 + - name: test_case_06 + description: Update1 bestpath BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.51 + bestpath: + as_path: + confed: True + ignore: True + compare_routerid: True + med: + confed: True + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.52 + vrf_name: "{{ vrf_1 }}" + bestpath: + as_path: + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + missing_as_worst: True + - name: test_case_07 + description: Deletes BGP properties + state: deleted + input: + - bgp_as: "{{ bgp_as_1 }}" + bestpath: + as_path: + confed: False + ignore: False + compare_routerid: False + med: + confed: False + max_med: + on_startup: + timer: 889 + med_val: 8854 + - bgp_as: "{{ bgp_as_2 }}" + vrf_name: "{{vrf_1}}" + bestpath: + as_path: + multipath_relax: True + multipath_relax_as_set: False + compare_routerid: True + med: + 
missing_as_worst: False + max_med: + on_startup: + timer: 889 + med_val: 8854 + - name: test_case_08 + description: Update1 bestpath BGP properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + router_id: 110.2.2.51 + bestpath: + as_path: + confed: True + ignore: True + compare_routerid: True + med: + confed: True + - bgp_as: "{{ bgp_as_2 }}" + router_id: 110.2.2.52 + vrf_name: "{{ vrf_1 }}" + bestpath: + as_path: + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + missing_as_worst: True + - bgp_as: "{{ bgp_as_3 }}" + router_id: 120.2.2.52 + vrf_name: "{{ vrf_2 }}" + bestpath: + as_path: + multipath_relax: True + multipath_relax_as_set: True + compare_routerid: True + med: + missing_as_worst: True + - name: test_case_09 + description: Deletes BGP properties + state: deleted + input: + - bgp_as: "{{ bgp_as_2 }}" + vrf_name: "{{vrf_1}}" + - name: test_case_10 + description: Deletes all BGP properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..8a52f127 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes + \ No newline at end of 
file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml new file mode 100644 index 00000000..29d5392d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml @@ -0,0 +1,28 @@ +- debug: msg="sonic_interfaces Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} CLI validation started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests_cli }}" + +- name: "Test CLI validation started ..." + include_role: + name: common + tasks_from: cli_tasks_template.yaml + loop: "{{ tests_cli }}" + +- name: Clean up test + include_tasks: cleanup_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/preparation_tests.yaml new file mode 100644 index 00000000..ec5f139d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/preparation_tests.yaml @@ -0,0 +1,11 @@ +- name: "initialize VRFs" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_vrf }}" + +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/tasks_template.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/tasks_template.yaml new file mode 100644 index 00000000..7a7394e2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_bgp: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..72000656 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg @@ -0,0 +1,18 @@ +router bgp 52 vrf VrfReg1 + router-id 110.2.2.5 + log-neighbor-changes + bestpath as-path multipath-relax as-set + bestpath as-path ignore + bestpath as-path confed + bestpath med missing-as-worst confed + bestpath compare-routerid + timers 60 180 +router bgp 51 + router-id 110.2.2.4 + log-neighbor-changes + bestpath as-path multipath-relax as-set + bestpath as-path ignore + bestpath as-path confed + bestpath med missing-as-worst confed + bestpath compare-routerid + timers 60 180 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml new file mode 100644 index 00000000..ba23b3f5 
--- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml @@ -0,0 +1,324 @@ +--- +ansible_connection: httpapi +module_name: bgp_af + + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +preparations_tests: + init_route_map: + - route-map rmap_reg1 permit 11 + - route-map rmap_reg2 permit 11 + - route-map rmap_reg3 permit 11 + init_vrf: + - "ip vrf {{vrf_1}}" + - "ip vrf {{vrf_2}}" + init_bgp: + - bgp_as: "{{bgp_as_1}}" + router_id: 111.2.2.41 + log_neighbor_changes: False + - bgp_as: "{{bgp_as_2}}" + router_id: 111.2.2.42 + log_neighbor_changes: True + vrf_name: VrfReg1 +tests: + - name: test_case_01 + description: BGP AF properties + state: merged + input: + - bgp_as: "{{ bgp_as_1 }}" + address_family: + afis: + - afi: ipv4 + safi: unicast + - afi: ipv6 + safi: unicast + - afi: l2vpn + safi: evpn + - bgp_as: "{{ bgp_as_2 }}" + vrf_name: "{{vrf_1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + - afi: ipv6 + safi: unicast + - afi: l2vpn + safi: evpn + - name: test_case_02 + description: Update created BGP AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 2 + ibgp: 3 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg3 + - metric: "27" + protocol: ospf + route_map: rmap_reg1 + - metric: "28" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_pip: True + advertise_pip_ip: "1.1.1.1" + advertise_pip_peer_ip: "2.2.2.2" + advertise_svi_ip: True + advertise_all_vni: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - bgp_as: "{{bgp_as_2}}" + vrf_name: "{{vrf_1}}" 
+ address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 1 + ibgp: 2 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 2 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg3 + - metric: "27" + protocol: ospf + route_map: rmap_reg1 + - metric: "28" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - name: test_case_03 + description: Update2 created BGP AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + vrf_name: default + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 2 + ibgp: 3 + redistribute: + - metric: "30" + protocol: connected + route_map: rmap_reg1 + - metric: "36" + protocol: ospf + route_map: rmap_reg2 + - metric: "35" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "31" + protocol: connected + route_map: rmap_reg3 + - metric: "37" + protocol: ospf + route_map: rmap_reg1 + - metric: "38" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_pip: False + advertise_pip_ip: "3.3.3.3" + advertise_pip_peer_ip: "4.4.4.4" + advertise_svi_ip: False + advertise_all_vni: False + route_advertise_list: + - advertise_afi: ipv6 + route_map: rmap_reg2 + - bgp_as: "{{bgp_as_2}}" + vrf_name: "{{vrf_1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 4 + ibgp: 5 + redistribute: + - metric: "40" + protocol: connected + route_map: rmap_reg1 + - metric: "41" + protocol: ospf + route_map: rmap_reg2 + - metric: "42" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 9 + ibgp: 8 + redistribute: + - metric: "43" + protocol: 
connected + route_map: rmap_reg3 + - metric: "44" + protocol: ospf + route_map: rmap_reg1 + - metric: "45" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv6 + route_map: rmap_reg2 + - name: test_case_04 + description: Delete BGP AF properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + vrf_name: default + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 4 + ibgp: 3 + redistribute: + - metric: "30" + protocol: connected + route_map: rmap_reg1 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "37" + protocol: ospf + route_map: rmap_reg1 + - metric: "38" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_pip: False + advertise_pip_ip: "3.3.3.3" + advertise_pip_peer_ip: "4.4.4.4" + advertise_svi_ip: False + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - bgp_as: "{{bgp_as_2}}" + vrf_name: "{{vrf_1}}" + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 4 + ibgp: 3 + redistribute: + - metric: "41" + protocol: ospf + route_map: rmap_reg2 + - metric: "42" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 4 + ibgp: 6 + redistribute: + - metric: "43" + protocol: connected + route_map: rmap_reg3 + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv4 + - name: test_case_05 + description: Delete1 BGP AF properties + state: deleted + input: + - bgp_as: "{{ bgp_as_1 }}" + vrf_name: default + address_family: + afis: + - afi: ipv4 + safi: unicast + redistribute: + - afi: ipv6 + safi: unicast + redistribute: + - bgp_as: "{{bgp_as_2}}" + vrf_name: "{{vrf_1}}" + address_family: + afis: + - afi: l2vpn + safi: evpn + route_advertise_list: + - name: test_case_06 + description: Delete2 BGP AF properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + vrf_name: default + address_family: + afis: + - 
name: test_case_07 + description: Delete3 BGP AF properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..002f8ab7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/cleanup_tests.yaml @@ -0,0 +1,10 @@ +- name: Deletes old bgp_af + sonic_bgp_af: + config: [] + state: deleted + ignore_errors: yes +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/main.yml new file mode 100644 index 00000000..287404c2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/main.yml @@ -0,0 +1,15 @@ +- debug: msg="sonic_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + +- name: "cleanup {{ module_name }} started ..." 
+ include_tasks: cleanup_tests.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/preparation_tests.yaml new file mode 100644 index 00000000..82a2f308 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/preparation_tests.yaml @@ -0,0 +1,20 @@ +- name: "initialize route maps" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_route_map }}" +- name: "initialize VRFs" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_vrf }}" +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes +- name: Create bgp + sonic_bgp: + config: "{{ preparations_tests.init_bgp }}" + state: merged + ignore_errors: yes \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/tasks_template.yaml new file mode 100644 index 00000000..9fe7149d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_bgp_af: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_af: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml new file mode 100644 index 00000000..f2e31e4a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml @@ -0,0 +1,78 @@ +--- +ansible_connection: httpapi +module_name: bgp_as_paths + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +tests: + - name: test_case_01 + description: BGP properties + state: merged + input: + - name: test + members: + - "11" + permit: True + - name: test_1 + members: + - "101.101" + permit: False + - name: test_case_02 + description: Update created BGP properties + state: merged + input: + - name: test + members: + - "11" + - "22" + - "33" + - 44 + permit: True + - name: test_1 + members: + - "101.101" + - "201.201" + - "301.301" + permit: False + - name: test_2 + members: + - '111\\:' + - '11\\d+' + - '113\\*' + - '114\\' + permit: True + - name: test_case_03 + description: Delete BGP properties + state: deleted + input: + - name: test + members: + - "33" + - name: test_1 + members: + - "101.101" + - "201.201" + - "301.301" + permit: False + - name: test_2 + members: + - '111\\:' + - '11\\d+' + - '113\\*' + - '114\\' + permit: True + - name: test_case_04 + description: Delete BGP properties + state: deleted + input: + - name: test + members: + permit: + - name: test_case_05 + description: Delete BGP properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/meta/main.yaml @@ -0,0 +1,5 @@ +--- 
+collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/main.yml new file mode 100644 index 00000000..51c65668 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="sonic_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/preparation_tests.yaml new file mode 100644 index 00000000..f524c06f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old bgp as paths + sonic_bgp_as_paths: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/tasks_template.yaml new file mode 100644 index 00000000..70dbcdff --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_bgp_as_paths: + config: "{{ item.input }}" 
+ state: "{{ item.state }}" + register: action_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_as_paths: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml new file mode 100644 index 00000000..eb32d275 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml @@ -0,0 +1,101 @@ +--- +ansible_connection: httpapi +module_name: bgp_communities + +tests: + - name: test_case_01 + description: BGP Communities properties + state: merged + input: + - name: test + type: expanded + permit: false + match: ANY + members: + regex: + - "11" + - "12" + - name: test2 + type: standard + permit: true + match: ALL + members: + regex: + - "21" + - "22" + - name: test_case_02 + description: Update created BGP properties + state: merged + input: + - name: test + type: expanded + permit: false + match: ANY + members: + regex: + - "12" + - "13" + - 14 + - name: test2 + type: standard + permit: true + match: ALL + members: + regex: + - "23" + - "24" + - 25 + - name: test_case_03 + description: Update1 created BGP properties + state: merged + input: + - name: test + type: expanded + permit: true + match: ANY + members: + regex: + - "11" + - "12" + - name: test2 + type: standard + permit: false + match: ALL + members: + regex: + - "21" + - "22" + - name: test_case_04 + description: Delete BGP properties + state: deleted + input: + - name: test + type: expanded + members: + regex: + - "12" + - "13" + - 
name: test2 + type: standard + match: ALL + members: + regex: + - "23" + - "24" + - name: test_case_05 + description: Delete1 BGP properties + state: deleted + input: + - name: test + type: expanded + members: + regex: + - name: test_case_06 + description: Delete2 BGP properties + state: deleted + input: + - name: test + - name: test_case_07 + description: Delete2 BGP properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/main.yml new file mode 100644 index 00000000..94d190b3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="{{ module_name }} Test started ..." + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." 
+ include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/preparation_tests.yaml new file mode 100644 index 00000000..d204af4e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old bgp sonic_bgp_communities + sonic_bgp_communities: + config: [] + state: deleted + ignore_errors: yes \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/tasks_template.yaml new file mode 100644 index 00000000..e875d051 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/tasks/tasks_template.yaml @@ -0,0 +1,23 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_bgp_communities: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_communities: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml + +- debug: var=action_task_output \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml new file mode 100644 index 00000000..be6e96a8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml @@ -0,0 +1,321 @@ +--- +ansible_connection: httpapi +module_name: sonic_bgp_ext_communities + +tests: "{{ merged_tests + deleted_tests }}" + +merged_tests: + - name: test_case_01 + description: BGP Communities properties + state: merged + input: + - name: test_ext + type: expanded + permit: false + match: any + members: + regex: + - "11" + - "12" + - name: test_std + type: standard + permit: true + match: all + members: + route_target: + - "101:101" + - "201:201" + route_origin: + - "301:301" + - "401:401" + - name: test_case_02 + description: Update permit BGP Communities properties + state: merged + input: + - name: test_ext + type: expanded + permit: false + match: any + members: + regex: + - "13" + - "14" + - name: test_std + type: standard + permit: true + match: all + members: + route_target: + - "2201:101" + - "3301:201" + - "11.1.1.1:223" + - "11.1.1.2:224" + route_origin: + - "4401:301" + - "5501:401" + - name: test_case_03 + description: Update permit BGP Communities properties + state: merged + input: + - name: test_ext + type: expanded + permit: false + match: any + members: + regex: + - "15" + - "16" + - name: test_std + type: standard + permit: true + match: all + members: + route_target: + - "2202:101" + - "3302:201" + - "11.1.1.1:225" + - "11.1.1.2:226" + - "11.1.1.3:225" + - "11.1.1.4:226" + - name: test_case_04 + description: Update match BGP Communities properties + state: merged + input: + - name: test_ext + type: expanded + permit: false + match: any + members: + regex: + - "15" + - "16" + - name: test_std + type: standard + permit: true + match: all + members: + route_origin: + - "4403.301" + - "5503.401" + - name: test_case_05 + description: Create empty 
Communities properties + state: merged + input: + - name: test_ext1 + type: expanded + permit: true + match: any + - name: test_std1 + type: standard + permit: false + match: any + - name: test_case_06 + description: test BGP Communities properties + state: merged + input: + - name: test_comm112 + type: expanded + permit: true + match: any + members: + regex: + - "15" + - "16" + - name: test_comm + type: standard + permit: false + match: any + members: + route_origin: + - "4403.301" + - "5503.401" + +deleted_tests: + # Ethernet testcases started... + - name: del_test_case_0111 + description: BGP Communities properties + state: merged + input: + - name: test_ext + type: expanded + permit: false + match: any + members: + regex: + - "11" + - "12" + - "13" + - "14" + - "15" + - "16" + - name: test_std + type: standard + permit: true + match: all + members: + route_target: + - "101.101" + - "201.201" + - "102.101" + - "202.201" + - "1.1.1.1.101" + - "1.1.1.2.201" + route_origin: + - "301.301" + - "401.401" + - "302.301" + - "402.401" + - "303.301" + - "403.401" + - name: test_std11 + type: standard + permit: true + match: all + members: + route_target: + - "101.101" + - "201.201" + - "102.101" + - "202.201" + - "103.101" + - "203.201" + - "1.1.1.1.101" + - "1.1.1.2.201" + - "1.1.1.1.102" + - "1.1.1.2.203" + route_origin: + - "301.301" + - "401.401" + - "302.301" + - "402.401" + - "303.301" + - "403.401" + - name: test_std12 + type: standard + permit: true + match: all + members: + route_target: + - "101.101" + - "201.201" + - "102.101" + - "202.201" + - "103.101" + - "203.201" + - "1.1.1.1.101" + - "1.1.1.2.201" + - "1.1.1.1.102" + - "1.1.1.2.203" + route_origin: + - "301.301" + - "401.401" + - "302.301" + - "402.401" + - "303.301" + - "403.401" + - name: test_std12 + type: standard + permit: true + match: all + members: + route_target: + - "101.101" + - "201.201" + - "102.101" + - "202.201" + - "103.101" + - "203.201" + - "1.1.1.1.101" + - "1.1.1.2.201" + - 
"1.1.1.1.102" + - "1.1.1.2.203" + route_origin: + - "301.301" + - "401.401" + - "302.301" + - "402.401" + - "303.301" + - "403.401" + - name: test_std12 + type: standard + permit: true + match: all + members: + route_target: + - "103.101" + - "203.201" + - "1.1.1.1:102" + - "1.1.1.2.203" + route_origin: + - "301.301" + - "401.401" + - name: del_test_case_01 + description: BGP Communities properties + state: deleted + input: + - name: test_ext + type: expanded + members: + regex: + - "11" + - "12" + - name: test_std + type: standard + members: + route_target: + - "101:101" + - "201:201" + route_origin: + - "301:301" + - "401:401" + - name: test_std1 + type: standard + members: + route_target: + - "101:101" + - "201:201" + route_origin: + - "301:301" + - "401:401" + - name: test_case_02 + description: BGP Communities properties + state: deleted + input: + - name: test_ext + type: expanded + members: + regex: + - name: test_std + type: standard + members: + route_target: + - "1.1.1.1.101" + - name: test_std11 + type: standard + members: + route_origin: + - "301.301" + - "401.401" + - "1.1.1.1.101" + - name: del_test_case_03 + description: Update created BGP properties + state: deleted + input: + - name: test_ext + - name: test_std + type: standard + members: + route_target: + - name: test_std11 + type: standard + members: + route_origin: + - name: test_std12 + type: standard + members: + route_origin: + route_target: + - name: del_test_case_04 + description: Update created BGP properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - 
dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/main.yml new file mode 100644 index 00000000..d2ae6b41 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="{{ module_name }} Test started ..." + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +# - name: Display all variables/facts known for a host +# debug: +# var: ansible_facts.test_reports + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/preparation_tests.yaml new file mode 100644 index 00000000..e4a98edb --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old bgp sonic_bgp_communities + sonic_bgp_ext_communities: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/tasks_template.yaml new file mode 100644 index 00000000..380ef22f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/tasks/tasks_template.yaml @@ -0,0 +1,23 @@ +- name: "{{ item.name}} , {{ item.description}}" + 
sonic_bgp_ext_communities: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_ext_communities: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: true + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml + +- debug: var=action_task_output \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml new file mode 100644 index 00000000..35386125 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml @@ -0,0 +1,316 @@ +--- +ansible_connection: httpapi +module_name: sonic_bgp_neighbors + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +preparations_tests: + init_route_map: + - route-map rmap_reg1 permit 11 + - route-map rmap_reg2 permit 11 + - route-map rmap_reg3 permit 11 + - route-map rmap_reg4 permit 11 + - route-map rmap_reg5 permit 11 + init_vrf: + - "ip vrf {{vrf_1}}" + - "ip vrf {{vrf_2}}" + init_bgp: + - bgp_as: "{{bgp_as_1}}" + router_id: 111.2.2.41 + log_neighbor_changes: False + - bgp_as: "{{bgp_as_2}}" + router_id: 111.2.2.42 + log_neighbor_changes: True + vrf_name: VrfReg1 + +negative_tests: + - name: negative_test_case_01 + description: allowas_in beyond value + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: Ethernet12 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 11 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - name: 
negative_test_case_02 + description: BGP properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: Ethernet12 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 11 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg2 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + +tests: "{{ merged_tests }}" + +deleted_tests: + - name: test_case_del_01 + description: Delete peer group BGP properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + - name: SPINE1 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + - name: SPINE1 + + + +merged_tests: + - name: test_case_01 + description: BGP properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet12 + remote_as: 5 + peer_group: SPINE + advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + bfd: true + capability: + dynamic: true + extended_nexthop: true + - neighbor: 192.168.1.4 + - neighbor: 2::2 + - neighbor: Ethernet8 + - neighbor: 192.168.1.5 + remote_as: 6 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - neighbor: 3::3 + remote_as: 7 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet24 + remote_as: 11 + peer_group: SPINE + advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + bfd: true + capability: + dynamic: true + extended_nexthop: true + - neighbor: 192.168.2.2 + - neighbor: Ethernet28 + remote_as: 12 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - name: test_case_02 + description: Update BGP 
properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + - name: SPINE1 + neighbors: + - neighbor: Ethernet12 + remote_as: 111 + peer_group: SPINE + advertisement_interval: 11 + timers: + keepalive: 41 + holdtime: 51 + bfd: false + capability: + dynamic: false + extended_nexthop: false + - neighbor: 192.168.1.4 + - neighbor: Ethernet8 + - neighbor: 192.168.1.5 + remote_as: 112 + peer_group: SPINE1 + advertisement_interval: 21 + timers: + keepalive: 22 + holdtime: 23 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + - name: SPINE1 + neighbors: + - neighbor: Ethernet24 + remote_as: 213 + peer_group: SPINE1 + advertisement_interval: 44 + timers: + keepalive: 55 + holdtime: 44 + bfd: false + capability: + dynamic: false + extended_nexthop: false + - neighbor: 192.168.2.2 + - neighbor: Ethernet28 + remote_as: 214 + peer_group: SPINE + advertisement_interval: 45 + timers: + keepalive: 33 + holdtime: 34 + capability: + dynamic: false + - neighbor: 3::3 + remote_as: 215 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - name: test_case_03 + description: BGP ipv6 properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: 2::2 + - neighbor: 11::11 + remote_as: external + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - neighbor: 3::3 + remote_as: 7 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: 192.168.2.2 + - neighbor: Ethernet28 + remote_as: 12 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + capability: + dynamic: true + - name: test_case_04 + 
description: BGP remote-as properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet8 + remote_as: internal + - neighbor: 11::11 + remote_as: external + - neighbor: 67.1.1.1 + remote_as: 7 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet8 + remote_as: 1345 + - neighbor: 11::11 + remote_as: 2345 + - neighbor: 67.1.1.1 + remote_as: external + - name: test_case_05 + description: BGP remote-as properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet8 + remote_as: external + - neighbor: 11::11 + remote_as: internal + - neighbor: 67.1.1.1 + remote_as: internal + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peergroup: + - name: SPINE + neighbors: + - neighbor: Ethernet8 + remote_as: internal + - neighbor: 11::11 + remote_as: external + - neighbor: 67.1.1.1 + remote_as: 1123 \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml new file mode 100644 index 00000000..140eeeae --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml @@ -0,0 +1,880 @@ +--- +ansible_connection: httpapi +module_name: sonic_bgp_neighbors + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +preparations_tests: + init_route_map: + - route-map rmap_reg1 permit 11 + - route-map rmap_reg2 permit 11 + - route-map rmap_reg3 permit 11 + - route-map rmap_reg4 permit 11 + - route-map rmap_reg5 permit 11 + init_prefix_list: + - ip prefix-list p1 seq 1 permit 1.1.1.1/1 + - ip prefix-list p2 seq 2 permit 2.2.2.2/2 + init_vrf: + - "ip vrf {{vrf_1}}" + - "ip vrf {{vrf_2}}" + init_bgp: + - bgp_as: "{{bgp_as_1}}" + router_id: 
111.2.2.41 + log_neighbor_changes: False + - bgp_as: "{{bgp_as_1}}" + router_id: 111.2.2.41 + log_neighbor_changes: False + vrf_name: VrfReg1 + - bgp_as: "{{bgp_as_2}}" + router_id: 111.2.2.52 + log_neighbor_changes: True + vrf_name: VrfReg2 + + +tests: "{{ merged_tests + deleted_tests }}" + +action_tests: + - name: test_case_action_01 + description: Delete peer group BGP NEIGHBORS NEIGHBORS properties + state: merged + input: [] + +deleted_tests: + - name: test_case_del_01 + description: Delete BGP NEIGHBORS additional attributes + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + encrypted: true + dont_negotiate_capability: false + ebgp_multihop: + enabled: false + multihop_ttl: 2 + enforce_first_as: false + enforce_multihop: false + local_address: '1::1' + local_as: + nbr_description: "description 2" + override_capability: false + passive: false + port: 4 + solo: false + - neighbor: 192.168.1.5 + disable_connected_check: false + shutdown_msg: "msg2" + ttl_security: 8 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + encrypted: true + nbr_description: 'description 3' + strict_capability_match: false + v6only: false + + - name: test_case_del_02 + description: Delete BGP peer-group prefix-list attributes + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE3 + address_family: + afis: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: false + prefix_limit: + max_prefixes: 2 + prevent_teardown: false + warning_threshold: 88 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: false + prefix_limit: + max_prefixes: 3 + warning_threshold: 77 + restart_timer: 10 + 
prefix_list_in: p1 + prefix_list_out: p2 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 4 + warning_threshold: 66 + restart_timer: 15 + prefix_list_in: p2 + prefix_list_out: p1 + + - name: test_case_del_03 + description: BGP NEIGHBORS remote-as properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + remote_as: + peer_type: internal + address_family: + afis: + - afi: ipv4 + safi: unicast + allowas_in: + origin: false + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_type: internal + - neighbor: 11::11 + remote_as: + peer_type: external + - neighbor: 67.1.1.1 + remote_as: + peer_as: 7 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + remote_as: + peer_as: 1232 + capability: + dynamic: true + extended_nexthop: true + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_as: 1234 + - neighbor: 11::11 + remote_as: + peer_as: 4332 + - neighbor: 67.1.1.1 + remote_as: + peer_type: external + + - name: test_case_del_04 + description: BGP NEIGHBORS remote-as properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + - neighbor: 11::11 + - neighbor: 67.1.1.1 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + - neighbor: 11::11 + - neighbor: 67.1.1.1 + + - name: test_case_del_05 + description: BGP NEIGHBORS delete neighbor peergroup, bfd, and timers + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + bfd: + enabled: false + check_failure: false + profile: "profile 2" + timers: + keepalive: 40 + holdtime: 50 + connect_retry: 60 + neighbors: + - neighbor: "{{ interface2 }}" + peer_group: SPINE + bfd: + enabled: false + check_failure: false + profile: "profile 3" + timers: + keepalive: 41 + holdtime: 51 + connect_retry: 61 + - neighbor: 3::3 + peer_group: SPINE + - neighbor: 192.168.1.5 + peer_group: SPINE1 + - bgp_as: "{{bgp_as_1}}" + 
vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + bfd: + enabled: false + check_failure: false + profile: "profile 2" + timers: + keepalive: 40 + holdtime: 50 + connect_retry: 60 + neighbors: + - neighbor: "{{ interface4 }}" + peer_group: SPINE + - neighbor: "{{ interface3 }}" + peer_group: SPINE1 + bfd: + enabled: false + check_failure: false + profile: "profile 4" + timers: + keepalive: 55 + holdtime: 44 + connect_retry: 33 + - neighbor: 3::3 + peer_group: SPINE + + - name: test_case_del_06 + description: Delete peer group additional attributes + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + encrypted: true + dont_negotiate_capability: false + ebgp_multihop: + enabled: false + multihop_ttl: 2 + enforce_first_as: false + enforce_multihop: false + local_address: '1.1.1.1' + local_as: + as: 3 + no_prepend: false + replace_as: false + pg_description: "description 2" + override_capability: false + passive: false + solo: false + - name: SPINE1 + disable_connected_check: false + shutdown_msg: "msg2" + strict_capability_match: false + ttl_security: 8 + + - name: test_case_del_07 + description: Delete peer group BGP NEIGHBORS properties + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE1 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + - name: SPINE1 + + - name: test_case_del_08 + description: BGP NEIGHBORS remote-as properties + state: deleted + input: [] + +merged_tests: + - name: test_case_01 + description: BGP NEIGHBORS properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + remote_as: + peer_as: 12 + bfd: + enabled: true + check_failure: true + profile: "profile 1" + advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + connect_retry: 60 + capability: + dynamic: true + extended_nexthop: true + address_family: + afis: + - afi: ipv4 + safi: 
unicast + allowas_in: + value: 2 + neighbors: + - neighbor: "{{ interface2 }}" + remote_as: + peer_as: 12 + peer_group: SPINE + advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + connect_retry: 60 + bfd: + enabled: true + check_failure: true + profile: "profile 1" + capability: + dynamic: true + extended_nexthop: true + - neighbor: 192.168.1.4 + - neighbor: 2::2 + - neighbor: "{{ interface1 }}" + - neighbor: 192.168.1.5 + remote_as: + peer_as: 6 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - neighbor: 3::3 + remote_as: + peer_as: 7 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + remote_as: + peer_type: internal + bfd: + enabled: true + check_failure: true + profile: "profile 1" + advertisement_interval: 15 + timers: + keepalive: 50 + holdtime: 40 + connect_retry: 60 + capability: + dynamic: true + extended_nexthop: true + address_family: + afis: + - afi: ipv6 + safi: unicast + allowas_in: + value: 3 + neighbors: + - neighbor: "{{ interface3 }}" + remote_as: + peer_as: 11 + peer_group: SPINE + advertisement_interval: 10 + timers: + keepalive: 40 + holdtime: 50 + connect_retry: 60 + bfd: + enabled: true + check_failure: true + profile: "profile 2" + capability: + dynamic: true + extended_nexthop: true + - neighbor: 192.168.2.2 + - neighbor: "{{ interface4 }}" + remote_as: + peer_as: 12 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - name: test_case_02 + description: Update BGP NEIGHBORS properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + bfd: + enabled: false + check_failure: false + profile: "profile 2" + - name: SPINE1 + remote_as: + 
peer_type: external + bfd: + enabled: true + check_failure: true + profile: "profile 2" + advertisement_interval: 15 + timers: + keepalive: 30 + holdtime: 60 + connect_retry: 90 + capability: + dynamic: true + extended_nexthop: true + address_family: + afis: + - afi: ipv4 + safi: unicast + allowas_in: + origin: true + neighbors: + - neighbor: "{{ interface2 }}" + remote_as: + peer_as: 111 + peer_group: SPINE + advertisement_interval: 11 + timers: + keepalive: 41 + holdtime: 51 + connect_retry: 61 + bfd: + enabled: false + check_failure: false + profile: "profile 3" + capability: + dynamic: false + extended_nexthop: false + - neighbor: 192.168.1.4 + - neighbor: "{{ interface1 }}" + - neighbor: 192.168.1.5 + remote_as: + peer_as: 112 + peer_group: SPINE1 + advertisement_interval: 21 + timers: + keepalive: 22 + holdtime: 23 + connect_retry: 24 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + bfd: + enabled: false + check_failure: false + profile: "profile 2" + - name: SPINE1 + remote_as: + peer_type: internal + bfd: + enabled: true + check_failure: true + profile: "profile 2" + advertisement_interval: 30 + timers: + keepalive: 10 + holdtime: 20 + connect_retry: 30 + capability: + dynamic: true + extended_nexthop: true + address_family: + afis: + - afi: ipv6 + safi: unicast + allowas_in: + origin: true + neighbors: + - neighbor: "{{ interface3 }}" + remote_as: + peer_as: 212 + peer_group: SPINE1 + advertisement_interval: 44 + timers: + keepalive: 55 + holdtime: 44 + connect_retry: 33 + bfd: + enabled: false + check_failure: false + profile: "profile 4" + capability: + dynamic: false + extended_nexthop: false + - neighbor: 192.168.2.2 + - neighbor: "{{ interface4 }}" + remote_as: + peer_as: 214 + peer_group: SPINE + advertisement_interval: 45 + timers: + keepalive: 33 + holdtime: 34 + connect_retry: 35 + capability: + dynamic: false + - neighbor: 3::3 + remote_as: + peer_as: 215 + peer_group: SPINE + 
advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - name: test_case_03 + description: BGP NEIGHBORS ipv6 properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + remote_as: + peer_as: 1123 + neighbors: + - neighbor: 2::2 + - neighbor: 11::11 + remote_as: + peer_type: external + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - neighbor: 3::3 + remote_as: + peer_as: 556 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + remote_as: + peer_type: external + address_family: + afis: + - afi: ipv6 + safi: unicast + allowas_in: + value: 4 + neighbors: + - neighbor: 192.168.2.2 + - neighbor: "{{ interface4 }}" + remote_as: + peer_as: 557 + peer_group: SPINE + advertisement_interval: 20 + timers: + keepalive: 30 + holdtime: 20 + connect_retry: 10 + capability: + dynamic: true + - name: test_case_04 + description: BGP NEIGHBORS remote-as properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + remote_as: + peer_type: internal + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_type: internal + - neighbor: 11::11 + remote_as: + peer_type: external + - neighbor: 67.1.1.1 + remote_as: + peer_as: 7 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + remote_as: + peer_as: 1321 + address_family: + afis: + - afi: l2vpn + safi: evpn + allowas_in: + value: 4 + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_as: 1234 + - neighbor: 11::11 + remote_as: + peer_as: 4332 + - neighbor: 67.1.1.1 + remote_as: + peer_type: external + - name: test_case_05 + description: BGP NEIGHBORS remote-as properties + state: merged + input: + - 
bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_type: external + - neighbor: 11::11 + remote_as: + peer_type: internal + - neighbor: 67.1.1.1 + remote_as: + peer_type: internal + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + peer_group: + - name: SPINE + capability: + dynamic: true + extended_nexthop: true + neighbors: + - neighbor: "{{ interface1 }}" + remote_as: + peer_type: internal + - neighbor: 11::11 + remote_as: + peer_type: external + - neighbor: 67.1.1.1 + remote_as: + peer_as: 1123 + - name: test_case_06 + description: BGP NEIGHBORS configure additional attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + encrypted: true + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: "{{ interface5 }}" + local_as: + as: 2 + no_prepend: true + replace_as: true + pg_description: "description 1" + override_capability: true + passive: true + solo: true + - name: SPINE1 + disable_connected_check: true + shutdown_msg: "msg1" + strict_capability_match: true + ttl_security: 5 + neighbors: + - neighbor: "{{ interface1 }}" + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + encrypted: true + dont_negotiate_capability: true + ebgp_multihop: + enabled: true + multihop_ttl: 1 + enforce_first_as: true + enforce_multihop: true + local_address: "{{ interface5 }}" + local_as: + as: 2 + no_prepend: true + replace_as: true + nbr_description: "description 1" + override_capability: true + passive: true + port: 3 + solo: true + - neighbor: 192.168.1.5 + disable_connected_check: true + shutdown_msg: "msg1" + ttl_security: 5 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + auth_pwd: + pwd: "U2FsdGVkX19eY7P3qRyyjaFsQgjoSQE71IX6IeBRios=" + 
encrypted: true + nbr_description: 'description 2' + strict_capability_match: true + v6only: true + - name: test_case_07 + description: BGP NEIGHBORS modify additional attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE + dont_negotiate_capability: false + ebgp_multihop: + enabled: false + multihop_ttl: 2 + enforce_first_as: false + enforce_multihop: false + local_address: '1.1.1.1' + local_as: + as: 3 + no_prepend: false + replace_as: false + pg_description: "description 2" + override_capability: false + passive: false + solo: false + - name: SPINE1 + disable_connected_check: false + shutdown_msg: "msg2" + strict_capability_match: false + ttl_security: 8 + neighbors: + - neighbor: "{{ interface1 }}" + dont_negotiate_capability: false + ebgp_multihop: + enabled: false + multihop_ttl: 2 + enforce_first_as: false + enforce_multihop: false + local_address: '1::1' + local_as: + as: 3 + no_prepend: false + replace_as: false + nbr_description: "description 2" + override_capability: false + passive: false + port: 4 + solo: false + - neighbor: 192.168.1.5 + disable_connected_check: false + shutdown_msg: "msg2" + ttl_security: 8 + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + neighbors: + - neighbor: "{{ interface1 }}" + nbr_description: 'description 3' + strict_capability_match: false + v6only: false + - name: test_case_08 + description: Configure BGP peer-group prefix-list attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE3 + address_family: + afis: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: true + prefix_limit: + max_prefixes: 1 + prevent_teardown: true + warning_threshold: 80 + prefix_list_in: p1 + prefix_list_out: p2 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: true + prefix_limit: + max_prefixes: 2 + warning_threshold: 70 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: 
p1 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 3 + warning_threshold: 60 + restart_timer: 8 + prefix_list_in: p1 + prefix_list_out: p2 + - name: test_case_09 + description: Modify BGP peer-group prefix-list attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + peer_group: + - name: SPINE3 + address_family: + afis: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: false + prefix_limit: + max_prefixes: 2 + prevent_teardown: false + warning_threshold: 88 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: false + prefix_limit: + max_prefixes: 3 + warning_threshold: 77 + restart_timer: 10 + prefix_list_in: p1 + prefix_list_out: p2 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 4 + warning_threshold: 66 + restart_timer: 15 + prefix_list_in: p2 + prefix_list_out: p1 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/action_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/action_template.yaml new file mode 100644 index 00000000..98dee21e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/action_template.yaml @@ -0,0 +1,10 @@ +- name: "{{ item.name}} , {{ item.description}} Idempotent" + 
sonic_bgp_neighbors: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..d8f20a11 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/cleanup_tests.yaml @@ -0,0 +1,10 @@ +- name: Deletes old bgp_neighbors + sonic_bgp_neighbors: + config: [] + state: deleted + ignore_errors: yes +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/main.yml new file mode 100644 index 00000000..c622e0b3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/main.yml @@ -0,0 +1,19 @@ +- debug: msg="sonic_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "Test action {{ module_name }} started ..." + include_tasks: action_template.yaml + loop: "{{ action_tests }}" + +- name: "Cleanup {{ module_name }} started ..." 
+ include_tasks: cleanup_tests.yaml + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/preparation_tests.yaml new file mode 100644 index 00000000..c5917c78 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/preparation_tests.yaml @@ -0,0 +1,38 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize route maps" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_route_map }}" + ignore_errors: yes +- name: "initialize prefix_lists" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_prefix_list }}" + ignore_errors: yes +- name: "initialize VRFs" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_vrf }}" + ignore_errors: yes +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes +- name: Create bgp + sonic_bgp: + config: "{{ preparations_tests.init_bgp }}" + state: merged + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/tasks_template.yaml new file mode 100644 index 00000000..d170be5d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + 
sonic_bgp_neighbors: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_neighbors: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml new file mode 100644 index 00000000..dcb7b46e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml @@ -0,0 +1,468 @@ +--- +ansible_connection: httpapi +module_name: sonic_bgp_neighbors_af + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +preparations_tests: + init_route_map: + - route-map rmap_reg1 permit 11 + - route-map rmap_reg2 permit 11 + - route-map rmap_reg3 permit 11 + - route-map rmap_reg4 permit 11 + - route-map rmap_reg5 permit 11 + init_prefix_list: + - ip prefix-list p1 seq 1 permit 1.1.1.1/1 + - ip prefix-list p2 seq 2 permit 2.2.2.2/2 + init_vrf: + - "ip vrf {{vrf_1}}" + - "ip vrf {{vrf_2}}" + init_bgp: + - bgp_as: "{{bgp_as_1}}" + router_id: 111.2.2.41 + log_neighbor_changes: False + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + router_id: 111.2.2.42 + log_neighbor_changes: True + init_bgp_neighbors: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + - neighbor: 13.1.1.1 + - neighbor: 14.1.1.1 + - neighbor: 15.1.1.1 + - neighbor: "{{ interface3 }}" + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{vrf_1}}" + neighbors: + - neighbor: 21.1.1.1 + - neighbor: 22.1.1.1 + - neighbor: 23.1.1.1 + - neighbor: 24.1.1.1 + - neighbor: 
"{{ interface4 }}" + +negative_tests: + - name: negative_test_case_01 + description: allowas_in beyond value + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: "{{ interface3 }}" + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 11 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - name: negative_test_case_02 + description: BGP NEIGHBORS AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: "{{ interface3 }}" + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 11 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg2 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true +tests1: + - name: test_case_02 + description: Update BGP NEIGHBORS AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + origin: true + route_map: + - name: rmap_reg1 + direction: in + route_reflector_client: false + route_server_client: true + - afi: ipv6 + safi: unicast + allowas_in: + value: 3 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: false + - afi: l2vpn + safi: evpn + allowas_in: + value: 4 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{ vrf_1 }}" + neighbors: + - neighbor: "{{ interface4 }}" + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 4 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - afi: ipv6 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg1 + direction: out + 
route_reflector_client: true + route_server_client: false + - afi: l2vpn + safi: evpn + allowas_in: + origin: true + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + + +tests: + - name: test_case_01 + description: BGP NEIGHBORS AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg1 + direction: in + route_reflector_client: false + route_server_client: true + activate: true + - afi: ipv6 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: false + activate: false + - afi: l2vpn + safi: evpn + allowas_in: + origin: true + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + activate: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{ vrf_1 }}" + neighbors: + - neighbor: "{{ interface4 }}" + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 4 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + activate: false + - afi: ipv6 + safi: unicast + allowas_in: + value: 5 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: false + activate: true + - afi: l2vpn + safi: evpn + allowas_in: + origin: true + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + activate: false + - name: test_case_02 + description: Update BGP NEIGHBORS AF properties + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + origin: 
true + route_map: + - name: rmap_reg1 + direction: in + route_reflector_client: false + route_server_client: true + - afi: ipv6 + safi: unicast + allowas_in: + value: 3 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: false + - afi: l2vpn + safi: evpn + allowas_in: + value: 4 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - bgp_as: "{{bgp_as_1}}" + vrf_name: "{{ vrf_1 }}" + neighbors: + - neighbor: "{{ interface4 }}" + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 4 + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - afi: ipv6 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: false + - afi: l2vpn + safi: evpn + allowas_in: + origin: true + route_map: + - name: rmap_reg1 + direction: in + - name: rmap_reg1 + direction: out + route_reflector_client: true + route_server_client: true + - name: test_case_03 + description: BGP NEIGHBORS AF + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg1 + direction: out + route_reflector_client: false + route_server_client: true + - name: test_case_04 + description: Change route map + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 12.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + allowas_in: + value: 7 + route_map: + - name: rmap_reg2 + direction: out + route_reflector_client: false + route_server_client: true + - name: test_case_05 + description: Configure BGP neighbor prefix-list attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 15.1.1.1 + 
address_family: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: true + prefix_limit: + max_prefixes: 1 + prevent_teardown: true + warning_threshold: 80 + prefix_list_in: p1 + prefix_list_out: p2 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: true + prefix_limit: + max_prefixes: 2 + warning_threshold: 70 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 3 + warning_threshold: 60 + restart_timer: 8 + prefix_list_in: p1 + prefix_list_out: p2 + - name: test_case_06 + description: Modify BGP neighbor prefix-list attributes + state: merged + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 15.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: false + prefix_limit: + max_prefixes: 2 + prevent_teardown: false + warning_threshold: 88 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: false + prefix_limit: + max_prefixes: 3 + warning_threshold: 77 + restart_timer: 10 + prefix_list_in: p1 + prefix_list_out: p2 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 4 + warning_threshold: 66 + restart_timer: 15 + prefix_list_in: p2 + prefix_list_out: p1 + - name: test_case_07 + description: Delete BGP neighbor prefix-list attributes + state: deleted + input: + - bgp_as: "{{bgp_as_1}}" + neighbors: + - neighbor: 15.1.1.1 + address_family: + - afi: ipv4 + safi: unicast + ip_afi: + default_policy_name: rmap_reg2 + send_default_route: false + prefix_limit: + max_prefixes: 2 + prevent_teardown: false + warning_threshold: 88 + restart_timer: 5 + prefix_list_in: p2 + prefix_list_out: p1 + - afi: ipv6 + safi: unicast + ip_afi: + default_policy_name: rmap_reg1 + send_default_route: false + prefix_limit: + max_prefixes: 3 + 
warning_threshold: 77 + restart_timer: 10 + prefix_list_in: p1 + prefix_list_out: p2 + - afi: l2vpn + safi: evpn + prefix_limit: + max_prefixes: 4 + warning_threshold: 66 + restart_timer: 15 + prefix_list_in: p2 + prefix_list_out: p1 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..453ed64e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/cleanup_tests.yaml @@ -0,0 +1,15 @@ +- name: Deletes old bgp_neighbors_af + sonic_bgp_neighbors_af: + config: [] + state: deleted + ignore_errors: yes +- name: Deletes old bgp_neighbors + sonic_bgp_neighbors: + config: [] + state: deleted + ignore_errors: yes +- name: Deletes old bgp + sonic_bgp: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/main.yml new file mode 100644 index 00000000..012bf4ae --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/main.yml @@ -0,0 +1,21 @@ +- debug: msg="sonic_interfaces Test started ..." 
+
+- debug: var=interface1
+
+- name: Preparations test, creates prerequisite BGP configuration
+  include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+  include_tasks: tasks_template.yaml
+  loop: "{{ tests }}"
+
+- name: "Cleanup {{ module_name }} started ..."
+  include_tasks: cleanup_tests.yaml
+
+# - name: "Test negative {{ module_name }} started ..."
+#   include_tasks: negative_tasks_template.yaml
+#   loop: "{{ negative_tests }}"
+
+- name: Display all variables/facts known for a host
+  debug:
+    var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/preparation_tests.yaml
new file mode 100644
index 00000000..93f74215
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/preparation_tests.yaml
@@ -0,0 +1,43 @@
+- name: Delete existing mclag
+  sonic_mclag:
+    config:
+    state: deleted
+  ignore_errors: yes
+- name: Deletes old vxlans
+  sonic_vxlans:
+    config: []
+    state: deleted
+  ignore_errors: yes
+- name: "initialize route maps"
+  vars:
+    ansible_connection: network_cli
+  sonic_config:
+    commands: "{{ preparations_tests.init_route_map }}"
+  ignore_errors: yes
+- name: "initialize prefix lists"
+  vars:
+    ansible_connection: network_cli
+  sonic_config:
+    commands: "{{ preparations_tests.init_prefix_list }}"
+  ignore_errors: yes
+- name: "initialize VRFs"
+  vars:
+    ansible_connection: network_cli
+  sonic_config:
+    commands: "{{ preparations_tests.init_vrf }}"
+  ignore_errors: yes
+- name: Deletes old bgp
+  sonic_bgp:
+    config: []
+    state: deleted
+  ignore_errors: yes
+- name: Create bgp
+  sonic_bgp:
+    config: "{{ preparations_tests.init_bgp }}"
+    state: merged
+  ignore_errors: yes
+- name: Create bgp neighbors
+  sonic_bgp_neighbors:
+    config: "{{ 
preparations_tests.init_bgp_neighbors }}" + state: merged + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/tasks_template.yaml new file mode 100644 index 00000000..1ac758be --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_bgp_neighbors_af: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_bgp_neighbors_af: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/defaults/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/defaults/main.yaml new file mode 100644 index 00000000..ae1e7fb6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/defaults/main.yaml @@ -0,0 +1 @@ +ansible_connection: network_cli \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic 
+dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/bad_operator.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/bad_operator.yaml new file mode 100644 index 00000000..79150a75 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/bad_operator.yaml @@ -0,0 +1,33 @@ +--- + + - name: Test sonic commands with wait_for negative case + sonic_command: + commands: + - 'show platform' + - 'show system' + wait_for: + - result[0] contains Fel + register: result + ignore_errors: yes + + - assert: + that: + - "result.failed == True" + - "result.failed_conditions is defined" + - "result.msg is defined" + + - name: Test sonic commands with wait_for and match=any + sonic_command: + commands: + - 'show platform' + - 'show system' + wait_for: + - result[0] contains Fel + - result[1] contains sonic + match: any + register: result + + - assert: + that: + - "result.failed == False" + - "result.changed == False" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/cli_command.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/cli_command.yaml new file mode 100644 index 00000000..f6294f30 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/cli_command.yaml @@ -0,0 +1,26 @@ +--- + + - block: + + - name: get output for single command + register: result + cli_command: + command: show system + + - assert: + that: + - result.changed == false + - result.stdout is defined + + - name: send invalid command + register: result + ignore_errors: true + cli_command: + command: show foo + + - assert: + that: + - result.failed == true + - result.msg is defined + + when: ansible_connection == 'network_cli' diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/contains.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/contains.yaml new file mode 100644 index 00000000..40a7f310 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/contains.yaml @@ -0,0 +1,29 @@ +--- + + + - name: Test sonic single command with wait_for + sonic_command: + commands: 'show platform ' + wait_for: + - result[0] contains Del + register: result + + - assert: + that: + - "result.changed == false" + - "result.stdout is defined" + + - name: Test sonic multiple command with wait_for + sonic_command: + commands: + - 'show platform' + - 'show system' + wait_for: + - result[0] contains Dell + - result[1] contains sonic + register: result + + - assert: + that: + - "result.changed == false" + - "result.stdout is defined" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/invalid.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/invalid.yaml new file mode 100644 index 00000000..0f5278bb --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/invalid.yaml @@ -0,0 +1,25 @@ +--- +- name: run invalid command + register: result + ignore_errors: true + sonic_command: + commands: + - show foo + +- assert: + that: + - result.failed == true + - result.msg is defined + +- name: run commands that include invalid command + register: result + ignore_errors: true + sonic_command: + commands: + - show system + - show foo + +- assert: + that: + - result.failed == true + - result.msg is defined diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/main.yaml new file mode 100644 index 
00000000..8dd58fbe --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/main.yaml @@ -0,0 +1,12 @@ +--- + +- include_tasks: output.yaml +- include_tasks: contains.yaml +- include_tasks: bad_operator.yaml +- include_tasks: invalid.yaml +- include_tasks: cli_command.yaml +- include_tasks: test_local.yaml + vars: + ansible_connection: local +- include_tasks: timeout.yaml +- include_tasks: prompt.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/output.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/output.yaml new file mode 100644 index 00000000..5f6840cf --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/output.yaml @@ -0,0 +1,24 @@ +--- + + - name: Test sonic single command + sonic_command: + commands: 'show interface status' + register: result + + - assert: + that: + - "result.changed == false" + - "result.stdout is defined" + + - name: Test sonic multiple commands + sonic_command: + commands: + - 'show platform' + - 'show system' + register: result + + - assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/prompt.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/prompt.yaml new file mode 100644 index 00000000..0bf3fc03 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/prompt.yaml @@ -0,0 +1,15 @@ +--- + + + - name: Test sonic command with prompt handling + sonic_command: + commands: + - command: 'image remove all' + prompt: '\[y/N\]:$' + answer: 'N' + register: result + + - assert: + that: + - "result.changed == false" + - "result.stdout is defined" diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/test_local.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/test_local.yaml new file mode 100644 index 00000000..e765756b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/test_local.yaml @@ -0,0 +1,16 @@ +--- + + - block: + + - name: test failure for local connection + register: result + cli_command: + command: show platform + ignore_errors: true + + - assert: + that: + - result.failed == true + - "'Connection type local is not valid for this module' in result.msg" + when: + ansible_connection == 'local' diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/timeout.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/timeout.yaml new file mode 100644 index 00000000..1f1f2ffd --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_command/tasks/timeout.yaml @@ -0,0 +1,30 @@ +--- + +- name: test bad condition + register: result + ignore_errors: true + sonic_command: + commands: + - show system + wait_for: + - result[0] contains bad_value_string + +- assert: + that: + - result.failed == true + - result.msg is defined + +- name: test bad condition with less retries + register: result + ignore_errors: true + sonic_command: + commands: + - show system + retries: 5 + wait_for: + - result[0] contains bad_value_string + +- assert: + that: + - result.failed == true + - result.msg is defined diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/defaults/main.yml new file mode 100644 index 00000000..38be9bc7 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/defaults/main.yml @@ -0,0 +1,83 @@ +--- +module_name: sonic_config +ansible_connection: network_cli +idempotent_condition: "{{ 'Passed' if ( idempotent_task_output.failed == false and + idempotent_task_output.commands is not defined and + idempotent_task_output.changed == false) + else 'Failed' + }}" +action_condition: "{{ 'Passed' if ( action_task_output.failed == false and + action_task_output.commands is defined and + action_task_output.changed == true) + else 'Failed' + }}" + +prompt_tc_condition: "{{ 'Passed' if ( prompt_tc.commands is defined ) + else 'Failed' + }}" + +backup_condition: "{{ 'Passed' if (backup_tc.stat.exists == true) + else 'Failed' + }}" + +replace_or_exact_condition: "{{ 'Passed' if ( replace_tc.commands is defined and replace_tc.commands |length>2) else 'Failed' }}" + +empty: [] + +bgp_as_1: 51 +bgp_as_2: 52 + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +snmp_location: snmp_chennai +snmp_contact: snmp_devops + +preparations_tests: + clean_cfg_lines: + - no interface PortChannel 11 + - no interface PortChannel 1 + - no interface PortChannel 2 + - no snmp-server community abcd + - no snmp-server community efgh + - no snmp-server community ijkl + - no snmp-server community mnop + - no snmp-server community qrst + - no snmp-server community uvwx + - no snmp-server location + - no snmp-server contact + - interface Vlan 11 + clean_interfaces: + - parent: interface Ethernet8 + lines: + - no description + +tests: + # Ethernet testcases started... 
+ - name: test_case_01 + description: Configure ip access-list using 'before' and 'after' option on SONIC device + input: + lines: + - mtu 4444 + parents: ['interface PortChannel 11'] + before: ['snmp-server community abcd'] + after: ['snmp-server community efgh'] + - name: test_case_02 + description: Test sonic config module with single CLI + input: + before: 'snmp-server community ijkl' + commands: 'snmp-server community mnop' + - name: test_case_03 + description: Test sonic config module with multiple CLI + input: + commands: ['snmp-server community qrst', 'snmp-server community uvwx'] + - name: test_case_04 + description: Configure interface description using parents option on SONIC device + input: + lines: + - description 'hi' + parents: ['interface Ethernet8'] + - name: test_case_05 + description: Configure cli using source file + input: + src: snmp.j2 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/backup.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/backup.yaml new file mode 100644 index 00000000..c6bb3542 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/backup.yaml @@ -0,0 +1,16 @@ +- name: create configurable backup path + sonic_config: + backup: yes + backup_options: + filename: backup.cfg + dir_path: /tmp/ + register: backup_file + +- name: Verify file is created or not + stat: path={{backup_file.backup_path}} 
+ register: backup_tc + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'backup_test_case.1': backup_condition }}, recursive=True) }}" + no_log: true diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/main.yml new file mode 100644 index 00000000..933bb71f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/main.yml @@ -0,0 +1,24 @@ +- debug: msg="sonic_config Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "Test Prompt validation started ..." + include_tasks: prompt.yaml + +- name: "Test sublevel command using replace in {{ module_name }} started ..." + include_tasks: replace_tasks_template.yaml + +- name: "Test sublevel command using match in {{ module_name }} started ..." + include_tasks: match_template.yaml + +- name: "Test backup option in {{ module_name }} started ..." 
+ include_tasks: backup.yaml + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/match_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/match_template.yaml new file mode 100644 index 00000000..a92a809c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/match_template.yaml @@ -0,0 +1,64 @@ +- name: Configure sub level command using default match on SONIC device + sonic_config: + lines: ['switchport access Vlan 11', 'mtu 1500', 'no shutdown'] + parents: ['interface PortChannel 2'] + register: action_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'match_test_case.1': action_condition }}, recursive=True) }}" + no_log: true + +- name: Configure sub level command using default match on SONIC device idempotent + sonic_config: + lines: ['switchport access Vlan 11', 'mtu 1500', 'no shutdown'] + parents: ['interface PortChannel 2'] + register: idempotent_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'match_test_case.2': idempotent_condition }}, recursive=True) }}" + when: item.idemponent is not defined or item.idemponent != false + no_log: true + +- name: Rerun same configure task interchanging positions of sublevel commands using "match=strict" on SONIC device + sonic_config: + lines: ['mtu 1500', 'switchport access Vlan 11', 'no shutdown'] + parents: ['interface PortChannel 2'] + match: strict + register: action_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'match_test_case.3': 
action_condition }}, recursive=True) }}" + no_log: true + +- name: Configure sublevel command using "match=exact" on SONIC device + sonic_config: + lines: ['switchport access Vlan 11', 'mtu 1500', 'no shutdown', 'graceful-shutdown'] + parents: ['interface PortChannel 2'] + match: exact + register: replace_tc + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'match_test_case.4': replace_or_exact_condition }}, recursive=True) }}" + no_log: true + +- name: Rerun sublevel command using "match=exact" on SONIC device idempotent + sonic_config: + lines: ['switchport access Vlan 11', 'mtu 1500', 'no shutdown', 'graceful-shutdown'] + parents: ['interface PortChannel 2'] + match: exact + register: idempotent_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'match_test_case.5': idempotent_condition }}, recursive=True) }}" + when: item.idemponent is not defined or item.idemponent != false + no_log: true diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/preparation_tests.yaml new file mode 100644 index 00000000..5187a4fa --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/preparation_tests.yaml @@ -0,0 +1,11 @@ +- name: "clean_cfg_lines" + sonic_config: + commands: "{{ preparations_tests.clean_cfg_lines }}" + register: prep_tasks + +- name: "parent based clean cfg" + sonic_config: + lines: "{{ item.lines }}" + parents: "{{ item.parent }}" + register: prep_tasks + with_items: "{{preparations_tests.clean_interfaces}}" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/prompt.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/prompt.yaml new file mode 100644 index 00000000..ac5f7057 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/prompt.yaml @@ -0,0 +1,12 @@ +- name: Test sonic config with prompt handling + sonic_config: + commands: + - command: 'do image remove all' + prompt: '\[y/N\]:$' + answer: 'N' + register: prompt_tc + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'prompt_test_case.1': prompt_tc_condition }}, recursive=True) }}" + no_log: true diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/replace_tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/replace_tasks_template.yaml new file mode 100644 index 00000000..9ab50b36 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/replace_tasks_template.yaml @@ -0,0 +1,38 @@ +- name: Configure sub level command using "default" option on SONIC device + sonic_config: + lines: ['mtu 1312', 'no shutdown', 'no graceful-shutdown'] + parents: ['interface PortChannel 1'] + register: action_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'replace_test_case.1': action_condition }}, recursive=True) }}" + no_log: true + +- name: Configure sub level command using replace "block" option on SONIC device + sonic_config: + lines: ['mtu 1312', 'no shutdown', 'graceful-shutdown'] + parents: ['interface PortChannel 1'] + replace: block + register: replace_tc + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'replace_test_case.2': replace_or_exact_condition }}, recursive=True) }}" + no_log: 
true + +- name: Configure sub level command using replace "block" option on SONIC device idempotent + sonic_config: + lines: ['mtu 1312', 'no shutdown', 'graceful-shutdown'] + parents: ['interface PortChannel 1'] + replace: block + register: idempotent_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {'replace_test_case.3': idempotent_condition }}, recursive=True) }}" + when: item.idemponent is not defined or item.idemponent != false + no_log: true diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/tasks_template.yaml new file mode 100644 index 00000000..fa2e7ace --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_config: "{{ item.input }}" + register: action_task_output + ignore_errors: true + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.1': action_condition }}, recursive=True) }}" + no_log: true + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_config: "{{ item.input }}" + register: idempotent_task_output + ignore_errors: true + when: item.idemponent is not defined or item.idemponent != false + +- set_fact: + ansible_facts: + test_reports: "{{ ansible_facts['test_reports']| default({})| combine({module_name: {item.name+'.2': idempotent_condition }}, recursive=True) }}" + when: item.idemponent is not defined or item.idemponent != false + no_log: true diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/snmp.j2 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/snmp.j2 new file mode 100644 index 00000000..cd93c9b5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/snmp.j2 @@ -0,0 +1,6 @@ +{% if snmp_location is defined and snmp_location %} +snmp-server location "{{ snmp_location }}" +{% endif %} +{% if snmp_contact is defined and snmp_contact %} +snmp-server contact "{{ snmp_contact }}" +{% endif %} diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/src.txt b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/src.txt new file mode 100644 index 00000000..fbfeac92 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_config/templates/src.txt @@ -0,0 +1,3 @@ +interface ethernet 40 +description hello +mtu 1800 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml new file mode 100644 index 00000000..d391e690 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml @@ -0,0 +1,153 @@ +--- +ansible_connection: httpapi +module_name: interfaces + +po1: PortChannel50 +po2: PortChannel51 + +lo1: Loopback 100 +lo2: Loopback 101 + +vlan1: 500 +vlan2: 501 + +preparations_tests: + add_vlans_input: + - vlan_id: "{{ vlan1 }}" + - vlan_id: "{{ vlan2 }}" + add_lag_interfaces: + - name: "{{ po1 }}" + - name: "{{ po2 }}" + delete_port_configurations: + - "no interface {{ lo1 }}" + - "no interface {{ lo2 }}" + +tests: + + # Ethernet testcases started... 
+ - name: test_case_01 + description: Update interface parameters + state: merged + input: + - name: "{{ interface1 }}" + description: ansible Ethernet4 descr + mtu: 6445 + enabled: false + - name: test_case_02 + description: Update interface parameters + state: merged + input: + - name: "{{ interface1 }}" + description: ansible Ethernet4 descr + mtu: 6444 + enabled: true + - name: "{{ interface3 }}" + description: ansible Ethernet12 descr + mtu: 6000 + enabled: true + - name: "{{ interface2 }}" + description: ansible Ethernet8 descr + mtu: 5666 + enabled: false + - name: "{{ interface4 }}" + description: ansible Ethernet16 descr + mtu: 5222 + enabled: true + - name: test_case_03 + description: Update interface parameters + state: deleted + input: + - name: "{{ interface1 }}" + description: + - name: "{{ interface3 }}" + mtu: + - name: "{{ interface2 }}" + enabled: + - name: "{{ interface4 }}" + - name: test_case_04 + description: Update interface parameters + state: merged + input: + - name: "{{ interface1 }}" + description: ansible Ethernet4 descr + mtu: 6444 + enabled: true + - name: "{{ interface3 }}" + description: ansible Ethernet12 descr + - name: "{{ interface4 }}" + description: ansible eth56 descr +# Loopback test cases started + - name: test_case_05 + description: Loopback interface parameters + state: merged + input: + - name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os + - name: "{{ lo2 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os + - name: test_case_06 + description: Loopback delete interface parameters + state: deleted + input: + - name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os + - name: test_case_07 + description: Loopback delete interface parameters + state: deleted + input: + - name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os + - name: "{{ lo2 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os +# Vlan test cases started + - name: 
test_case_08 + description: Update interface parameters + state: merged + input: + - name: "Vlan{{ vlan1 }}" # Vlan: desc, enabled not configurable in sonic-os + mtu: 5000 + - name: "Vlan{{ vlan2 }}" + mtu: 5001 + - name: test_case_09 + description: Update interface parameters + state: deleted + input: + - name: "Vlan{{ vlan1 }}" + mtu: + - name: "Vlan{{ vlan2 }}" + - name: test_case_10 + description: Update interface parameters + state: merged + input: + - name: "Vlan{{ vlan1 }}" + mtu: 6676 + - name: "Vlan{{ vlan2 }}" + mtu: 5113 +# Portchannel testcase started + - name: test_case_12 + description: Update interface parameters + state: merged + input: + - name: "{{ po1 }}" + mtu: 3434 + enabled: true + - name: "{{ po2 }}" + description: ansible PortChannel51 descr + mtu: 5454 + enabled: true + - name: test_case_13 + description: Update interface parameters + state: deleted + input: + - name: "{{ po1 }}" + - name: "{{ po2 }}" + - name: test_case_14 + description: Update interface parameters + state: merged + input: + - name: "{{ po1 }}" + mtu: 3434 + enabled: true + - name: "{{ po2 }}" + description: ansible PortChannel51 descr + mtu: 5454 + enabled: true + - name: test_case_15 + description: Update interface parameters + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/main.yml new 
file mode 100644 index 00000000..51c65668 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="sonic_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/preparation_tests.yaml new file mode 100644 index 00000000..ce448251 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/preparation_tests.yaml @@ -0,0 +1,44 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: "delete loopback interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.delete_port_configurations }}" + register: output + ignore_errors: yes +- name: delete VLANs + sonic_vlans: + config: [] + state: deleted + ignore_errors: yes +- name: delete existing portchannels + sonic_lag_interfaces: + config: [] + state: deleted + ignore_errors: yes +- name: create VLANs + sonic_vlans: + config: "{{ preparations_tests.add_vlans_input }}" + state: merged + ignore_errors: yes +- name: create sonic_lag_interfaces "merged" state + sonic_lag_interfaces: + 
config: "{{ preparations_tests.add_lag_interfaces }}" + state: merged + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml new file mode 100644 index 00000000..debf1a6c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml new file mode 100644 index 00000000..8117a89c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml @@ -0,0 +1,101 @@ +--- +ansible_connection: httpapi +module_name: l2_interfaces + +preparations_tests: + add_vlans_input: + - vlan_id: 500 + - vlan_id: 501 + - vlan_id: 502 + - vlan_id: 503 + - vlan_id: 504 + - vlan_id: 505 + - vlan_id: 400 + - vlan_id: 401 + - vlan_id: 402 + delete_port_configurations: + - name: "{{ interface1 }}" + - name: "{{ interface2 }}" + - name: "{{ interface3 }}" + add_lag_interfaces: + - name: PortChannel100 + - name: PortChannel101 + +tests: + # merge test cases started + - name: test_case_01 + 
description: Add access and trunk VLANs + state: merged + input: + - name: "{{ interface1 }}" + access: + vlan: 400 + - name: "{{ interface2 }}" + trunk: + allowed_vlans: + - vlan: 501 + - vlan: 502 + - name: "{{ interface4 }}" + access: + vlan: 401 + trunk: + allowed_vlans: + - vlan: 504 + - vlan: 505 + - name: PortChannel100 + access: + vlan: 400 + - name: "{{ interface3 }}" + trunk: + allowed_vlans: + - vlan: 501 + - vlan: 502 + - name: test_case_02 + description: Update trunk VLANs + state: merged + input: + - name: "{{ interface3 }}" + trunk: + allowed_vlans: + - vlan: 502 + - vlan: 503 + access: + vlan: 402 + # delete test cases started + - name: test_case_03 + description: Delete Access VLAN + state: deleted + input: + - name: "{{ interface1 }}" + access: + - name: test_case_04 + description: Delete specific trunk VLANs + state: deleted + input: + - name: "{{ interface3 }}" + trunk: + allowed_vlans: + - vlan: 502 + - name: test_case_05 + description: Delete access VLANs from both associations + state: deleted + input: + - name: "{{ interface3 }}" + access: + vlan: + - name: test_case_06 + description: Delete all trunk VLANs + state: deleted + input: + - name: "{{ interface3 }}" + trunk: + allowed_vlans: + - name: test_case_07 + description: Delete all associations in specific interface + state: deleted + input: + - name: "{{ interface2 }}" + - name: test_case_08 + description: Delete All associations in all interfaces + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file 
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/main.yml new file mode 100644 index 00000000..be018fea --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/main.yml @@ -0,0 +1,12 @@ +- debug: msg="sonic_l2_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/preparation_tests.yaml new file mode 100644 index 00000000..12561232 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/preparation_tests.yaml @@ -0,0 +1,35 @@ +--- +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: create sonic_lag_interfaces "merged" state + sonic_lag_interfaces: + config: "{{ preparations_tests.add_lag_interfaces }}" + state: merged + ignore_errors: yes +- name: Delete VLANs Inputs + sonic_vlans: + config: "{{ preparations_tests.add_vlans_input }}" + state: deleted + register: merge_vlans_output + ignore_errors: yes +- name: create VLANs + sonic_vlans: + config: "{{ 
preparations_tests.add_vlans_input }}" + state: merged + register: merge_vlans_output + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/tasks_template.yaml new file mode 100644 index 00000000..8c883e5d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_l2_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_l2_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml new file mode 100644 index 00000000..de632204 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml @@ -0,0 +1,244 @@ +--- +ansible_connection: httpapi +module_name: l3_interfaces + +preparations_tests: + delete_port_configurations: + - name: "{{ interface1 }}" + - name: "{{ interface2 }}" + - name: "{{ interface3 }}" + - name: "{{ interface4 }}" + init_loopback: + - "interface Loopback 100" + - "interface Loopback 101" + - "interface Portchannel 100" + - "interface Portchannel 101" + - "interface Vlan 100" + - "interface Vlan 101" + - "interface Vlan 102" + - "interface Vlan 501" + 
+tests: + # Vlan testcases started... + - name: test_case_01 + description: Update interface parameters + state: merged + input: + - name: Loopback100 + ipv4: + addresses: + - address: 101.1.1.1/32 + - address: 102.1.1.1/32 + secondary: True + - name: vlan 100 + ipv6: + enabled: true + addresses: + - address: 150::1/32 + - name: po 100 + ipv4: + addresses: + - address: 180.1.1.1/16 + - name: vlan 102 + ipv4: + anycast_addresses: + - 1.1.1.1/16 + - name: test_case_02 + description: Update interface parameters + state: merged + input: + - name: Loopback100 + ipv4: + addresses: + - address: 99.1.1.1/32 + - address: 103.1.1.1/32 + secondary: True + ipv6: + enabled: false + addresses: + - address: 101::1/128 + - name: vlan 100 + ipv4: + addresses: + - address: 150.1.1.1/16 + - name: po 100 + ipv6: + enabled: true + addresses: + - address: 180::1/16 + - name: vlan 102 + ipv4: + anycast_addresses: + - 11.12.13.14/12 + - name: test_case_03 + description: Update interface parameters + state: merged + input: + - name: loopback 100 + ipv4: + addresses: + - address: 102.1.1.1/32 + secondary: True + ipv6: + addresses: + - address: 102::1/128 + - name: vlan 100 + ipv4: + addresses: + - address: 152.1.1.1/16 + ipv6: + enabled: true + addresses: + - address: 152::1/16 + - name: po 100 + ipv4: + addresses: + - address: 182.1.1.1/16 + ipv6: + enabled: true + addresses: + - address: 182::1/16 + - name: test_case_04 + description: Update interface parameters + state: merged + input: + - name: "{{ interface1 }}" + ipv4: + addresses: + - address: 80.1.1.1/16 + - name: "{{ interface2 }}" + ipv6: + enabled: true + addresses: + - address: 90::1/16 + - name: test_case_05 + description: Update interface parameters + state: merged + input: + - name: "{{ interface1 }}" + ipv4: + addresses: + - address: 81.1.1.1/16 + - name: "{{ interface2 }}" + ipv6: + enabled: false + addresses: + - address: 90::1/16 + - address: 91::1/16 + - name: test_case_06 + description: Update interface parameters + 
state: merged + input: + - name: "{{ interface1 }}" + ipv4: + addresses: + - address: 83.1.1.1/16 + ipv6: + enabled: true + addresses: + - address: 83::1/16 + - address: 84::1/16 + - name: "{{ interface2 }}" + ipv4: + addresses: + - address: 91.1.1.1/16 + ipv6: + addresses: + - address: 90::1/16 + - address: 91::1/16 + - address: 92::1/16 + - address: 93::1/16 + - name: test_case_07 + description: Update interface parameters + state: deleted + input: + - name: "{{ interface1 }}" + ipv4: + addresses: + - address: 82.1.1.1/16 + - name: "{{ interface2 }}" + ipv6: + enabled: false + addresses: + - address: 91::1/16 + - name: vlan 102 + ipv4: + anycast_addresses: + - 1.1.1.1/16 + - name: test_case_08 + description: Update interface parameters + state: deleted + input: + - name: "{{ interface1 }}" + - name: "{{ interface2 }}" + - name: Vlan100 + - name: test_case_09 + description: Logical interfaces config + state: merged + input: + - name: Vlan101 + ipv4: + addresses: + - address: 104.1.1.1/16 + ipv6: + addresses: + - address: 1041::1/16 + - address: 1042::1/16 + - name: Loopback101 + ipv4: + addresses: + - address: 204.1.1.1/32 + ipv6: + addresses: + - address: 2041::1/128 + - address: 2042::1/128 + - name: PortChannel101 + ipv4: + addresses: + - address: 214.1.1.1/16 + ipv6: + addresses: + - address: 3041::1/16 + - address: 3042::1/16 + - name: test_case_10 + description: Naming tests + state: merged + input: + - name: vlan 501 + ipv4: + addresses: + - address: 105.1.1.1/16 + ipv6: + enabled: true + addresses: + - address: 1051::1/16 + - address: 1052::1/16 + - name: lo101 + ipv4: + addresses: + - address: 204.1.1.7/32 + ipv6: + enabled: true + addresses: + - address: 2041::1/128 + - address: 2042::1/128 + - name: portchannel 100 # po100 or portchannel100 + ipv4: + addresses: + - address: 64.1.1.1/16 + ipv6: + enabled: true + addresses: + - address: 3051::1/16 + - address: 3052::1/16 + - name: test_case_11 + description: Naming tests + state: deleted + input: + - 
name: vlan 501 + - name: lo101 + - name: portchannel 100 # po100 or portchannel100 + - name: test_case_12 + description: Update interface parameters + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/main.yml new file mode 100644 index 00000000..51c65668 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="sonic_l3_interfaces Test started ..." + +- name: Preparation tests, initialize interfaces + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." 
+ include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml new file mode 100644 index 00000000..66700d53 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml @@ -0,0 +1,28 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: Deletes old l3 interfaces + sonic_l3_interfaces: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: "initialize init_loopback" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_loopback }}" + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/tasks_template.yaml new file mode 100644 index 00000000..c2d30198 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_l3_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: 
action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_l3_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml new file mode 100644 index 00000000..3f77caba --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml @@ -0,0 +1,89 @@ +--- +ansible_connection: httpapi +module_name: lag_interfaces + +preparations_tests: + delete_port_configurations: + - name: "{{ interface1 }}" + - name: "{{ interface2 }}" + - name: "{{ interface3 }}" + - name: "{{ interface4 }}" + +tests: + # Ethernet testcases started... 
+ - name: test_case_01 + description: Update Ethernet interface parameters + state: merged + input: + - name: PortChannel43 + mode: static + - name: PortChannel44 + mode: lacp + - name: test_case_02 + description: Update interface parameters + state: merged + input: + - name: PortChannel40 + mode: static + members: + interfaces: + - member: "{{ interface1 }}" + - name: PortChannel41 + mode: lacp + members: + interfaces: + - member: "{{ interface3 }}" + - name: PortChannel43 + mode: static + - name: test_case_03 + description: Update interface parameters + state: merged + input: + - name: PortChannel40 + mode: static + members: + interfaces: + - member: "{{ interface1 }}" + - member: "{{ interface2 }}" + - name: PortChannel41 + mode: lacp + members: + interfaces: + - member: "{{ interface3 }}" + - member: "{{ interface4 }}" + - name: PortChannel42 + - name: test_case_04 + description: Delete interface parameters + state: deleted + input: + - name: PortChannel40 + members: + interfaces: + - member: "{{ interface1 }}" + - name: PortChannel41 + members: + interfaces: + - name: PortChannel42 + - name: test_case_05 + description: Update interface parameters + state: merged + input: + - name: portchannel 40 + members: + interfaces: + - member: "{{ interface1 }}" + - name: po41 + members: + interfaces: + - member: "{{ interface3 }}" + - name: test_case_06 + description: Create standalone portchannels + state: merged + input: + - name: portchannel42 + - name: portchannel 12 + - name: po10 + - name: test_case_07 + description: Update interface parameters + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/meta/main.yaml new file mode 100644 index 00000000..78f79f8c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/meta/main.yaml @@ 
-0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/main.yml new file mode 100644 index 00000000..1338f5f8 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/main.yml @@ -0,0 +1,13 @@ +- debug: msg="sonic_lag_interfaces Test started ..." + +- name: Preparation test, default lag-interface configs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/preparation_tests.yaml new file mode 100644 index 00000000..55b64d5f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/preparation_tests.yaml @@ -0,0 +1,22 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: Delete all port channels + sonic_lag_interfaces: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/tasks_template.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/tasks_template.yaml new file mode 100644 index 00000000..92767743 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/tasks/tasks_template.yaml @@ -0,0 +1,22 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_lag_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_lag_interfaces: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml new file mode 100644 index 00000000..a2df2d36 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml @@ -0,0 +1,107 @@ +--- +ansible_connection: httpapi +module_name: mclag +preparations_tests: + add_vlans_input: + - vlan_id: 4 + - vlan_id: 5 + - vlan_id: 6 + - vlan_id: 2 + add_lag_interfaces: + - name: Po10 + - name: Po11 + - name: Po12 + - name: Po13 + +merged_tests: + - name: test_case_01 + description: MCLAG properties + state: merged + input: + domain_id: 1 + source_address: 3.3.3.3 + peer_address: 1.1.1.1 + peer_link: "{{ interface1 }}" + keepalive: 3 + session_timeout: 300 + system_mac: 00:00:00:01:01:01 + + - name: test_case_02 + description: Update created MCLAG properties + state: merged + input: + domain_id: 1 + source_address: 3.3.3.4 + peer_address: 1.1.1.2 + peer_link: "{{ interface2 }}" + keepalive: 2 + session_timeout: 
350 + system_mac: 00:00:00:11:11:11 + + - name: test_case_03 + description: Update MCLAG properties - associate vlans and portchannels + state: merged + input: + domain_id: 1 + unique_ip: + vlans: + - vlan: vlan4 + - vlan: vlan5 + members: + portchannels: + - lag: Po10 + - lag: Po11 + +delete_all: + - name: del_all_test_case_01 + description: Delete MCLAG properties + state: deleted + +updated_tests: + - name: test_case_05 + description: Create new MCLAG with all properties including vlans and portchannels + state: merged + input: + domain_id: 2 + source_address: 3.3.3.5 + peer_address: 1.1.1.3 + peer_link: "{{ interface3 }}" + keepalive: 3 + session_timeout: 300 + system_mac: 00:00:00:01:01:01 + unique_ip: + vlans: + - vlan: vlan2 + - vlan: vlan6 + members: + portchannels: + - lag: Po13 + - lag: Po12 + + - name: del_test_case_01 + description: Delete MCLAG properties + state: deleted + input: + domain_id: 2 + source_address: 3.3.3.3 + peer_address: 1.1.1.1 + peer_link: "{{ interface3 }}" + keepalive: 3 + session_timeout: 300 + system_mac: 00:00:00:01:01:01 + unique_ip: + vlans: + - vlan: vlan2 + members: + portchannels: + - lag: Po13 + + - name: del_test_case_02 + description: Delete MCLAG associated vlans and portchannels + state: deleted + input: + domain_id: 2 + unique_ip: + vlans: + members: + portchannels: diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/meta/main.yaml new file mode 100644 index 00000000..d0ceaf6f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml new file mode 100644 index 00000000..071ef949 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml @@ -0,0 +1,22 @@ +- debug: msg="sonic_mclag Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ merged_tests }}" + +- name: "delete_all {{ module_name }} started ..." + include_tasks: tasks_template_del.yaml + loop: "{{ delete_all }}" + when: delete_all is defined + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ updated_tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/preparation_tests.yaml new file mode 100644 index 00000000..843a1e83 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/preparation_tests.yaml @@ -0,0 +1,21 @@ +--- +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: create sonic_lag_interfaces "merged" state + sonic_lag_interfaces: + config: "{{ preparations_tests.add_lag_interfaces }}" + state: merged + ignore_errors: yes +- name: create VLANs Inputs + sonic_vlans: + config: "{{ preparations_tests.add_vlans_input }}" + state: merged + ignore_errors: yes +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml new file mode 100644 index 00000000..add970c4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_mclag: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_mclag: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..9001d50a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_mclag: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_mclag: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml new file mode 100644 index 00000000..860297d6 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml @@ -0,0 +1,209 @@ +--- +ansible_connection: httpapi +module_name: ntp + +po1: Portchannel 100 +vlan1: Vlan 100 +lo1: Loopback 100 + +mgmt_vrf: mgmt + +ntp_ip_server_1: 10.11.0.1 +ntp_ip_server_2: 10.11.0.2 +ntp_ip_server_3: 10.11.0.3 +ntp_host_server: pool.ntp.org + +preparations_tests: + delete_interfaces: + - "no interface {{ po1 }}" + - "no interface {{ vlan1 }}" + - "no interface {{ lo1 }}" + init_interfaces: + - "interface {{ po1 }}" + - "interface {{ vlan1 }}" + - "interface {{ lo1 }}" + delete_mgmt_vrf: + - "no ip vrf mgmt" + create_mgmt_vrf: + - "ip vrf mgmt" + +tests: + - name: test_case_01 + description: Configure a NTP source interface + state: merged + input: + source_interfaces: + - "{{ interface1 }}" + + - name: test_case_02 + description: Create a NTP server + state: merged + input: + servers: + - address: "{{ ntp_ip_server_1 }}" + + - name: test_case_03 + description: Configure NTP source interfaces + state: merged + input: + source_interfaces: + - "{{ interface1 }}" + - "{{ po1 }}" + - "{{ vlan1 }}" + - "{{ lo1 }}" + + - name: test_case_04 + description: Create NTP servers + state: merged + input: + servers: + - address: "{{ ntp_ip_server_1 }}" + minpoll: 6 + maxpoll: 9 + - address: "{{ ntp_ip_server_2 }}" + minpoll: 5 + maxpoll: 8 + - address: "{{ ntp_host_server }}" + minpoll: 8 + maxpoll: 10 + + - name: test_case_05 + description: Configure NTP source interfaces and servers + state: merged + input: + source_interfaces: + - "{{ interface2 }}" + - "{{ po1 }}" + servers: + - address: "{{ ntp_ip_server_1 }}" + minpoll: 6 + maxpoll: 9 + - address: "{{ ntp_ip_server_3 }}" + minpoll: 7 + maxpoll: 10 + + - name: test_case_06 + description: Delete a NTP source interface + state: deleted + input: + source_interfaces: + - "{{ interface1 }}" + + - name: test_case_07 + description: Delete a NTP server + state: deleted + input: + servers: + - address: "{{ 
ntp_ip_server_1 }}" + + - name: test_case_08 + description: Delete several NTP source interfaces + state: deleted + input: + source_interfaces: + - "{{ interface2 }}" + - "{{ po1 }}" + + - name: test_case_09 + description: Delete several NTP servers + state: deleted + input: + servers: + - address: "{{ ntp_ip_server_1 }}" + - address: "{{ ntp_ip_server_3 }}" + + - name: test_case_10 + description: Delete NTP source interfaces and servers + state: deleted + input: + source_interfaces: + - "{{ interface2 }}" + - "{{ vlan1 }}" + - "{{ lo1 }}" + servers: + - address: "{{ ntp_ip_server_1 }}" + - address: "{{ ntp_host_server }}" + + - name: test_case_11 + description: Configure NTP VRF + state: merged + input: + vrf: "{{ mgmt_vrf }}" + + - name: test_case_12 + description: Delete NTP VRF + state: deleted + input: + vrf: "{{ mgmt_vrf }}" + + - name: test_case_13 + description: Enable NTP authentication + state: merged + input: + enable_ntp_auth: true + + - name: test_case_14 + description: Create NTP authentication keys + state: merged + input: + ntp_keys: + - key_id: 2 + key_type: NTP_AUTH_SHA1 + key_value: U2FsdGVkX197E9oiCGzwZlZxZpF5f/ZI8v+SGJdQvmA= + encrypted: true + - key_id: 6 + key_type: NTP_AUTH_MD5 + key_value: U2FsdGVkX1/wWVxmcp59mJQO6uzhFEHIxScdCbIqJh4= + encrypted: true + + - name: test_case_15 + description: Configure NTP trusted keys + state: merged + input: + trusted_keys: + - 2 + - 6 + + - name: test_case_16 + description: Create NTP servers with key + state: merged + input: + servers: + - address: "{{ ntp_ip_server_1 }}" + key_id: 2 + minpoll: 6 + maxpoll: 9 + + - name: test_case_17 + description: Delete NTP trusted keys + state: deleted + input: + trusted_keys: + - 2 + - 6 + + - name: test_case_18 + description: Delete NTP server + state: deleted + input: + servers: + - address: "{{ ntp_ip_server_1 }}" + + - name: test_case_19 + description: Delete NTP authentication keys + state: deleted + input: + ntp_keys: + - key_id: 2 + - key_id: 6 + + - name: 
test_case_20 + description: Delete NTP authentication + state: deleted + input: + enable_ntp_auth: true + + - name: test_case_21 + description: Delete all NTP configurations + state: deleted + input: {} diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..bb31455e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/cleanup_tests.yaml @@ -0,0 +1,15 @@ +- name: Delete interfaces + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.delete_interfaces }}" + register: output + ignore_errors: yes + +- name: Delete MGMT VRF + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.delete_mgmt_vrf }}" + register: output + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/main.yml new file mode 100644 index 00000000..ba1574e3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/main.yml @@ -0,0 +1,16 @@ +- debug: msg="sonic_ntp Test started ..." + +- name: Preparations tests + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." 
+ include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "Cleanup test {{ module_name }} started" + include_tasks: cleanup_tests.yaml + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/preparation_tests.yaml new file mode 100644 index 00000000..f74a2019 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/preparation_tests.yaml @@ -0,0 +1,21 @@ +- name: Delete existing NTP configurations + sonic_ntp: + config: {} + state: deleted + ignore_errors: yes + +- name: Initialize interfaces + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_interfaces }}" + register: output + ignore_errors: yes + +- name: Create MGMT VRF + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.create_mgmt_vrf }}" + register: output + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/tasks_template.yaml new file mode 100644 index 00000000..c580db84 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_ntp: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_ntp: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + 
ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml new file mode 100644 index 00000000..402088be --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml @@ -0,0 +1,57 @@ +--- +ansible_connection: httpapi +module_name: port_breakout + +preparations_tests: + delete_port_breakouts: + - "no interface breakout port 1/97" + - "no interface breakout port 1/98" + - "no interface breakout port 1/99" + - "no interface breakout port 1/100" + - "no interface breakout port 1/101" + - "no interface breakout port 1/102" + +tests_cli: + - name: cli_test_case_01 + description: Configure breakout mode for ports + state: merged + input: + - name: 1/97 + mode: 4x25G + - name: 1/98 + mode: 1x40G + +tests: + - name: test_case_01 + description: Configure breakout mode for ports + state: merged + input: + - name: 1/97 + mode: 4x25G + - name: 1/98 + mode: 1x40G + - name: 1/99 + mode: 4x25G + - name: 1/100 + mode: 4x10G + - name: 1/101 + mode: 1x40G + - name: 1/102 + mode: 4x25G + - name: test_case_02 + description: Update breakout mode for ports + state: merged + input: + - name: 1/97 + mode: 1x40G + - name: 1/98 + mode: 4x10G + - name: test_case_03 + description: delete specific port breakout mode + state: deleted + input: + - name: 1/98 + - name: test_case_04 + description: deleting all the port breakout modes + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..fc7e76dd --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old port breakouts + sonic_port_breakout: + config: [] + state: deleted + ignore_errors: yes + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml new file mode 100644 index 00000000..2dea6531 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml @@ -0,0 +1,31 @@ +- debug: msg="sonic_port_breakout Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} CLI validation started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests_cli }}" + +- name: "Test CLI validation started ..." + include_role: + name: common + tasks_from: cli_tasks_template.yaml + loop: "{{ tests_cli }}" + +# - name: Preparations test +# include_tasks: preparation_tests.yaml + +# - name: "Test {{ module_name }} started ..." 
+# include_tasks: tasks_template.yaml +# loop: "{{ tests }}" + +# - name: Cleanup tests +# include_tasks: cleanup_tests.yaml + +# - name: Display all variables/facts known for a host +# debug: +# var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/preparation_tests.yaml new file mode 100644 index 00000000..14ceb14c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old port breakouts + sonic_port_breakout: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml new file mode 100644 index 00000000..04ab1b45 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_port_breakout: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_port_breakout: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..c08c5950 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg @@ -0,0 +1,2 @@ +interface breakout port 1/97 mode 4x25G +interface breakout port 1/98 mode 1x40G diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml new file mode 100644 index 00000000..be519991 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml @@ -0,0 +1,122 @@ +--- +ansible_connection: httpapi +module_name: sonic_prefix_lists + +tests: + - name: test_case_01 + description: Add initial prefix list configuration + state: merged + input: + - afi: ipv4 + name: pf1 + prefixes: + - action: deny + sequence: 10 + prefix: 1.2.3.0/24 + ge: 25 + le: 27 + - afi: ipv4 + name: pf2 + prefixes: + - action: deny + prefix: 10.20.30.0/24 + sequence: 20 + ge: 26 + - action: permit + prefix: 10.20.30.128/25 + sequence: 50 + ge: 27 + le: 29 + - afi: ipv4 + name: pf3 + prefixes: + - action: deny + prefix: 1.2.3.128/25 + sequence: 30 + le: 27 + - afi: ipv6 + name: pf4 + prefixes: + - action: permit + sequence: 40 + prefix: 50:60::/64 + + - name: test_case_02 + description: Add another prefix set to the existing configuration + state: merged + input: + - afi: ipv4 + name: pf5 + prefixes: + - action: permit + prefix: 15.25.35.0/24 + sequence: 15 + + - name: test_case_03 + description: Add a prefix to an existing prefix set + state: merged + input: + - afi: ipv4 + name: pf3 + prefixes: + - action: permit + prefix: 1.2.3.192/26 + sequence: 40 + ge: 28 + le: 30 + + - name: test_case_04 + description: Modify "action" attributes for an existing 
prefix + state: merged + input: + - afi: ipv4 + name: pf2 + prefixes: + - action: permit + prefix: 10.20.30.0/24 + sequence: 20 + ge: 26 + - action: deny + prefix: 10.20.30.128/25 + sequence: 50 + ge: 27 + le: 29 + + - name: test_case_05 + description: Delete prefixes from existing prefix sets + state: deleted + input: + - afi: ipv4 + name: pf2 + prefixes: + - action: deny + prefix: 10.20.30.0/24 + sequence: 20 + ge: 26 + - afi: ipv4 + name: pf3 + prefixes: + - action: permit + prefix: 1.2.3.192/26 + sequence: 40 + ge: 28 + le: 30 + - afi: ipv4 + name: pf5 + prefixes: + - action: permit + prefix: 15.25.35.0/24 + sequence: 15 + + - name: test_case_06 + description: Delete prefix sets from the existing configuration + state: deleted + input: + - name: pf1 + - name: pf4 + afi: ipv6 + + - name: test_case_07 + description: Delete all prefix set configuration + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..1a25238c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/cleanup_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes all prefix lists + sonic_prefix_lists: + config: [] + state: deleted + ignore_errors: yes diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/main.yml new file mode 100644 index 00000000..6414ec4e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/main.yml @@ -0,0 +1,15 @@ +- debug: msg="sonic_prefix_lists Test started ..." + +- name: Preparation test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "Cleanup {{ module_name }} started ..." + include_tasks: cleanup_tests.yaml + +- name: Display Test Report Output + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/preparation_tests.yaml new file mode 100644 index 00000000..6db3bac9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/preparation_tests.yaml @@ -0,0 +1,7 @@ +- name: "Delete old prefix lists" + vars: + ansible_connection: httpapi + sonic_prefix_lists: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/tasks_template.yaml new file mode 100644 index 00000000..4e5b0f4e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_prefix_lists: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: 
action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_prefix_lists: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml new file mode 100644 index 00000000..6a79dc88 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml @@ -0,0 +1,114 @@ +--- +ansible_connection: httpapi +module_name: radius_server + +preparations_tests: + delete_radius_server: + - name: delete_all + description: Configure radius server + state: deleted + input: +tests: + - name: test_case_01 + description: Add new host configuration + state: merged + input: + auth_type: chap + timeout: 12 + nas_ip: 10.11.12.13 + retransmit: 5 + statistics: true + servers: + host: + - name: my_host1 + auth_type: chap + priority: 3 + vrf: mgmt + timeout: 12 + port: 55 + source_interface: "{{ interface1 }}" + retransmit: 7 + - name: my_host2 + auth_type: pap + priority: 4 + vrf: mgmt + timeout: 15 + port: 56 + source_interface: "{{ interface2 }}" + retransmit: 8 + - name: my_host3 + auth_type: mschapv2 + priority: 6 + vrf: mgmt + timeout: 20 + port: 57 + source_interface: "{{ interface3 }}" + retransmit: 9 + - name: test_case_02 + description: Update specific params of radius server + state: merged + input: + auth_type: mschapv2 + timeout: 24 + servers: + host: + - name: my_host + auth_type: mschapv2 + port: 45 + timeout: 9 + vrf: mgmt + - name: test_case_03 + description: Delete specific params of radius server + state: deleted + input: + key: login + timeout: 
24 + servers: + host: + - name: my_host + - name: test_case_04 + description: Delete all hosts configurations + state: deleted + input: + servers: + host: + - name: test_case_05 + description: Add new host configuration + state: merged + input: + auth_type: chap + timeout: 12 + nas_ip: 10.11.12.13 + retransmit: 5 + statistics: true + servers: + host: + - name: my_host1 + auth_type: chap + priority: 3 + vrf: mgmt + timeout: 12 + port: 55 + source_interface: "{{ interface1 }}" + retransmit: 7 + - name: my_host2 + auth_type: pap + priority: 4 + vrf: mgmt + timeout: 15 + port: 56 + source_interface: "{{ interface2 }}" + retransmit: 8 + - name: my_host3 + auth_type: mschapv2 + priority: 6 + vrf: mgmt + timeout: 20 + port: 57 + source_interface: "{{ interface3 }}" + retransmit: 9 + +test_delete_all: + - name: test_case_06 + description: delete all the configurations of radius server + state: deleted diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..bbe93069 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old radius server configurations + sonic_radius_server: + config: {} + state: deleted + ignore_errors: yes + diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/main.yml new file mode 100644 index 00000000..18bebfba --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/main.yml @@ -0,0 +1,16 @@ +- debug: msg="sonic_radius_server Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "test_delete_all {{ module_name }} started ..." + include_tasks: tasks_template_del.yaml + loop: "{{ test_delete_all }}" + when: test_delete_all is defined diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/preparation_tests.yaml new file mode 100644 index 00000000..f1a64aa7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old radius server configurations + sonic_radius_server: + config: {} + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template.yaml new file mode 100644 index 00000000..66281d52 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_radius_server: + config: "{{ item.input 
}}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_radius_server: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..1cfb7c04 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_radius_server: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_radius_server: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml new file mode 100644 index 00000000..64d2485f --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml @@ -0,0 +1,172 @@ +--- +ansible_connection: httpapi +module_name: static_routes + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +preparations_tests: + init_vrf: + - 'ip vrf {{vrf_1}}' + - 'ip vrf {{vrf_2}}' +tests: + - name: test_case_01 + description: Add new static routes configuration + state: merged + input: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: '{{ interface1 }}' + metric: 1 + tag: 2 + track: 3 + - vrf_name: '{{vrf_1}}' + static_list: + - prefix: '3.0.0.0/8' + next_hops: + - index: + blackhole: True + interface: '{{ interface1 }}' + nexthop_vrf: '{{vrf_2}}' + next_hop: '5.0.0.0' + metric: 1 + tag: 2 + track: 3 + - index: + interface: '{{ interface1 }}' + nexthop_vrf: '{{vrf_2}}' + next_hop: '4.0.0.0' + metric: 4 + tag: 5 + track: 6 + - vrf_name: '{{vrf_2}}' + static_list: + - prefix: '1.0.0.0/8' + next_hops: + - index: + interface: '{{ interface2 }}' + nexthop_vrf: '{{vrf_1}}' + - index: + interface: '{{ interface3 }}' + next_hop: '2.0.0.0' + - prefix: '7.0.0.0/8' + next_hops: + - index: + nexthop_vrf: '{{vrf_1}}' + next_hop: '3.0.0.0' + - name: test_case_02 + description: Modify static routes configuration + state: merged + input: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: '{{ interface1 }}' + metric: 10 + tag: 20 + track: 30 + - index: + next_hop: '3.0.0.0' + metric: 8 + tag: 10 + track: 12 + - vrf_name: '{{vrf_1}}' + static_list: + - prefix: '3.0.0.0/8' + next_hops: + - index: + blackhole: True + interface: '{{ interface1 }}' + nexthop_vrf: '{{vrf_2}}' + next_hop: '5.0.0.0' + metric: 11 + tag: 22 + track: 33 + - vrf_name: '{{vrf_2}}' + static_list: + - prefix: '1.0.0.0/8' + next_hops: + - index: + interface: '{{ interface2 }}' + nexthop_vrf: '{{vrf_1}}' + metric: 6 + tag: 7 + track: 8 + - prefix: '7.0.0.0/8' + next_hops: + - index: + nexthop_vrf: 
'{{vrf_1}}' + next_hop: '3.0.0.0' + metric: 9 + tag: 10 + track: 11 + - name: test_case_03 + description: Delete static routes metric, tag, and track configuration + state: deleted + input: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: '{{ interface1 }}' + metric: 10 + tag: 20 + track: 30 + - index: + next_hop: '3.0.0.0' + metric: 8 + tag: 10 + track: 12 + - vrf_name: '{{vrf_1}}' + static_list: + - prefix: '3.0.0.0/8' + next_hops: + - index: + blackhole: True + interface: '{{ interface1 }}' + nexthop_vrf: '{{vrf_2}}' + next_hop: '5.0.0.0' + metric: 11 + tag: 22 + track: 33 + - index: + interface: '{{ interface1 }}' + nexthop_vrf: '{{vrf_2}}' + next_hop: '4.0.0.0' + - name: test_case_04 + description: Delete static route index configuration + state: deleted + input: + - vrf_name: 'default' + static_list: + - prefix: '2.0.0.0/8' + next_hops: + - index: + interface: '{{ interface1 }}' + - index: + next_hop: '3.0.0.0' + - vrf_name: '{{vrf_2}}' + static_list: + - prefix: '1.0.0.0/8' + next_hops: + - index: + interface: '{{ interface3 }}' + next_hop: '2.0.0.0' + - name: test_case_05 + description: Delete static route prefix configuration + state: deleted + input: + - vrf_name: '{{vrf_1}}' + - vrf_name: '{{vrf_2}}' + static_list: + - prefix: '7.0.0.0/8' + - name: test_case_06 + description: Delete all static routes configuration + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml new file mode 100644 index 00000000..c87965de --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml @@ -0,0 +1,11 @@ +- debug: msg="sonic_static_routes Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/preparation_tests.yaml new file mode 100644 index 00000000..d504873a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/preparation_tests.yaml @@ -0,0 +1,11 @@ +- name: "initialize VRFs" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_vrf }}" + +- name: Deletes old static routes configurations + sonic_static_routes: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml new file mode 100644 index 00000000..9695897c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_static_routes: + config: "{{ item.input }}" + state: "{{ item.state 
}}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_static_routes: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..bbf2331d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_static_routes: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_static_routes: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml new file mode 100644 index 00000000..c5146db9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml @@ -0,0 +1,47 @@ +--- +ansible_connection: httpapi +module_name: system +tests: + - name: test_case_01 + description: System properties + state: merged + input: + hostname: SONIC-test + interface_naming: standard + anycast_address: + ipv4: false + ipv6: false + + - name: 
test_case_02 + description: Update created System properties + state: merged + input: + hostname: SONIC-new + + - name: test_case_03 + description: Update System properties - associate mac address + state: merged + input: + anycast_address: + mac_address: 00:09:5B:EC:EE:F2 + + - name: del_test_case_01 + description: Delete System properties + state: deleted + input: + hostname: SONIC-new + interface_naming: standard + anycast_address: + ipv4: false + + - name: del_test_case_02 + description: Delete System associated anycast mac address + state: deleted + input: + anycast_address: + mac_address: 00:09:5B:EC:EE:F2 + +test_delete_all: + - name: del_all_test_case_01 + description: Delete System properties + state: deleted diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/meta/main.yaml new file mode 100644 index 00000000..d0ceaf6f --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..816050f9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/cleanup_tests.yaml @@ -0,0 +1,13 @@ +#standard_command: interface-naming standard +#native_command: no interface-naming standard +#command: "{{ statandard_command if std_name in interface_mode else native_command }}" +#command: "{{ 'interface-naming standard' if std_name in interface_mode else 'no interface-naming standard' }}" +- name: Change the interface mode to old + vars: + ansible_connection: network_cli + 
command: interface-naming standard + no_command: no interface-naming standard + when: interface_mode == native_mode + sonic_config: + commands: "{{ command if std_name in interface_mode else no_command }}" + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/main.yml new file mode 100644 index 00000000..7f6652f1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/main.yml @@ -0,0 +1,20 @@ +- debug: msg="sonic_system Test started ..." + +- name: Preparation Tests + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "test_delete_all {{ module_name }} started ..." + include_tasks: tasks_template_del.yaml + loop: "{{ test_delete_all }}" + when: test_delete_all is defined + +- name: "Cleanup test {{ module_name }} started" + include_tasks: cleanup_tests.yaml + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/preparation_tests.yaml new file mode 100644 index 00000000..eaf12cdf --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes system configurations + sonic_system: + config: {} + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template.yaml new file mode 100644 
index 00000000..fc1e58b3 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_system: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_system: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..1ac0e555 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_system: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_system: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml new file mode 100644 index 00000000..0f9b3e3c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml @@ -0,0 
+1,91 @@ +--- +ansible_connection: httpapi +module_name: tacacs_server + +preparations_tests: + delete_tacacs_server: + - name: delete_all + description: Configure tacacs server + state: deleted + input: +tests: + - name: test_case_01 + description: Add new host configuration + state: merged + input: + auth_type: chap + source_interface: "{{ interface1 }}" + timeout: 12 + servers: + host: + - name: my_host + auth_type: chap + port: 55 + timeout: 12 + priority: 3 + - name: my_host1 + auth_type: login + port: 60 + timeout: 14 + priority: 4 + - name: my_host2 + auth_type: login + port: 60 + timeout: 14 + priority: 4 + - name: test_case_02 + description: Update specific params of tacacs server + state: merged + input: + auth_type: login + source_interface: "{{ interface2 }}" + timeout: 24 + servers: + host: + - name: my_host + auth_type: mschap + port: 45 + timeout: 9 + priority: 5 + vrf: default + - name: test_case_03 + description: Delete specific params of tacacs server + state: deleted + input: + key: login + timeout: 24 + servers: + host: + - name: my_host + - name: test_case_04 + description: Delete all hosts configurations + state: deleted + input: + servers: + host: + - name: test_case_05 + description: merge parameter of tacacs servers + state: merged + input: + servers: + host: + - name: my_host + auth_type: chap + port: 55 + timeout: 12 + priority: 3 + - name: my_host1 + auth_type: login + port: 60 + timeout: 14 + priority: 4 + - name: my_host2 + auth_type: login + port: 60 + timeout: 14 + priority: 4 + +test_delete_all: + - name: test_case_06 + description: delete all the configurations of tacacs server + state: deleted diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..fd49cbd2 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old bgp + sonic_tacacs_server: + config: {} + state: deleted + ignore_errors: yes + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/main.yml new file mode 100644 index 00000000..4020d077 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/main.yml @@ -0,0 +1,16 @@ +- debug: msg="sonic_tacacs_server Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: "test_delete_all {{ module_name }} stated ..." 
+ include_tasks: tasks_template_del.yaml + loop: "{{ test_delete_all }}" + when: test_delete_all is defined diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/preparation_tests.yaml new file mode 100644 index 00000000..7c009d68 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes old tacacs server configurations + sonic_tacacs_server: + config: {} + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template.yaml new file mode 100644 index 00000000..a73dd9ba --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_tacacs_server: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_tacacs_server: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template_del.yaml new file mode 100644 index 00000000..cf15eee9 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/tasks/tasks_template_del.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_tacacs_server: + state: "{{ item.state }}" + config: + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_tacacs_server: + state: "{{ item.state }}" + config: + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml new file mode 100644 index 00000000..342cb2d4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml @@ -0,0 +1,96 @@ +--- +ansible_connection: httpapi +module_name: users + +tests_cli: + - name: cli_test_case_01 + description: Configure users + state: merged + expected_cli: + - username sysadmin password + - username operator1 password + input: + - name: sysadmin + role: admin + password: admin + update_password: always + - name: operator1 + role: operator + password: admin + update_password: always + +tests_single_run: + - name: test_case_sr_01 + description: Configure users + state: merged + task_condition_type: action + input: + - name: user1 + role: admin + password: admin + update_password: always + - name: user2 + role: operator + password: admin + update_password: always + - 
name: user2 + role: operator + password: admin + update_password: on_create + - name: test_case_sr_02 + description: Validate users creation + state: merged + task_condition_type: idempotent + input: + - name: user1 + role: admin + password: admin + update_password: on_create + - name: user2 + role: operator + password: admin + update_password: on_create + - name: user2 + role: operator + password: admin + update_password: on_create + + +tests: + - name: test_case_01 + description: Configure users + state: merged + input: + - name: user1 + role: admin + password: admin + update_password: on_create + - name: user2 + role: operator + password: admin + update_password: on_create + - name: user3 + role: admin + password: admin + update_password: on_create + - name: test_case_02 + description: Configure users role + state: merged + input: + - name: user1 + role: operator + password: admin + update_password: on_create + - name: user2 + role: admin + password: admin + update_password: on_create + - name: test_case_03 + description: Delete user + state: deleted + input: + - name: user1 + - name: test_case_04 + description: Update users role + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/meta/main.yaml new file mode 100644 index 00000000..0b356217 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml new file mode 100644 index 00000000..1e9bfc24 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml @@ -0,0 +1,14 @@ +- name: Test sonic multiple command with wait_for + vars: + ansible_connection: network_cli + sonic_command: + commands: + - show running-configuration + register: cli_contains_output + +- set_fact: + cli_contains_condition: "{{ 'username operator1 password' in cli_contains_output.stdout.0 }}" + +- import_role: + name: common + tasks_from: cli.contains.test.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml new file mode 100644 index 00000000..987174b9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml @@ -0,0 +1,39 @@ +- debug: msg="sonic_users Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +# - name: CLI test test +# include_tasks: cli_tests.yaml + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} Single run validation started ..." + include_tasks: single_run_template.yaml + loop: "{{ tests_single_run }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +# - name: "Test CLI validation started ..." 
+# include_role: +# name: common +# tasks_from: cli_tasks_template.yaml +# loop: "{{ tests_cli }}" + +# - name: Preparations test +# include_tasks: preparation_tests.yaml + + + +# - name: Preparations test +# include_tasks: preparation_tests.yaml + +# - name: Display all variables/facts known for a host +# debug: +# var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/preparation_tests.yaml new file mode 100644 index 00000000..3fe82414 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/preparation_tests.yaml @@ -0,0 +1,5 @@ +- name: Deletes all users except admin + sonic_users: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/single_run_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/single_run_template.yaml new file mode 100644 index 00000000..b6f7eab5 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/single_run_template.yaml @@ -0,0 +1,19 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_users: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: single_run_task_output + ignore_errors: yes + +- set_fact: + single_run_task_status: "{{ single_run_idem_condition }}" + when: item.task_condition_type == "idempotent" + +- set_fact: + single_run_task_status: "{{ single_run_condition }}" + when: item.task_condition_type != "idempotent" + +- import_role: + name: common + tasks_from: single.run.facts.report.yaml + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/tasks_template.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/tasks_template.yaml new file mode 100644 index 00000000..9713e0a9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_users: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_users: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..0a2ceda7 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/templates/cli_test_case_01.cfg @@ -0,0 +1,2 @@ +username operator1 password $6$E3pDmCbrbYNEw952$6y9rQNgNRVCU4Fgu7QnNa.AYHuDqpzwYiTuR8QG0TUwcAbR/VFK1gZX1EEHpeE0wjgBf6USnLXcgBj8TIS5mg. 
role operator +username sysadmin password $6$UuxErB15tyodQ3SG$j02rdRERouNP3hed8w0Un5EO3tAhpoQqj6WlvJOU/.zuN3NzdoY/x1sahmgExMeOPeUtaSO.XdheJghewZG2y1 role admin diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml new file mode 100644 index 00000000..b7deed02 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml @@ -0,0 +1,42 @@ +--- +ansible_connection: httpapi +module_name: vlans + +preparations_tests: + add_vlans_input: + - vlan_id: 500 + - vlan_id: 501 + - vlan_id: 502 + - vlan_id: 503 + + +tests: + # merge test cases started + - name: test_case_01 + description: Add access and trunk VLANs + state: merged + input: + - vlan_id: 500 + description: "vlan500 description" + - vlan_id: 501 + description: "vlan501 description" + - name: test_case_02 + description: Update trunk VLANs + state: merged + input: + - vlan_id: 500 + description: "modified vlan500 description" + - vlan_id: 502 + - vlan_id: 503 + # delete test cases started + - name: test_case_03 + description: Delete Access VLAN + state: deleted + input: + - vlan_id: 500 + description: "modified vlan500 description" + - vlan_id: 501 + - name: test_case_04 + description: Delete specific trunk VLANs + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/main.yml new file mode 100644 index 00000000..be018fea --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/main.yml @@ -0,0 +1,12 @@ +- debug: msg="sonic_l2_interfaces Test started ..." + +- name: Preparations test, creates VLANs + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Display all variables/facts known for a host + debug: + var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/preparation_tests.yaml new file mode 100644 index 00000000..464180f6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/preparation_tests.yaml @@ -0,0 +1,23 @@ +--- +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: Delete VLANs Inputs + sonic_vlans: + config: "{{ preparations_tests.add_vlans_input }}" + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/tasks_template.yaml new file mode 100644 index 00000000..2e0c4135 --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_vlans: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_vlans: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml new file mode 100644 index 00000000..afbfd754 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml @@ -0,0 +1,139 @@ +--- +ansible_connection: httpapi +module_name: vrf + +vrf_1: VrfReg1 +vrf_2: VrfReg2 + +po1: Portchannel 100 +vlan1: Vlan 100 +looopback1: Loopback 100 + +po2: Portchannel 101 +vlan2: Vlan 101 +looopback2: Loopback 101 + +preparations_tests: + delete_interfaces: + - "no interface {{ po1 }}" + - "no interface {{ vlan1 }}" + - "no interface {{ looopback1 }}" + - "no interface {{ po2 }}" + - "no interface {{ vlan2 }}" + - "no interface {{ looopback2 }}" + init_interfaces: + - "interface {{ po1 }}" + - "interface {{ vlan1 }}" + - "interface {{ looopback1 }}" + - "interface {{ po2 }}" + - "interface {{ vlan2 }}" + - "interface {{ looopback2 }}" + +tests_cli: + - name: cli_test_case_01 + description: creates VRF properties + state: merged + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ interface1 }}" + - name: "{{ po1 }}" + - name: "{{ vlan1 }}" + - name: "{{ looopback1 }}" + - name: "{{ vrf_2 }}" + +tests: + - 
name: test_case_01 + description: creates VRF properties + state: merged + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ interface1 }}" + - name: "{{ po1 }}" + - name: "{{ vrf_2 }}" + + - name: test_case_02 + description: Update VRF properties + state: merged + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ vlan1 }}" + - name: "{{ vrf_2 }}" + members: + interfaces: + - name: "{{ interface2 }}" + - name: "{{ po2 }}" + + - name: test_case_03 + description: Update VRF properties + state: merged + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ interface1 }}" + - name: "{{ po1 }}" + - name: "{{ looopback1 }}" + - name: "{{ vrf_2 }}" + members: + interfaces: + - name: "{{ interface2 }}" + - name: "{{ po2 }}" + - name: "{{ vlan2 }}" + - name: "{{ looopback2 }}" + + - name: del_test_case_04 + description: Delete VRF properties + state: deleted + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ interface1 }}" + - name: "{{ vrf_2 }}" + members: + interfaces: + - name: "{{ interface2 }}" + - name: "{{ po2 }}" + + - name: del_test_case_05 + description: Delete VRF properties + state: deleted + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + + - name: del_test_case_06 + description: Delete VRF properties + state: deleted + input: + - name: "{{ vrf_2 }}" + + - name: test_case_07 + description: Merged VRF properties + state: merged + input: + - name: "{{ vrf_1 }}" + members: + interfaces: + - name: "{{ interface1 }}" + - name: "{{ po1 }}" + - name: "{{ looopback1 }}" + - name: "{{ vrf_2 }}" + members: + interfaces: + - name: "{{ interface2 }}" + - name: "{{ po2 }}" + - name: "{{ vlan2 }}" + - name: "{{ looopback2 }}" + + - name: del_test_case_08 + description: Delete VRF properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/meta/main.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/meta/main.yaml @@ -0,0 +1,5 @@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..c6a26a68 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old vrf + sonic_vrfs: + config: [] + state: deleted + ignore_errors: yes + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml new file mode 100644 index 00000000..8e165bc6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml @@ -0,0 +1,34 @@ +- debug: msg="sonic_vrfs Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} CLI validation started ..." + include_tasks: tasks_template.yaml + loop: "{{ tests_cli }}" + +- name: "Test CLI validation started ..." + include_role: + name: common + tasks_from: cli_tasks_template.yaml + loop: "{{ tests_cli }}" + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: Cleanup tests + include_tasks: cleanup_tests.yaml + +- name: "Test {{ module_name }} started ..." 
+ include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +- name: Cleanup tests + include_tasks: cleanup_tests.yaml + +# - name: Display all variables/facts known for a host +# debug: +# var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml new file mode 100644 index 00000000..c6a33af4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml @@ -0,0 +1,34 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize default interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ default_interface_cli }}" + register: output + ignore_errors: yes +- name: Deletes old VRFs + sonic_vrfs: + config: [] + state: deleted + ignore_errors: yes +- name: "initialize interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_interfaces }}" + register: output + ignore_errors: yes +- name: Deletes old VRFs + sonic_vrfs: + config: [] + state: deleted + ignore_errors: yes diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/tasks_template.yaml new file mode 100644 index 00000000..85fdccc0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_vrfs: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + 
+- import_role: + name: common + tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_vrfs: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..35cd6ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg @@ -0,0 +1,10 @@ +ip vrf VrfReg1 +ip vrf VrfReg2 +interface Vlan100 + ip vrf forwarding VrfReg1 +interface Loopback 100 + ip vrf forwarding VrfReg1 +interface PortChannel 100 + ip vrf forwarding VrfReg1 +interface {{ interface1 }} + ip vrf forwarding VrfReg1 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml new file mode 100644 index 00000000..f2687a09 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml @@ -0,0 +1,121 @@ +--- +ansible_connection: httpapi +module_name: vxlan + +vrf1: VrfReg1 +vrf2: VrfReg2 + +po1: Portchannel 100 +vlan1: 105 +looopback1: Loopback 100 +po2: Portchannel 101 +vlan2: 106 +looopback2: Loopback 101 + +preparations_tests: + delete_interfaces: + - "no interface Vlan {{ vlan1 }}" + - "no interface Vlan {{ vlan2 }}" + init_interfaces: + - "interface Vlan {{ vlan1 }}" + - "interface Vlan {{ vlan2 }}" + - "exit" + - "ip vrf {{vrf1}}" + - "ip vrf {{vrf2}}" + +tests_cli: + - name: cli_test_case_01 + description: creates VXLAN properties + state: merged + input: + - name: vtep1 + source_ip: 
1.1.1.1 + primary_ip: 2.2.2.2 + evpn_nvo: nvo5 + vlan_map: + - vni: 101 + vlan: "{{vlan1}}" + - vni: 102 + vlan: "{{vlan2}}" + vrf_map: + - vni: 101 + vrf: "{{vrf1}}" + - vni: 102 + vrf: "{{vrf2}}" + +tests: + - name: test_case_01 + description: creates Vxlan properties + state: merged + input: + - name: vtep1 + - name: test_case_02 + description: Update VRF properties + state: merged + input: + - name: vtep1 + source_ip: 1.1.1.1 + primary_ip: 2.2.2.2 + evpn_nvo: nvo5 + - name: test_case_03 + description: Update VRF properties + state: merged + input: + - name: vtep1 + vlan_map: + - vni: 101 + vlan: "{{vlan1}}" + - name: test_case_04 + description: Update VRF properties + state: merged + input: + - name: vtep1 + vrf_map: + - vni: 101 + vrf: "{{vrf1}}" + - name: test_case_05 + description: Update VRF properties + state: merged + input: + - name: vtep1 + source_ip: 1.1.1.1 + primary_ip: 2.2.2.2 + evpn_nvo: nvo5 + vlan_map: + - vni: 102 + vlan: "{{vlan2}}" + vrf_map: + - vni: 102 + vrf: "{{vrf2}}" + - name: del_test_case_06 + description: Delete VRF properties + state: deleted + input: + - name: vtep1 + source_ip: 1.1.1.1 + primary_ip: 2.2.2.2 + evpn_nvo: nvo5 + vlan_map: + - vni: 101 + vlan: "{{vlan1}}" + - vni: 102 + vlan: "{{vlan2}}" + vrf_map: + - vni: 101 + vrf: "{{vrf1}}" + - vni: 102 + vrf: "{{vrf2}}" + - name: del_test_case_07 + description: Delete VRF properties + state: deleted + input: + - name: vtep1 + - name: test_case_08 + description: Recreate Vxlan + state: merged + input: + - name: vtep1 + - name: del_test_case_09 + description: Delete VRF properties + state: deleted + input: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/meta/main.yaml new file mode 100644 index 00000000..611fd54d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/meta/main.yaml @@ -0,0 +1,5 
@@ +--- +collections: + - dellemc.enterprise_sonic +dependencies: + - { role: common } \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/cleanup_tests.yaml new file mode 100644 index 00000000..42f3ebb9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/cleanup_tests.yaml @@ -0,0 +1,6 @@ +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes + \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/main.yml new file mode 100644 index 00000000..721097ca --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/main.yml @@ -0,0 +1,34 @@ +- debug: msg="sonic_vxlan Test started ..." + +- set_fact: + base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}" + +# - name: Preparations test +# include_tasks: preparation_tests.yaml + +# - name: "Test {{ module_name }} CLI validation started ..." +# include_tasks: tasks_template.yaml +# loop: "{{ tests_cli }}" + +# - name: "Test CLI validation started ..." +# include_role: +# name: common +# tasks_from: cli_tasks_template.yaml +# loop: "{{ tests_cli }}" + +- name: Cleanup tests + include_tasks: cleanup_tests.yaml + +- name: Preparations test + include_tasks: preparation_tests.yaml + +- name: "Test {{ module_name }} started ..." 
+ include_tasks: tasks_template.yaml + loop: "{{ tests }}" + +# - name: Cleanup tests +# include_tasks: cleanup_tests.yaml + +# - name: Display all variables/facts known for a host +# debug: +# var: hostvars[inventory_hostname].ansible_facts.test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/preparation_tests.yaml new file mode 100644 index 00000000..5946f3f4 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/preparation_tests.yaml @@ -0,0 +1,29 @@ +- name: Delete existing mclag + sonic_mclag: + config: + state: deleted + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes +- name: "delete interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.delete_interfaces }}" + register: output + ignore_errors: yes +- name: "initialize interfaces" + vars: + ansible_connection: network_cli + sonic_config: + commands: "{{ preparations_tests.init_interfaces }}" + register: output + ignore_errors: yes +- name: Deletes old vxlans + sonic_vxlans: + config: [] + state: deleted + ignore_errors: yes \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/tasks_template.yaml new file mode 100644 index 00000000..73d8250d --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/tasks/tasks_template.yaml @@ -0,0 +1,21 @@ +- name: "{{ item.name}} , {{ item.description}}" + sonic_vxlans: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: action_task_output + ignore_errors: yes + +- import_role: + name: common + 
tasks_from: action.facts.report.yaml + +- name: "{{ item.name}} , {{ item.description}} Idempotent" + sonic_vxlans: + config: "{{ item.input }}" + state: "{{ item.state }}" + register: idempotent_task_output + ignore_errors: yes + +- import_role: + name: common + tasks_from: idempotent.facts.report.yaml \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/templates/cli_test_case_01.cfg new file mode 100644 index 00000000..ebdcb7c0 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/templates/cli_test_case_01.cfg @@ -0,0 +1,8 @@ +interface vxlan vtep1 + source-ip 1.1.1.1 + map vni 101 vlan 105 + map vni 102 vlan 106 + map vni 101 vrf VrfReg1 + map vni 102 vrf VrfReg2 +! + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/meta/main.yaml new file mode 100644 index 00000000..a84afac6 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/meta/main.yaml @@ -0,0 +1,3 @@ +--- +collections: + - dellemc.enterprise_sonic \ No newline at end of file diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/tasks/main.yml new file mode 100644 index 00000000..a116ce79 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/tasks/main.yml 
@@ -0,0 +1,12 @@ +- set_fact: + time: "{{ lookup('pipe', 'date +%H-%M-%S') }}" + date: "{{ lookup('pipe', 'date +%Y-%m-%d') }}" + report_dir: "{{ lookup('env', 'ANSIBLE_SONIC_REPORT_DIR') | default(REPORT_DIR, true) }}" +- debug: var=report_dir +- blockinfile: + path: "{{ report_dir }}/regression-{{ file_suffix }}.html" + block: "{{ lookup('template', 'regression_html_report.j2')}}" + mode: 00777 + create: true + marker: "" + delegate_to: localhost diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2 b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2 new file mode 100644 index 00000000..0695c86b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2 @@ -0,0 +1,328 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True + + + +

Regression report

+

Date: {{ date}} Time: {{ time }}

+ +{% set complete_passed = [0] %} +{% set complete_failed = [0] %} +{% set complete_total = [0] %} +

Summary report

+ + + + + + + + + + + +{% for module_name, test_data_list in ansible_facts.test_reports.items() %} +{% set passed = [0] %} +{% set failed = [0] %} +{% set total = [0] %} +{% for testcase_name, test_data in test_data_list.items() %} +{% if total.append(total.pop() + 1) %}{% endif %} +{% if complete_total.append(complete_total.pop() + 1) %}{% endif %} + +{% if 'Passed' in test_data.status %} +{% if passed.append(passed.pop() + 1) %}{% endif %} +{% if complete_passed.append(complete_passed.pop() + 1) %}{% endif %} +{% endif %} + +{% if 'Failed' in test_data.status %} +{% if failed.append(failed.pop() + 1) %}{% endif %} +{% if complete_failed.append(complete_failed.pop() + 1) %}{% endif %} +{% endif %} +{% endfor %} + + + + + + +{% endfor %} + +
ModuleTotal testcasesPassedFailed
{{ module_name }}{{ total[0] }}{{ passed[0] }}{{ failed[0] }}
+ +

Testcase Complete Summary: Total: {{ complete_total[0] }}, Passed: {{ complete_passed[0] }}, Failed: {{ complete_failed[0] }}

+ + +
+
+
+ + + + + + + + + + + + + + + + + + + + + +{% for module_name, test_data_list in ansible_facts.test_reports.items() %} +{% set passed = [0] %} +{% set failed = [0] %} +{% set total = [0] %} + +{% for testcase_name, test_data in test_data_list.items() %} +{% if total.append(total.pop() + 1) %}{% endif %} + +{% if 'Passed' in test_data.status %} +{% if passed.append(passed.pop() + 1) %}{% endif %} +{% endif %} + +{% if 'Failed' in test_data.status %} +{% if failed.append(failed.pop() + 1) %}{% endif %} +{% endif %} +{% endfor %} +

Detailed report of {{module_name }}

+

Testcase summary: Total: {{ total[0] }}, Passed: {{ passed[0] }}, Failed: {{ failed[0] }}

+ + + + + + + + + + + + + +{% for name, test_data in test_data_list.items() %} + + + +{% if 'Passed' in test_data.status %} + + + + + + +{% else %} + + + + + +{% endif %} + +{% endfor %} + +
Testcase nameStatusUser InputCommandsBeforeAfterModule exception
{{ name}}{{ test_data.status | default('Template Error')}}Input: {{ test_data.configs | default('Template Error') | to_nice_json(indent=3) }}Commands: {{ test_data.commands | default('Template Error') | to_nice_json(indent=3) }}Before: {{ test_data.before | default('Template Error') | to_nice_json(indent=3) }}After: {{ test_data.after | default('Template Error') | to_nice_json(indent=3) }}Error: {{ test_data.module_stderr | default('Template Error') | to_nice_json(indent=3) }}
+{% endfor %} + +

Testcase Complete Summary: Total: {{ complete_total[0] }}, Passed: {{ complete_passed[0] }}, Failed: {{ complete_failed[0] }}

+ + + diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml new file mode 100644 index 00000000..c34c286c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml @@ -0,0 +1,39 @@ +--- +- name: "Test SONiC CLI" + hosts: sonic1 + gather_facts: no + connection: httpapi + vars: + file_suffix: "{{ lookup('pipe','date +%Y-%m-%d-%H-%M-%S') }}" + collections: + - dellemc.enterprise_sonic + roles: + #- sonic_api + #- sonic_command + #- sonic_config + + - sonic_interfaces + - sonic_l2_interfaces + - sonic_lag_interfaces + - sonic_mclag + - sonic_vlans + - sonic_l3_interfaces + - sonic_bgp_communities + - sonic_bgp_ext_communities + - sonic_bgp_as_paths + - sonic_bgp + - sonic_bgp_af + - sonic_bgp_neighbors + - sonic_bgp_neighbors_af + - sonic_vrfs + - sonic_vxlan + - sonic_port_breakout + - sonic_users + - sonic_aaa + - sonic_tacacs_server + - sonic_radius_server + - sonic_system + - sonic_prefix_lists + - sonic_static_routes + - sonic_ntp + - test_reports diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.10.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.11.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.12.txt 
b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.12.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.13.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.14.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.14.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..c2cf4ded --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.9.txt @@ -0,0 +1 @@ +plugins/action/sonic.py action-plugin-docs #action plugin for base class diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/run_test_cases.sh b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/run_test_cases.sh new file mode 100755 index 00000000..ac35117e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/run_test_cases.sh @@ -0,0 +1,2 @@ +#!/bin/sh +pytest -vvvv test_diff_util.py diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_01_dict_diff_with_key_name.yaml 
b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_01_dict_diff_with_key_name.yaml new file mode 100644 index 00000000..ddab8994 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_01_dict_diff_with_key_name.yaml @@ -0,0 +1,43 @@ +--- +want: + name: tacacs + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + name: radius + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +diff: + name: tacacs + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_02_dict_diff_with_key_other.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_02_dict_diff_with_key_other.yaml new file mode 100644 index 00000000..f7bd8a52 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_02_dict_diff_with_key_other.yaml @@ -0,0 +1,46 @@ +--- +test_keys: + - config: + - method +want: + method: tacacs + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + method: radius + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +diff: + method: tacacs + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_03_dict_diff_without_key.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_03_dict_diff_without_key.yaml new file mode 100644 index 00000000..e09fe74a --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_03_dict_diff_without_key.yaml @@ -0,0 +1,38 @@ +--- +want: + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + auth_type: pap + key: pap + source_interface: Eth 11 + timeout: 12 + host: + name: my_host1 + auth_type: pap + key: pap + port: 55 + timeout: 11 + priority: 2 + vrf: default +diff: + auth_type: chap + key: chap + source_interface: Eth 12 + host: + name: my_host + auth_type: chap + key: chap + timeout: 12 + priority: 3 + vrf: mgmt diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_04_dict_diff_with_similar_dict.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_04_dict_diff_with_similar_dict.yaml new file mode 100644 index 00000000..0d754337 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_04_dict_diff_with_similar_dict.yaml @@ -0,0 +1,28 @@ +--- +want: + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +diff: {} diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_05_dict_diff_left_only.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_05_dict_diff_left_only.yaml new file mode 100644 index 00000000..8a3e2e4a --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_05_dict_diff_left_only.yaml @@ -0,0 +1,29 @@ +--- +want: + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + key: chap + host: + name: my_host + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +diff: + auth_type: chap + source_interface: Eth 12 + timeout: 12 + host: + auth_type: chap + key: chap diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_06_dict_diff_left_only_with_none.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_06_dict_diff_left_only_with_none.yaml new file mode 100644 index 00000000..78c65d27 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_06_dict_diff_left_only_with_none.yaml @@ -0,0 +1,18 @@ +--- +want: + auth_type: chap + key: + source_interface: Eth 12 + timeout: 12 + host: + auth_type: + port: + timeout: 15 +have: + key: chap +diff: + auth_type: chap + source_interface: Eth 12 + timeout: 12 + host: + timeout: 15 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_07_dict_diff_skeleton_only.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_07_dict_diff_skeleton_only.yaml new file mode 100644 index 00000000..b2307a03 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_07_dict_diff_skeleton_only.yaml @@ -0,0 +1,30 @@ +--- +skeleton: True +want: + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + key: pap + host: + name: my_host1 + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +diff: + auth_type: chap + source_interface: Eth 12 + timeout: 12 + host: + auth_type: chap + key: chap diff --git 
a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_08_list_diff_with_key_name.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_08_list_diff_with_key_name.yaml new file mode 100644 index 00000000..b2317f77 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_08_list_diff_with_key_name.yaml @@ -0,0 +1,34 @@ +--- +want: + - name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True +have: + - name: vrf1 + router_id: 100.2.2.30 + log_neighbor_changes: False + - name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - name: vrf3 + router_id: 102.2.2.30 + log_neighbor_changes: True +diff: + - name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_09_list_diff_with_multi_keys.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_09_list_diff_with_multi_keys.yaml new file mode 100644 index 00000000..e3e9701c --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_09_list_diff_with_multi_keys.yaml @@ -0,0 +1,50 @@ +--- +test_keys: + - config: + - vrf_name + - bgp_as +want: + - bgp_as: 51 + vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - bgp_as: 52 + vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - bgp_as: 53 + vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - bgp_as: 54 + vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True +have: + - bgp_as: 50 + vrf_name: vrf1 + - bgp_as: 51 + vrf_name: vrf1 + router_id: 100.2.2.30 + 
log_neighbor_changes: False + - bgp_as: 52 + vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - bgp_as: 53 + vrf_name: vrf3 + router_id: 102.2.2.30 + log_neighbor_changes: True +diff: + - bgp_as: 51 + vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - bgp_as: 53 + vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - bgp_as: 54 + vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_10_list_diff_with_key_other.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_10_list_diff_with_key_other.yaml new file mode 100644 index 00000000..3d4494b9 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_10_list_diff_with_key_other.yaml @@ -0,0 +1,37 @@ +--- +test_keys: + - config: + - vrf_name +want: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True +have: + - vrf_name: vrf1 + router_id: 100.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 102.2.2.30 + log_neighbor_changes: True +diff: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_11_list_diff_with_similar_list.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_11_list_diff_with_similar_list.yaml new file mode 100644 index 00000000..b5826a5a --- /dev/null +++ 
b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_11_list_diff_with_similar_list.yaml @@ -0,0 +1,31 @@ +--- +test_keys: + - config: + - vrf_name +want: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True +have: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True +diff: [] diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_12_list_diff_with_left_only.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_12_list_diff_with_left_only.yaml new file mode 100644 index 00000000..b491385e --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_12_list_diff_with_left_only.yaml @@ -0,0 +1,40 @@ +--- +test_keys: + - config: + - vrf_name +want: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf5 + router_id: 114.2.2.30 + log_neighbor_changes: True +have: + - vrf_name: default + router_id: 100.2.2.30 + log_neighbor_changes: False +diff: + - vrf_name: vrf1 + router_id: 110.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf5 + router_id: 114.2.2.30 
+ log_neighbor_changes: True diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_13_list_diff_with_left_only_with_none.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_13_list_diff_with_left_only_with_none.yaml new file mode 100644 index 00000000..b15fea91 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_13_list_diff_with_left_only_with_none.yaml @@ -0,0 +1,48 @@ +--- +test_keys: + - config: + - vrf_name +want: + - vrf_name: vrf1 + router_id: + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + log_neighbor_changes: + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf5 + router_id: 114.2.2.30 + log_neighbor_changes: True + best_path: + as_path: + med: + value: 5 + id: i1 +have: + - vrf_name: default + router_id: 100.2.2.30 + log_neighbor_changes: False + +diff: + - vrf_name: vrf1 + log_neighbor_changes: True + - vrf_name: vrf2 + router_id: 111.2.2.30 + - vrf_name: vrf3 + router_id: 112.2.2.30 + log_neighbor_changes: False + - vrf_name: vrf4 + router_id: 113.2.2.30 + log_neighbor_changes: True + - vrf_name: vrf5 + router_id: 114.2.2.30 + log_neighbor_changes: True + best_path: + as_path: + value: 5 + id: i1 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_14_list_diff_skeleton_only.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_14_list_diff_skeleton_only.yaml new file mode 100644 index 00000000..b2ead570 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_14_list_diff_skeleton_only.yaml @@ -0,0 +1,61 @@ +--- +skeleton: True +want: + - name: radius + auth_type: chap + key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt + - name: tacacs + auth_type: chap + 
key: chap + source_interface: Eth 12 + timeout: 12 + host: + name: my_host + auth_type: chap + key: chap + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt +have: + - name: radius + key: pap + host: + name: my_host1 + port: 55 + timeout: 12 + priority: 3 + vrf: mgmt + - name: tacacs + auth_type: pap + source_interface: Eth 11 + timeout: 11 + host: + name: my_host_03 + auth_type: pap + timeout: 13 + priority: 4 +diff: + - name: radius + auth_type: chap + source_interface: Eth 12 + timeout: 12 + host: + auth_type: chap + key: chap + - name: tacacs + key: chap + host: + key: chap + port: 55 + vrf: mgmt diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_15_list_of_list_diff.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_15_list_of_list_diff.yaml new file mode 100644 index 00000000..77565dff --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_15_list_of_list_diff.yaml @@ -0,0 +1,163 @@ +--- +test_keys: + - marks: + - year + - subjects: + - code +want: + - name: test1 + marks: + - year: 1 + subjects: + - code: M + mark: 55 + - code: E + mark: 76 + - code: S + mark: 57 + - year: 2 + subjects: + - code: M + mark: 85 + - code: E + mark: 56 + - code: S + mark: 87 + - year: 3 + subjects: + - code: M + mark: 55 + - code: E + mark: 96 + - code: S + mark: 57 + - year: 4 + subjects: + - code: M + mark: 65 + - code: E + mark: 56 + - code: S + mark: 67 + - name: test2 + marks: + - year: 1 + subjects: + - code: M + mark: 55 + - code: E + mark: 76 + - code: S + mark: 57 + - year: 2 + subjects: + - code: M + mark: 85 + - code: E + mark: 56 + - code: S + mark: 87 + - code: G + mark: 58 + - name: test3 + marks: + - year: 1 + subjects: + - code: M + mark: 56 +have: + - name: test1 + marks: + - year: 1 + subjects: + - code: M + mark: 75 + - code: E + mark: 76 + - code: S + mark: 77 + - year: 2 + subjects: + - code: M + mark: 85 + - code: E + mark: 86 + - code: S + mark: 87 + - year: 3 + subjects: + 
- code: M + mark: 95 + - code: E + mark: 96 + - code: S + mark: 97 + - year: 4 + subjects: + - code: M + mark: 65 + - code: E + mark: 66 + - code: S + mark: 67 + - name: test2 + marks: + - year: 1 + subjects: + - code: M + mark: 75 + - code: E + mark: 76 + - code: S + mark: 77 + - year: 2 + subjects: + - code: M + mark: 85 + - code: E + mark: 86 + - code: S + mark: 87 +diff: + - name: test1 + marks: + - year: 1 + subjects: + - code: M + mark: 55 + - code: S + mark: 57 + - year: 2 + subjects: + - code: E + mark: 56 + - year: 3 + subjects: + - code: M + mark: 55 + - code: S + mark: 57 + - year: 4 + subjects: + - code: E + mark: 56 + - name: test2 + marks: + - year: 1 + subjects: + - code: M + mark: 55 + - code: S + mark: 57 + - year: 2 + subjects: + - code: E + mark: 56 + - code: G + mark: 58 + - name: test3 + marks: + - year: 1 + subjects: + - code: M + mark: 56 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_16_complex_list_with_dict_diff.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_16_complex_list_with_dict_diff.yaml new file mode 100644 index 00000000..a6d40b8b --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_16_complex_list_with_dict_diff.yaml @@ -0,0 +1,252 @@ +--- +test_keys: + - config: + - vrf_name + - bgp_as + - afis: + - afi + - safi + - redistribute: + - protocol + - route_advertise_list: + - advertise_afi +want: + - bgp_as: 51 + vrf_name: vrf1 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 2 + ibgp: 3 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg3 + - metric: "27" + protocol: ospf + route_map: rmap_reg1 + - metric: "28" + protocol: static + 
route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_all_vni: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - advertise_afi: ipv6 + route_map: rmap_reg2 + - bgp_as: 52 + vrf_name: vrf2 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 3 + ibgp: 2 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg1 + - metric: "27" + protocol: ospf + route_map: rmap_reg2.1 + - metric: "25" + protocol: static + route_map: rmap_reg3.1 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "22" + protocol: connected + route_map: rmap_rega + - metric: "28" + protocol: ospf + route_map: rmap_regb.1 + - metric: "28" + protocol: static + route_map: rmap_regc.1 + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv6 + route_map: rmap_reg2 +have: + - bgp_as: 50 + vrf_name: vrf1 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 2 + ibgp: 3 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg3 + - metric: "27" + protocol: ospf + route_map: rmap_reg1 + - metric: "28" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_all_vni: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - advertise_afi: ipv6 + route_map: rmap_reg2 + - bgp_as: 52 + vrf_name: vrf2 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 1 + ibgp: 2 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 2 + 
redistribute: + - metric: "21" + protocol: connected + route_map: rmap_rega + - metric: "27" + protocol: ospf + route_map: rmap_regb + - metric: "28" + protocol: static + route_map: rmap_regc + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 +diff: + - bgp_as: 51 + vrf_name: vrf1 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 2 + ibgp: 3 + redistribute: + - metric: "20" + protocol: connected + route_map: rmap_reg1 + - metric: "26" + protocol: ospf + route_map: rmap_reg2 + - metric: "25" + protocol: static + route_map: rmap_reg3 + - afi: ipv6 + safi: unicast + max_path: + ebgp: 3 + ibgp: 4 + redistribute: + - metric: "21" + protocol: connected + route_map: rmap_reg3 + - metric: "27" + protocol: ospf + route_map: rmap_reg1 + - metric: "28" + protocol: static + route_map: rmap_reg2 + - afi: l2vpn + safi: evpn + advertise_all_vni: True + route_advertise_list: + - advertise_afi: ipv4 + route_map: rmap_reg1 + - advertise_afi: ipv6 + route_map: rmap_reg2 + - bgp_as: 52 + vrf_name: vrf2 + address_family: + afis: + - afi: ipv4 + safi: unicast + max_path: + ebgp: 3 + redistribute: + - metric: "21" + protocol: connected + - metric: "27" + protocol: ospf + route_map: rmap_reg2.1 + - protocol: static + route_map: rmap_reg3.1 + - afi: ipv6 + safi: unicast + max_path: + ibgp: 4 + redistribute: + - metric: "22" + protocol: connected + - metric: "28" + protocol: ospf + route_map: rmap_regb.1 + - protocol: static + route_map: rmap_regc.1 + - afi: l2vpn + safi: evpn + route_advertise_list: + - advertise_afi: ipv6 + route_map: rmap_reg2 diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_diff_util.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_diff_util.py new file mode 100644 index 00000000..65f384a1 --- /dev/null +++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/utils/test_diff_util.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import, 
division, print_function +__metaclass__ = type + + +import unittest +import os +import yaml + +import sys +sys.path.append('/root/.ansible/collections') + +from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import ( + get_diff, +) + + +class TestDiffUtils(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def read_and_compare(self, file_name): + file_name = os.path.join(os.path.dirname(__file__), file_name) + file_stream = open(file_name, "r") + data = yaml.full_load(file_stream) + file_stream.close() + + want = data.get('want', []) + have = data.get('have', []) + diff_exp = data.get('diff', []) + test_keys = data.get('test_keys', None) + is_skeleton = data.get('skeleton', None) + + diff_act = get_diff(want, have, test_keys, is_skeleton=is_skeleton) + + self.assertEqual(diff_exp, diff_act) + + def test_01_dict_diff_with_key_name(self): + self.read_and_compare("test_01_dict_diff_with_key_name.yaml") + + def test_02_dict_diff_with_key_other(self): + self.read_and_compare("test_02_dict_diff_with_key_other.yaml") + + def test_03_dict_diff_without_key(self): + self.read_and_compare("test_03_dict_diff_without_key.yaml") + + def test_04_dict_diff_with_similar_dict(self): + self.read_and_compare("test_04_dict_diff_with_similar_dict.yaml") + + def test_05_dict_diff_left_only(self): + self.read_and_compare("test_05_dict_diff_left_only.yaml") + + def test_06_dict_diff_left_only_with_none(self): + self.read_and_compare("test_06_dict_diff_left_only_with_none.yaml") + + def test_07_dict_diff_skeleton_only(self): + self.read_and_compare("test_07_dict_diff_skeleton_only.yaml") + + def test_08_list_diff_with_key_name(self): + self.read_and_compare("test_08_list_diff_with_key_name.yaml") + + def test_09_list_diff_with_multi_keys(self): + self.read_and_compare("test_09_list_diff_with_multi_keys.yaml") + + def test_10_list_diff_with_key_other(self): + 
self.read_and_compare("test_10_list_diff_with_key_other.yaml") + + def test_11_list_diff_with_similar_list(self): + self.read_and_compare("test_11_list_diff_with_similar_list.yaml") + + def test_12_list_diff_with_left_only(self): + self.read_and_compare("test_12_list_diff_with_left_only.yaml") + + def test_13_list_diff_with_left_only_with_none(self): + self.read_and_compare("test_13_list_diff_with_left_only_with_none.yaml") + + def test_14_list_diff_skeleton_only(self): + self.read_and_compare("test_14_list_diff_skeleton_only.yaml") + + def test_15_list_of_list_diff(self): + self.read_and_compare("test_15_list_of_list_diff.yaml") + + def test_16_complex_list_with_dict_diff(self): + self.read_and_compare("test_16_complex_list_with_dict_diff.yaml") diff --git a/ansible_collections/dellemc/openmanage/.github/CODEOWNERS b/ansible_collections/dellemc/openmanage/.github/CODEOWNERS new file mode 100644 index 00000000..c19af9b2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/CODEOWNERS @@ -0,0 +1,28 @@ +# CODEOWNERS +# +# documentation for this file can be found at: +# https://help.github.com/en/articles/about-code-owners + +# These are the default owners for the code and will +# be requested for review when someone opens a pull request. +# order is alphabetical for easier maintenance. 
+# +# Anirudh Kumar (Anirudh_Kumar1@Dell.com) +# Anooja Vardhineni (Anooja_Vardhineni@Dellteam.com) +# Chris Clonch (cacack) +# Deepak Joshi (deepakjoshishri) +# Felix Stephen (felixs88) +# Grant Curell (grantcurell) +# Husniya Hameed (husniya-hameed) +# Jagadeesh N V (jagadeeshnv) +# Jaya Gupta (Jaya_Gupta@Dell.com) +# Mario Lenz (mariolenz) +# Rajeev Arakkal (rajeevarakkal) +# Rajshekar P (rajshekarp87) +# Sachin Apagundi (sachin-apa) +# Sajna N Shetty (Sajna-Shetty) +# Sachin Kumar (Sachin_Kumar12@Dell.com) +# Vasanth Ds (Vasanth_Sathyanaraya@Dell.com) + +# for all files: +* @sachin-apa @jagadeeshnv @felixs88 diff --git a/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/ask_a_question.md b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/ask_a_question.md new file mode 100644 index 00000000..e35c949c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/ask_a_question.md @@ -0,0 +1,11 @@ +--- +name: 💬 Ask a question +about: Ask usage questions here +title: "[QUESTION]:" +labels: type/question +assignees: '' + +--- +### How can the team help? + +**Details: ?** diff --git a/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..5b156977 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,124 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve. +title: "[BUG]: " +labels: ["type/bug", "needs-triage"] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to create this bug report. To better help us investigate this bug, provide the following information. + - type: textarea + id: bug-description + attributes: + label: Bug Description + description: Provide a clear and concise description about the bug. 
+ validations: + required: true + - type: input + id: component + attributes: + label: Component or Module Name + description: Enter the Component or Module Name. + placeholder: idrac_server_config_profile + validations: + required: true + - type: input + id: ansible + attributes: + label: Ansible Version + description: Enter the Ansible version. + placeholder: | + Ansible 2.12 + validations: + required: true + - type: input + id: python + attributes: + label: Python Version + description: Enter the Python version. + placeholder: | + Python 3.9 + validations: + required: true + - type: textarea + id: device + attributes: + label: iDRAC/OME/OME-M version + description: Enter the iDRAC/OME/OME-M version in which the bug is observed. + placeholder: | + iDRAC 5.10.00.00 + OME 3.8.4 + OME-M 1.40 + validations: + required: true + - type: textarea + id: os + attributes: + label: Operating System + description: Enter the operating system and version in which the bug is observed. + placeholder: | + Red Hat Enterprise Linux (RHEL) 8.5 and 8.4 + SUSE Linux Enterprise Server (SLES) 15 SP2 and 15 SP1 + Ubuntu 20.04.2 and 20.04.1 + validations: + required: true + - type: textarea + id: playbook + attributes: + label: Playbook Used + description: Enter the playbook used to run the module or component. + validations: + required: true + - type: textarea + id: files + attributes: + label: Logs + description: Copy any relevant log output here or drop the log files in the text area. + Please be sure to remove all sensitive data such as IP addresses, host names, credentials, etc. + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to Reproduce + description: Provide the steps to reproduce this bug. + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: Enter the clear and concise description about the expected behaviour. 
+ validations: + required: true + - type: textarea + id: actual + attributes: + label: Actual Behavior + description: Enter the clear and concise description about the actual behaviour. + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: Drop any relevant screenshots in the text area. + validations: + required: false + - type: input + id: ainfo + attributes: + label: Additional Information + description: Provide any additional information about the bug. + validations: + required: false + - type: markdown + attributes: + value: | + Community Note + * Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) + to the original issue to help the community and maintainers prioritize this request + * Please do not leave "+1" or other comments that do not add relevant new information or questions, + they generate extra noise for issue followers and do not help prioritize the request + * If you are interested in working on this issue or have submitted a pull request, please leave a comment diff --git a/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..45c176da --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: OpenManageAnsible mailing list + alias: OpenManageAnsible@Dell.com + about: Please use OpenManageAnsible contact for questions and queries. 
diff --git a/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..55a53396 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,23 @@ +--- +name: 🚀 Feature Request +about: Suggest an idea for this project +title: "[FEATURE]:" +labels: type/feature-request, needs-triage +assignees: '' +--- +**Describe the solution you'd like** +Provide a clear and concise description about the new feature request. + +**Describe alternatives you've considered** +Provide a clear and concise description of any alternative solutions or features that you have considered. + +**Additional context** +Describe any relevant context or add any relevant screenshots about the feature request. + +**Community Note** + +* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) + to the original issue to help the community and maintainers prioritize this request +* Please do not leave "+1" or other comments that do not add relevant new information or questions, + they generate extra noise for issue followers and do not help prioritize the request +* If you are interested in working on this issue or have submitted a pull request, please leave a comment diff --git a/ansible_collections/dellemc/openmanage/.github/PULL_REQUEST_TEMPLATE.md b/ansible_collections/dellemc/openmanage/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..7027bc44 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,43 @@ +# Description +A few sentences describing the overall goals of the pull request's commits. 
+ +# GitHub Issues +List the GitHub issues impacted by this PR: + +| GitHub Issue # | +| -------------- | +| | + +# ISSUE TYPE + +- Bugfix Pull Request +- Docs Pull Request +- Feature Pull Request +- Test Pull Request + +##### COMPONENT NAME + + +##### OUTPUT + +```paste below + +``` +##### ADDITIONAL INFORMATION + + + + +```paste below + +``` +# Checklist: + +- [ ] I have performed a self-review of my own code to ensure there are no formatting, vetting, linting, or security issues +- [ ] I have verified that new and existing unit tests pass locally with my changes +- [ ] I have not allowed coverage numbers to degenerate +- [ ] I have maintained at least 90% code coverage +- [ ] I have commented my code, particularly in hard-to-understand areas +- [ ] I have made corresponding changes to the documentation +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] I have maintained backward compatibility \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml new file mode 100644 index 00000000..64a1aed8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml @@ -0,0 +1,130 @@ +name: CI + +on: + # Run CI against all pushes (direct commits, also merged PRs), Pull Requests + push: + pull_request: + # Runs CI on every day (at 06:00 UTC) + schedule: + - cron: '0 6 * * *' + +jobs: + build: + name: Build collection + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + ansible-version: [stable-2.10, stable-2.11, stable-2.12, stable-2.13] + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + python-version: 3.9 + + - name: Install ansible (${{ matrix.ansible-version }}) + run: pip install pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz 
--disable-pip-version-check + + - name: Build a collection tarball + run: ansible-galaxy collection build --output-path "${GITHUB_WORKSPACE}/.cache/collection-tarballs" + + - name: Store migrated collection artifacts + uses: actions/upload-artifact@v1 + with: + name: collection + path: .cache/collection-tarballs + +### +# Unit tests (OPTIONAL) +# +# https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html + + unit: + name: Unit Tests + needs: [build] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.8, 3.9, '3.10'] + ansible-version: [stable-2.11, stable-2.12, stable-2.13] + exclude: + - ansible-version: stable-2.11 + python-version: '3.10' + + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install ansible (${{ matrix.ansible-version }}) version + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check + + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: collection + path: .cache/collection-tarballs + + - name: Setup Unit test Pre-requisites + run: | + ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz + git clone https://github.com/ansible/ansible.git + cp -rf ansible/test/units/compat /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/ + cp -rf ansible/test/units/modules/utils.py /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/ + sed -i 's/units/ansible_collections.dellemc.openmanage.tests.unit/' /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py + if [ -f /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/requirements.txt ]; then pip install -r 
/home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/requirements.txt; fi + + + - name: Run Unit tests using ansible-test + run: ansible-test units -v --color --python ${{ matrix.python-version }} --coverage + working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage + + #- name: Generate coverage report + # run: ansible-test coverage xml -v --group-by command --group-by version + # working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage + +### +# Sanity tests (REQUIRED) +# +# https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html + + sanity: + name: Sanity Tests + runs-on: ubuntu-latest + needs: [build] + strategy: + fail-fast: false + matrix: + ansible-version: [stable-2.11, stable-2.12, stable-2.13, devel] + + steps: + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + # it is just required to run that once as "ansible-test sanity" in the docker image + # will run on all python versions it supports. + python-version: 3.9 + + - name: Install ansible (${{ matrix.ansible-version }}) version + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check + + - name: Download migrated collection artifacts + uses: actions/download-artifact@v1 + with: + name: collection + path: .cache/collection-tarballs + + - name: Setup Sanity test Pre-requisites + run: ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz + + # run ansible-test sanity inside of Docker. + # The docker container has all the pinned dependencies that are required + # and all python versions ansible supports. 
+ - name: Run sanity tests + run: ansible-test sanity --docker -v --color + working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage diff --git a/ansible_collections/dellemc/openmanage/.gitignore b/ansible_collections/dellemc/openmanage/.gitignore new file mode 100644 index 00000000..545a0000 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/.gitignore @@ -0,0 +1,9 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[co] +*.py[cod] +*$py.class +*.retry +.idea/ +*.bak +rstdocgen.sh diff --git a/ansible_collections/dellemc/openmanage/CHANGELOG.rst b/ansible_collections/dellemc/openmanage/CHANGELOG.rst new file mode 100644 index 00000000..9e9af81f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/CHANGELOG.rst @@ -0,0 +1,857 @@ +================================================= +Dell EMC OpenManage Ansible Modules Release Notes +================================================= + +.. contents:: Topics + + +v6.3.0 +====== + +Release Summary +--------------- + +Support for LockVirtualDisk operation and to configure Remote File Share settings using idrac_virtual_media module. + +Major Changes +------------- + +- idrac_redfish_storage_controller - This module is enhanced to support LockVirtualDisk operation. +- idrac_virtual_media - This module allows to configure Remote File Share settings. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. 
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.idrac_virtual_media - Configure the virtual media settings. + +v6.2.0 +====== + +Release Summary +--------------- + +Added clear pending BIOS attributes, reset BIOS to default settings, and configure BIOS attribute using Redfish enhancements for idrac_bios. + +Major Changes +------------- + +- idrac_bios - The module is enhanced to support clear pending BIOS attributes, reset BIOS to default settings, and configure BIOS attribute using Redfish. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. 
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v6.1.0 +====== + +Release Summary +--------------- + +Support for device-specific operations on OpenManage Enterprise and configuring boot settings on iDRAC. + +Major Changes +------------- + +- idrac_boot - Support for configuring the boot settings on iDRAC. +- ome_device_group - The module is enhanced to support the removal of devices from a static device group. +- ome_devices - Support for performing device-specific operations on OpenManage Enterprise. + +Minor Changes +------------- + +- ome_configuration_compliance_info - The module is enhanced to report single device compliance information. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. 
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.idrac_boot - Configure the boot order settings. 
+- dellemc.openmanage.ome_devices - Perform device-specific operations on target devices + +v6.0.0 +====== + +Release Summary +--------------- + +Added collection metadata for creating execution environments, deprecation of share parameters, and support for configuring iDRAC attributes using idrac_attributes module. + +Major Changes +------------- + +- Added collection metadata for creating execution environments. +- Refactored the Markdown (MD) files and content for better readability. +- The share parameters are deprecated from the following modules - idrac_network, idrac_timezone_ntp, dellemc_configure_idrac_eventing, dellemc_configure_idrac_services, dellemc_idrac_lc_attributes, dellemc_system_lockdown_mode. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. 
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.idrac_attributes - Configure the iDRAC attributes + +v5.5.0 +====== + +Release Summary +--------------- + +Support to generate certificate signing request, import, and export certificates on iDRAC. + +Minor Changes +------------- + +- idrac_redfish_storage_controller - This module is updated to use the Job Service URL instead of Task Service URL for job tracking. +- idrac_server_config_profile - This module is updated to use the Job Service URL instead of Task Service URL for job tracking. +- redfish_firmware - This module is updated to use the Job Service URL instead of Task Service URL for job tracking. + +Bugfixes +-------- + +- idrac_server_config_profile - Issue(234817) – When an XML format is exported using the SCP, the module breaks while waiting for the job completion. 
+- ome_application_console_preferences - Issue(224690) - The module does not display a proper error message when an unsupported value is provided for the parameters report_row_limit, email_sender_settings, and metric_collection_settings, and the value is applied on OpenManage Enterprise + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. 
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.idrac_certificates - Configure certificates for iDRAC. + +v5.4.0 +====== + +Release Summary +--------------- + +Support for export, import, and preview the Server Configuration Profile (SCP) configuration using Redfish and added support for check mode. + +Major Changes +------------- + +- idrac_server_config_profile - The module is enhanced to support export, import, and preview the SCP configuration using Redfish and added support for check mode. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. 
+- ome_application_console_preferences - Issue(224690) - The module does not display a proper error message when an unsupported value is provided for the parameters report_row_limit, email_sender_settings, and metric_collection_settings, and the value is applied on OpenManage Enterprise. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v5.3.0 +====== + +Release Summary +--------------- + +Added check mode and idempotency support for redfish_storage_volume and idempotency support for ome_smart_fabric_uplink. 
For ome_diagnostics, added support for debug logs and added supportassist_collection as a choice for the log_type argument to export SupportAssist logs. + +Minor Changes +------------- + +- ome_diagnostics - Added "supportassist_collection" as a choice for the log_type argument to export SupportAssist logs. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/309) +- ome_diagnostics - The module is enhanced to support debug logs. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/308) +- ome_smart_fabric_uplink - The module is enhanced to support idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/253) +- redfish_storage_volume - The module is enhanced to support check mode and idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/245) + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_console_preferences - Issue(224690) - The module does not display a proper error message when an unsupported value is provided for the parameters report_row_limit, email_sender_settings, and metric_collection_settings, and the value is applied on OpenManage Enterprise. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. 
However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v5.2.0 +====== + +Release Summary +--------------- + +Support to configure console preferences on OpenManage Enterprise. + +Minor Changes +------------- + +- idrac_redfish_storage_controller - This module is enhanced to support the following settings with check mode and idempotency - UnassignSpare, EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID, ChangePDStateToOnline, ChangePDStateToOffline. +- ome_diagnostics - The module is enhanced to support check mode and idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/345) +- ome_diagnostics - This module is enhanced to extract log from lead chassis. 
(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/310) +- ome_profile - The module is enhanced to support check mode and idempotency. +- ome_profile - The module is enhanced to support modifying a profile based on the attribute names instead of the ID. +- ome_template - The module is enhanced to support check mode and idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/255) +- ome_template - The module is enhanced to support modifying a template based on the attribute names instead of the ID. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/358) + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_console_preferences - Issue(224690) - The module does not display a proper error message when an unsupported value is provided for the parameters report_row_limit, email_sender_settings, and metric_collection_settings, and the value is applied on OpenManage Enterprise. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. 
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_application_console_preferences - Configures console preferences on OpenManage Enterprise. + +v5.1.0 +====== + +Release Summary +--------------- + +Support for OpenManage Enterprise Modular server interface management. + +Minor Changes +------------- + +- ome_application_network_address - The module is enhanced to support check mode and idempotency. +- ome_device_info - The module is enhanced to return a blank list when devices or baselines are not present in the system. +- ome_firmware_baseline_compliance_info - The module is enhanced to return a blank list when devices or baselines are not present in the system. +- ome_firmware_baseline_info - The module is enhanced to return a blank list when devices or baselines are not present in the system. +- ome_identity_pool - The iSCSI Initiator and Initiator IP Pool attributes are not mandatory to create an identity pool. 
(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/329) +- ome_identity_pool - The module is enhanced to support check mode and idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/328) +- ome_template_identity_pool - The module is enhanced to support check mode and idempotency. +- redfish_event_subscription - The module is enhanced to support check mode and idempotency. + +Bugfixes +-------- + +- idrac_firmware - Issue(220130) The socket.timeout issue that occurs during the wait_for_job_completion() job is fixed. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. 
+- ome_device_power_settings - Issue(212679) - The module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_server_interface_profile_info - Retrieves the information of server interface profile on OpenManage Enterprise Modular. +- dellemc.openmanage.ome_server_interface_profiles - Configures server interface profiles on OpenManage Enterprise Modular. + +v5.0.1 +====== + +Release Summary +--------------- + +Support to provide custom or organizational CA signed certificate for SSL validation from the environment variable. + +Major Changes +------------- + +- All modules can read custom or organizational CA signed certificate from the environment variables. Please refer to `SSL Certificate Validation `_ section in the `README.md `_ for modification to existing playbooks or setting environment variable. + +Bugfixes +-------- + +- All playbooks require modification because the validate_certs argument is set to True by default (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/357) +- The ome_application_network_time and ome_application_network_proxy modules are breaking due to the changes introduced for SSL validation.(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/360) + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. 
Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v5.0.0 +====== + +Release Summary +--------------- + +HTTPS SSL support for all modules and quick deploy settings. 
+ +Major Changes +------------- + +- All modules now support SSL over HTTPS and socket level timeout. + +Breaking Changes / Porting Guide +-------------------------------- + +- HTTPS SSL certificate validation is a **breaking change** and will require modification in the existing playbooks. Please refer to `SSL Certificate Validation `_ section in the `README.md `_ for modification to existing playbooks. + +Bugfixes +-------- + +- idrac_bios - The issue while configuring boot sources is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/347) + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. 
+- ome_device_power_settings - Issue(212679) - The module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_device_quick_deploy - Configure Quick Deploy settings on OpenManage Enterprise Modular + +v4.4.0 +====== + +Release Summary +--------------- + +Support to configure login security, session inactivity timeout, and local access settings. + +Minor Changes +------------- + +- ome_firmware - The module is enhanced to support check mode and idempotency (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/274) +- ome_template - An example task is added to create a compliance template from reference device (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/339) + +Bugfixes +-------- + +- ome_device_location - The issue that applies values of the location settings only in lowercase is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/341) + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. 
+- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_local_access_configuration - Issue(215035) - The module reports ``Successfully updated the local access setting`` if an unsupported value is provided for the parameter timeout_limit. However, this value is not actually applied on OpenManage Enterprise Modular. +- ome_device_local_access_configuration - Issue(217865) - The module does not display a proper error message if an unsupported value is provided for the user_defined and lcd_language parameters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. 
+ +New Modules +----------- + +- dellemc.openmanage.ome_application_network_settings - This module allows you to configure the session inactivity timeout settings +- dellemc.openmanage.ome_application_security_settings - Configure the login security properties +- dellemc.openmanage.ome_device_local_access_configuration - Configure local access settings on OpenManage Enterprise Modular + +v4.3.0 +====== + +Release Summary +--------------- + +Support to configure network services, syslog forwarding, and SMTP settings. + +Known Issues +------------ + +- idrac_user - Issue(192043) The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_application_alerts_smtp - Issue(212310) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters. +- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. +- ome_device_power_settings - Issue(212679) - The module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. 
+ +New Modules +----------- + +- dellemc.openmanage.ome_application_alerts_smtp - This module allows to configure SMTP or email configurations +- dellemc.openmanage.ome_application_alerts_syslog - Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular +- dellemc.openmanage.ome_device_network_services - Configure chassis network services settings on OpenManage Enterprise Modular + +v4.2.0 +====== + +Release Summary +--------------- + +Support to configure OME Modular devices network, power, and location settings. + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_device_power_settings - Issue(212679) The ome_device_power_settings module errors out with the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.`` +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_device_location - Configure device location settings on OpenManage Enterprise Modular +- dellemc.openmanage.ome_device_mgmt_network - Configure network settings of devices on OpenManage Enterprise Modular +- dellemc.openmanage.ome_device_power_settings - Configure chassis power settings on OpenManage Enterprise Modular + +v4.1.0 +====== + +Release Summary +--------------- + +Support for Redfish event subscriptions and enhancements to ome_firmware module. 
+ +Major Changes +------------- + +- ome_firmware - Added option to stage the firmware update and support for selecting components and devices for baseline-based firmware update. + +Minor Changes +------------- + +- ome_template_network_vlan - Enabled check_mode support. + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.redfish_event_subscription - Manage Redfish Subscriptions + +v4.0.0 +====== + +Release Summary +--------------- + +Support for configuring active directory user group on OpenManage Enterprise and OpenManage Enterprise Modular. + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. 
+ +New Modules +----------- + +- dellemc.openmanage.ome_active_directory - Configure Active Directory groups to be used with Directory Services on OpenManage Enterprise and OpenManage Enterprise Modular +- dellemc.openmanage.ome_domain_user_groups - Create, modify, or delete an Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular + +v3.6.0 +====== + +Release Summary +--------------- + +Support for configuring device slot name and export SupportAssist device collections from OpenManage Enterprise and OpenManage Enterprise Modular. + +Bugfixes +-------- + +- dellemc_idrac_storage_volume - Module fails if the BlockSize, FreeSize, or Size state of the physical disk is set to "Not Available". + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_chassis_slots - Rename sled slots on OpenManage Enterprise Modular +- dellemc.openmanage.ome_diagnostics - Export technical support logs(TSR) to network share location + +v3.5.0 +====== + +Release Summary +--------------- + +Support for managing static device groups on OpenManage Enterprise. + +Major Changes +------------- + +- idrac_server_config_profile - Added support for exporting and importing Server Configuration Profile through HTTP/HTTPS share. +- ome_device_group - Added support for adding devices to a group using the IP addresses of the devices and group ID. 
+ +Bugfixes +-------- + +- Handled invalid share and unused imports cleanup for iDRAC modules (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/268) + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_groups - Manages static device groups on OpenManage Enterprise + +v3.4.0 +====== + +Release Summary +--------------- + +OpenManage Enterprise firmware baseline and firmware catalog modules updated to support checkmode. + +Major Changes +------------- + +- ome_firmware_baseline - Module supports check mode, and allows the modification and deletion of firmware baselines. +- ome_firmware_catalog - Module supports check mode, and allows the modification and deletion of firmware catalogs. + +Minor Changes +------------- + +- ome_firmware_catalog - Added support for repositories available on the Dell support site. +- ome_template_network_vlan - Added the input option which allows to apply the modified VLAN settings immediately on the associated modular-system servers. + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. 
+- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v3.3.0 +====== + +Release Summary +--------------- + +OpenManage Enterprise device group and device discovery support added + +Minor Changes +------------- + +- ome_firmware_baseline - Allows to retrieve the device even if it not in the first 50 device IDs + +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_configuration_compliance_info - Issue(195592) Module may error out with the message ``unable to process the request because an error occurred``. If the issue persists, report it to the system administrator. +- ome_smart_fabric - Issue(185322) Only three design types are supported by OpenManage Enterprise Modular but the module successfully creates a fabric when the design type is not supported. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_device_group - Add devices to a static device group on OpenManage Enterprise +- dellemc.openmanage.ome_discovery - Create, modify, or delete a discovery job on OpenManage Enterprise + +v3.2.0 +====== + +Release Summary +--------------- + +Configuration compliance related modules added + +Minor Changes +------------- + +- ome_template - Allows to deploy a template on device groups. 
+ +Known Issues +------------ + +- idrac_user - Issue(192043) Module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again. +- ome_configuration_compliance_info - Issue(195592) Module may error out with the message ``unable to process the request because an error occurred``. If the issue persists, report it to the system administrator. +- ome_smart_fabric - Issue(185322) Only three design types are supported by OpenManage Enterprise Modular but the module successfully creates a fabric when the design type is not supported. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_configuration_compliance_baseline - Create, modify, and delete a configuration compliance baseline and remediate non-compliant devices on OpenManage Enterprise +- dellemc.openmanage.ome_configuration_compliance_info - Device compliance report for devices managed in OpenManage Enterprise + +v3.1.0 +====== + +Release Summary +--------------- + +OpenManage Enterprise profiles management support added. 
+ +Bugfixes +-------- + +- ome_firmware_baseline_compliance_info - OMEnt firmware baseline compliance info pagination support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/171) +- ome_network_proxy - OMEnt network proxy check mode support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/187) + +Known Issues +------------ + +- ome_smart_fabric - Issue(185322) Only three design types are supported by OpenManage Enterprise Modular but the module successfully creates a fabric when the design type is not supported. +- ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_profile - Create, modify, delete, assign, unassign and migrate a profile on OpenManage Enterprise + +v3.0.0 +====== + +Release Summary +--------------- + +Deprecations, issue fixes, and standardization of modules as per ansible guidelines. + +Major Changes +------------- + +- Removed the existing deprecated modules. + +Minor Changes +------------- + +- Coding Guidelines, Contributor Agreement, and Code of Conduct files are added to the collection. +- New deprecation changes for ``dellemc_get_system_inventory`` and ``dellemc_get_firmware_inventory`` ignored for ansible 2.9 sanity test. +- The modules are standardized as per ansible guidelines. + +Deprecated Features +------------------- + +- The ``dellemc_get_firmware_inventory`` module is deprecated and replaced with ``idrac_firmware_info``. +- The ``dellemc_get_system_inventory`` module is deprecated and replaced with ``idrac_system_info``. + +Bugfixes +-------- + +- GitHub issue fix - Module dellemc_idrac_storage_volume.py broken. 
(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/212) +- GitHub issue fix - ome_smart_fabric Fabric management is not supported on the specified system. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/179) +- Known issue fix #187956: If an invalid job_id is provided, the idrac_lifecycle_controller_job_status_info module returns an error message with the description of the issue. +- Known issue fix #188267: No error message is displayed when the target iDRAC with firmware version less than 3.30.30.30 is updated. +- Sanity fixes as per ansible guidelines to all modules. + +Known Issues +------------ + +- Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +v2.1.5 +====== + +Release Summary +--------------- + +The idrac_firmware module is enhanced to include checkmode support and job tracking. + +Minor Changes +------------- + +- The idrac_server_config_profile module supports IPv6 address format. + +Bugfixes +-------- + +- Identity pool does not reset when a network VLAN is added to a template in the ome_template_network_vlan module. `#169 `_ +- Missing parameter added in ome_smart_fabric_uplink module documentation. `#181 `_ + +Known Issues +------------ + +- Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. +- Issue 2(187956): If an invalid job_id is provided, idrac_lifecycle_controller_job_status_info returns an error message. This error message does not contain information about the exact issue with the invalid job_id. 
+- Issue 3(188267): While updating the iDRAC firmware, the idrac_firmware module completes execution before the firmware update job is completed. An incorrect message is displayed in the task output as 'DRAC WSMAN endpoint returned HTTP code '400' Reason 'Bad Request''. This issue may occur if the target iDRAC firmware version is less than 3.30.30.30 + +v2.1.4 +====== + +Release Summary +--------------- + +Fabric management related modules ome_smart_fabric and ome_smart_fabric_uplink are added. + +Known Issues +------------ + +- Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation of multiple uplinks of the same name even though this is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified. + +New Modules +----------- + +- dellemc.openmanage.ome_smart_fabric - Create, modify or delete a fabric on OpenManage Enterprise Modular +- dellemc.openmanage.ome_smart_fabric_uplink - Create, modify or delete an uplink for a fabric on OpenManage Enterprise Modular + +v2.1.3 +====== + +Release Summary +--------------- + +Network configuration service related modules ome_network_vlan, ome_network_port_breakout and ome_network_vlan_info are added. + +New Modules +----------- + +- dellemc.openmanage.ome_network_port_breakout - This module allows to automate the port portioning or port breakout to logical sub ports +- dellemc.openmanage.ome_network_vlan - Create, modify & delete a VLAN +- dellemc.openmanage.ome_network_vlan_info - Retrieves the information about networks VLAN(s) present in OpenManage Enterprise + +v2.1.2 +====== + +Release Summary +--------------- + +The dellemc_change_power_state and dellemc_configure_idrac_users modules are standardized as per ansible guidelines. 8 GitHub issues are fixed. + +Minor Changes +------------- + +- The idrac_server_config_profile module supports a user provided file name for an export operation. 
+ +Deprecated Features +------------------- + +- The dellemc_change_power_state module is deprecated and replaced with the redfish_powerstate module. +- The dellemc_configure_idrac_users module is deprecated and replaced with the idrac_user module. + +Bugfixes +-------- + +- Documentation improvement request `#140 `_ +- Executing dellemc_configure_idrac_users twice fails the second attempt `#100 `_ +- dellemc_change_power_state fails if host is already on `#132 `_ +- dellemc_change_power_state not idempotent `#115 `_ +- dellemc_configure_idrac_users error `#26 `_ +- dellemc_configure_idrac_users is unreliable - errors `#113 `_ +- idrac_server_config_profile improvement requested (request) `#137 `_ +- ome_firmware_catalog.yml example errors `#145 `_ + +New Modules +----------- + +- dellemc.openmanage.idrac_user - Configure settings for user accounts +- dellemc.openmanage.redfish_powerstate - Manage device power state + +v2.1.1 +====== + +Release Summary +--------------- + +Support for OpenManage Enterprise Modular and other enhancements. + +Major Changes +------------- + +- Standardization of ten iDRAC ansible modules based on ansible guidelines. +- Support for OpenManage Enterprise Modular. + +Deprecated Features +------------------- + +- The dellemc_configure_bios module is deprecated and replaced with the idrac_bios module. +- The dellemc_configure_idrac_network module is deprecated and replaced with the idrac_network module. +- The dellemc_configure_idrac_timezone module is deprecated and replaced with the idrac_timezone_ntp module. +- The dellemc_delete_lc_job and dellemc_delete_lc_job_queue modules are deprecated and replaced with the idrac_lifecycle_controller_jobs module. +- The dellemc_export_lc_logs module is deprecated and replaced with the idrac_lifecycle_controller_logs module. +- The dellemc_get_lc_job_status module is deprecated and replaced with the idrac_lifecycle_controller_job_status_info module. 
+- The dellemc_get_lcstatus module is deprecated and replaced with the idrac_lifecycle_controller_status_info module. +- The dellemc_idrac_reset module is deprecated and replaced with the idrac_reset module. +- The dellemc_setup_idrac_syslog module is deprecated and replaced with the idrac_syslog module. + +New Modules +----------- + +- dellemc.openmanage.idrac_bios - Configure the BIOS attributes +- dellemc.openmanage.idrac_lifecycle_controller_job_status_info - Get the status of a Lifecycle Controller job +- dellemc.openmanage.idrac_lifecycle_controller_jobs - Delete the Lifecycle Controller Jobs +- dellemc.openmanage.idrac_lifecycle_controller_logs - Export Lifecycle Controller logs to a network share or local path. +- dellemc.openmanage.idrac_lifecycle_controller_status_info - Get the status of the Lifecycle Controller +- dellemc.openmanage.idrac_network - Configures the iDRAC network attributes +- dellemc.openmanage.idrac_reset - Reset iDRAC +- dellemc.openmanage.idrac_syslog - Enable or disable the syslog on iDRAC +- dellemc.openmanage.idrac_timezone_ntp - Configures time zone and NTP on iDRAC + +v2.1.0 +====== + +Release Summary +--------------- + +The `Dell EMC OpenManage Ansible Modules `_ are available on Ansible Galaxy as a collection. 
diff --git a/ansible_collections/dellemc/openmanage/FILES.json b/ansible_collections/dellemc/openmanage/FILES.json new file mode 100644 index 00000000..09db209d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/FILES.json @@ -0,0 +1,3113 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/CODEOWNERS", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22c50cccda3a7ff5dcb7285a2bfebca1428e8b6b355f3f376072bffbb12f6c5b", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/ask_a_question.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e172d0ed215d9561ccf644c10a0f7fb5ea722a427ff13741f8634053d62a9338", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56f4cdaffe4a97636427b91eb59acb67bc92f869451d137308428771d48e6aad", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "155b8ff8561f87047a5e2cebac1737edfbba156c21cc733fe636d84ca222d627", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e15deb214aa7cb6847890eb95491e027c021b7f1ec01c7fbda8dca021d1162a0", + "format": 1 + }, + { + "name": ".github/PULL_REQUEST_TEMPLATE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "186d97efed69e2211f154847a8ac52f263fa54319620bf1dcea00b59f494a969", + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ansible-test.yml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecbc3e86c750323e17fbb6b19e901e96126a8473bde151d7c37ce99317c3ebcc", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7425e318ae94763315621de05efc6ebf3386a7ba4df51a8421399931e4c9d559", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33d93cd495acc97b2e672a688db5b7137217a3940c8294d1549e3544198f1931", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39f289e03218c0a4781c684393bf036c8581169ac7e5bc8b778fc8258959e350", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a1f1aeaa0094c7ae47c9210c9fd5ccca2f3cbc78e3da83bd6164ec847ed9757", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f81e61db5decb63a54e21faa199199646405a20d8f40ec0e74aa2c8493ba22d1", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8af435bbe577b80a5fda86567dcac849e7762f04a7251e029833b3f073272d84", + "format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/ADDITIONAL_INFORMATION.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83c4a24d8d137a954c891ffcc1c75a8157d7957204a59797c5fd4a9dfd3326da", + "format": 1 + }, + { + "name": "docs/BRANCHING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"347e9f1d9850cbb807f3ac5680a4db0145a90f6714ee23c2d9ea3618806c8609", + "format": 1 + }, + { + "name": "docs/CODE_OF_CONDUCT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b115034e51a649fda55572de32880b12224311bc35517d0534de26223683230a", + "format": 1 + }, + { + "name": "docs/COMMITTER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9faa99978e41d71c8f84fb1e764ab495075c57e81c8a26edcf0bd41bdedda326", + "format": 1 + }, + { + "name": "docs/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffe89c3652d21757f689570ac1a14eaa4dc6e35bc99793f48e6eef21d2b84d54", + "format": 1 + }, + { + "name": "docs/DEBUG.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b2ce4f68fc99226b34a0ebced6fd1919f514f0a33c877b377566b146972d4b6", + "format": 1 + }, + { + "name": "docs/DOCUMENTATION.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3fd57baac6fe401783ff5171a9eeb304514bc4b78c7b1182adeb3d0eb3be9ea", + "format": 1 + }, + { + "name": "docs/EXECUTION_ENVIRONMENT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e3129e2e646856ae56581aee0eae12edd28f4cef976e5095449905f34d5f989", + "format": 1 + }, + { + "name": "docs/ISSUE_TRIAGE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25dd3183365794c152fe138272727efcded3456422c5dc16c18f2b31a8f2ab16", + "format": 1 + }, + { + "name": "docs/MAINTAINERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "480717a42cbfbb59ee6afb30d1df16f4e6f586b1375a262ba2c87032811fb929", + "format": 1 + }, + { + "name": "docs/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e55574f9fc130356178fd08b9c0a0bd0c678542b3f4d51fe27f0df6025174e8", + "format": 1 + }, + { + "name": "docs/SECURITY.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b4bcf55efb2913e91247bcbc9e2944cd6bdca0c80a2f6314dbd62802b167f3c0", + "format": 1 + }, + { + "name": 
"docs/SUPPORT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd2fd536176f16d1722ee4fb035b8c8a7b92d77395a8fd6b74f56a9dc5cb3d86", + "format": 1 + }, + { + "name": "docs/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/modules/dellemc_configure_idrac_eventing.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7dc08f312c63d95f824a9326c084cf5839f89dd87a29aea765009ea08913a79", + "format": 1 + }, + { + "name": "docs/modules/dellemc_configure_idrac_services.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0c959f50de0a8fa989f419fae45c05f48083e96a7a6f22f5d1411ba2a9b4240", + "format": 1 + }, + { + "name": "docs/modules/dellemc_get_firmware_inventory.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30e7d15f0b351cfb57213e57624f40c1e8600ee91de9ffb7263f66c14dd778c8", + "format": 1 + }, + { + "name": "docs/modules/dellemc_get_system_inventory.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "605a7522a3d1afca04f6dba379db42b1c32fe2abdc9dbcb5b3dcbe7a69407f41", + "format": 1 + }, + { + "name": "docs/modules/dellemc_idrac_lc_attributes.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c3ef5e5e65f160201344d5f1e9793097bd0a0697ad13f531b01c0d0da5c3e2a", + "format": 1 + }, + { + "name": "docs/modules/dellemc_idrac_storage_volume.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8e2d75172282d6c4d469d615751fa32230be32036cddd2795d61461124e755b", + "format": 1 + }, + { + "name": "docs/modules/dellemc_system_lockdown_mode.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3beff0c42a0ce26a45a8ee4a84d078793a413a4dbae424be2a4f07fdb5ba5ca", + "format": 1 + }, + { + "name": "docs/modules/idrac_attributes.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf4f956744aba6400dbf4befc937d2ff427deb864106dfb926eecba5952eecd1", + "format": 1 + 
}, + { + "name": "docs/modules/idrac_bios.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39acaa62f5d57ed094fcb0806ef32a223c90ed18ad2d24ad346b4c6faca07de0", + "format": 1 + }, + { + "name": "docs/modules/idrac_boot.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f92a3f16c33c26c60d13c6b7f04805a5a54fb62e768f0316a3ae94bae4856e61", + "format": 1 + }, + { + "name": "docs/modules/idrac_certificates.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2e7fa82293b4ad1657835f3b662b26fd6e88477d58b2f162475117ac84b3fc3", + "format": 1 + }, + { + "name": "docs/modules/idrac_firmware.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e529552080ae3d94468acbc1e4fef21eb7d2e90924824dd1b07efb96ad447986", + "format": 1 + }, + { + "name": "docs/modules/idrac_firmware_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1be081797ba06209876b64831d73289a0593b8fd623324b297c770e2f5e49b2a", + "format": 1 + }, + { + "name": "docs/modules/idrac_lifecycle_controller_job_status_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5235511d649d1dae82d844adcc9e23ad518c2ba036fca74d2961209ff8a6c1f0", + "format": 1 + }, + { + "name": "docs/modules/idrac_lifecycle_controller_jobs.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44ffce0f2130bd614527d649dc558299435f8dcdcc92b34786c8e642a06cfbeb", + "format": 1 + }, + { + "name": "docs/modules/idrac_lifecycle_controller_logs.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8179c7953a0bcf51cffc41820621a51e4e3c6396aa6c0cfd36d02edb5df0d3ab", + "format": 1 + }, + { + "name": "docs/modules/idrac_lifecycle_controller_status_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48f9d4b1674f6d3c51c48331ac174be17e54fc397541431d644d4733d6e0be03", + "format": 1 + }, + { + "name": "docs/modules/idrac_network.rst", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "f9bef17251a619d1b5a5ae58a3f2c80ea00fa81b99bb27bbe84a1599f0131173", + "format": 1 + }, + { + "name": "docs/modules/idrac_os_deployment.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3147488b8b3f658defd0731694961fd8d39c4cd1dafa53916f23a3ff5407bd5c", + "format": 1 + }, + { + "name": "docs/modules/idrac_redfish_storage_controller.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c540294d86abb08227e787e97062281e8af81d4eadafdbabfef5bd39ce44d016", + "format": 1 + }, + { + "name": "docs/modules/idrac_reset.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "888b687468837582fbe609b1a0329dbe9c1acb4703020b4270ef022441f72047", + "format": 1 + }, + { + "name": "docs/modules/idrac_server_config_profile.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ba24f4e1ad7a537fa6f7ff6491c72c8a4726dfcc30e4a2bd3f84a60ca6e1a77", + "format": 1 + }, + { + "name": "docs/modules/idrac_syslog.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01ef3f80fa0325f3a645b3547ebb4bdb93049009c098d6b370751cc6a5c4818b", + "format": 1 + }, + { + "name": "docs/modules/idrac_system_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1a06be853fc394c2cab75d0a7ea0b51e5a4844bff757b46eb7e27cd133a043b", + "format": 1 + }, + { + "name": "docs/modules/idrac_timezone_ntp.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a81533bcc3786ccdce796e3220377693cef67afdd4bb825d3f5c272624fcf986", + "format": 1 + }, + { + "name": "docs/modules/idrac_user.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d488071ac6f8ee3fa1c062ec2e8bd0f0031dd48077a14aaa7562df5c7a716f0c", + "format": 1 + }, + { + "name": "docs/modules/idrac_virtual_media.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48cdb83e815c19a5cb03a3d9a58b7c299aa1a79df02a3fca3a0dbf74e100092d", + "format": 1 + }, + { + "name": 
"docs/modules/ome_active_directory.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "268326a562c7f86cc156fab384d249adc48ce528440f70fc6d34cd922409e374", + "format": 1 + }, + { + "name": "docs/modules/ome_application_alerts_smtp.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "432872f2f50db4c30275de9493002ef4f4cf8a100515933565a4b5b1e164b1f2", + "format": 1 + }, + { + "name": "docs/modules/ome_application_alerts_syslog.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e50fff16a925677caf682764f2c042c24bdf54df3739515a5bd8b93fcdf559d3", + "format": 1 + }, + { + "name": "docs/modules/ome_application_certificate.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "716271a0b867c3196027821f7263cf48bbf84c5e9721eda0a1c3f60adab115ab", + "format": 1 + }, + { + "name": "docs/modules/ome_application_console_preferences.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ffe9ecf5bf8ebe75da17bc6052e420d90ab78e7a24ee822783140cdfda7065a", + "format": 1 + }, + { + "name": "docs/modules/ome_application_network_address.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00ae99a219dcbcd8e828ac36f8613fb11fd838bbc81f65c9ca836434bb88018c", + "format": 1 + }, + { + "name": "docs/modules/ome_application_network_proxy.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de03d8f58756723fb93199312cdd6b2da4ebfda2447ed3cb34d009275e8ced66", + "format": 1 + }, + { + "name": "docs/modules/ome_application_network_settings.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9cd87b6e95d8692f3e351ae92641aa005966c18fde6922994060735ac37235e", + "format": 1 + }, + { + "name": "docs/modules/ome_application_network_time.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58b88646e11c35d898cda6e9223d86fdd0aad2b541e216c62cbbca64100a8ff5", + "format": 1 + }, + { + "name": "docs/modules/ome_application_network_webserver.rst", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d59a6f47e46c6ffd8ba4fb5b7f78fc1747124e299641e4667372afb3657de8df", + "format": 1 + }, + { + "name": "docs/modules/ome_application_security_settings.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6954d37ac2c89de427b3fd076a7e3cc80f2c43a22a11cbaac346034931ed340", + "format": 1 + }, + { + "name": "docs/modules/ome_chassis_slots.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c14e2b5f0bc7ce3df14a190d857dac3c90eea09585d0d242181ea4dbfdac914b", + "format": 1 + }, + { + "name": "docs/modules/ome_configuration_compliance_baseline.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27b77e4b2c33b96aff213f762bd43f3867311d5ff959de456bf028900fbaa333", + "format": 1 + }, + { + "name": "docs/modules/ome_configuration_compliance_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "585af29399c6395d976a6039d931cb7c159df5334d59ef2b9a170db119a8f250", + "format": 1 + }, + { + "name": "docs/modules/ome_device_group.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c1fceaa89a11c61408b78a964b59b5350a9eb7cc188703c866cf0cb653ac64b", + "format": 1 + }, + { + "name": "docs/modules/ome_device_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aba861ece6349eacae599821f44e863abadd45b155c440f5c3a28b319afe164a", + "format": 1 + }, + { + "name": "docs/modules/ome_device_local_access_configuration.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "234109a943b1157c8583226757f1f2c592bba0a8e1b9ed22ea3a2a5b26abdfb7", + "format": 1 + }, + { + "name": "docs/modules/ome_device_location.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f3fd616d9cb1f3587a31efe2be8e604edb1043de5dd07e005af2f7b627c2039", + "format": 1 + }, + { + "name": "docs/modules/ome_device_mgmt_network.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"7323a62e564f3990ca74c34179afefaebb264d313b1799888620ce236532455e", + "format": 1 + }, + { + "name": "docs/modules/ome_device_network_services.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6e7cdd552c4910c2a2dcc3b659d3b1176d9a4e404f76d409d97ad66c833b314", + "format": 1 + }, + { + "name": "docs/modules/ome_device_power_settings.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61f69b8173d714735cee021295d212cd3e4d82ce3e8f43668396dad6e719005d", + "format": 1 + }, + { + "name": "docs/modules/ome_device_quick_deploy.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acad4ef2ecbd4ca878f1f3c65779cba67cdba7ba5a323ce9645ccfd34a15aa8c", + "format": 1 + }, + { + "name": "docs/modules/ome_devices.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d951d5e26fd41565b879285a9492a9a625070152fa3885549d407df56ca870d3", + "format": 1 + }, + { + "name": "docs/modules/ome_diagnostics.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a209635ba378e6725e75f001d26ed19438172e04f6a76bf5ccdbf504051c4498", + "format": 1 + }, + { + "name": "docs/modules/ome_discovery.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a69136c164915d893ab8068517c0c048db4cb5fc06bfa602f2f2b2a9f8c05f82", + "format": 1 + }, + { + "name": "docs/modules/ome_domain_user_groups.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6bfadb6b820c870834a370f7ffb6e971e8758adc58c6eb7078a9141fb0b56f2f", + "format": 1 + }, + { + "name": "docs/modules/ome_firmware.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "332ec42546f08695245525769a4dc5e45c7096bda5d2b6d4eb99807b1038fcf9", + "format": 1 + }, + { + "name": "docs/modules/ome_firmware_baseline.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4967891ecf5d69206d3b9d0c60f7f2eaaf450b9ade4ca902858d34b45e00c153", + "format": 1 + }, + { + "name": 
"docs/modules/ome_firmware_baseline_compliance_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "864f64325e318d0f15b329795d4b58128347105a6800d99d9a85edfcbbadfe97", + "format": 1 + }, + { + "name": "docs/modules/ome_firmware_baseline_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc83bb2f009bedce749480f16dfabb8fc5e9371b44591be107eee71c26154c80", + "format": 1 + }, + { + "name": "docs/modules/ome_firmware_catalog.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "279326b24c52170adb5dff1ce923c47a15a2560dd50b20ce7a546096f6bf384c", + "format": 1 + }, + { + "name": "docs/modules/ome_groups.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a096b583ae88c5bc5e6c2be3baad7cd60cf3843c4f932b1970660ad753f8fbf1", + "format": 1 + }, + { + "name": "docs/modules/ome_identity_pool.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84c0f4be18d5e3247a88d0860376d533fba33fba2f7b5c0660c2407c7ee32b2d", + "format": 1 + }, + { + "name": "docs/modules/ome_job_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a33363116c9d992d3c556898c239a01dd48f070c1a7dc0ea4493bb22b0c1f9a", + "format": 1 + }, + { + "name": "docs/modules/ome_network_port_breakout.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee0535da07d00419882e821ae00cea6cfe28d9bfae9217e2673166e06059fa0c", + "format": 1 + }, + { + "name": "docs/modules/ome_network_vlan.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "914c7bd96c695211a571dc33119df25ebb50aeb33182cbc60968959317e25ecd", + "format": 1 + }, + { + "name": "docs/modules/ome_network_vlan_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a99a0a50385c7111edf89d20581736f0b4b058e931d25b1a47532ea94daa002e", + "format": 1 + }, + { + "name": "docs/modules/ome_powerstate.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"708a394de7c9bf69ee36e79298c0a7a823554c36e6e6cb99dc1e5377a870549f", + "format": 1 + }, + { + "name": "docs/modules/ome_profile.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50a7b05d6853cbe55087f02e3d221b68a0acc2086ebabd7bcb4c865942272a3b", + "format": 1 + }, + { + "name": "docs/modules/ome_server_interface_profile_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7037f9dea23e53a78adf07f622361f869c67e4aff716aefc3b15f59e0fea2f4e", + "format": 1 + }, + { + "name": "docs/modules/ome_server_interface_profiles.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d24f7e16d1d3a2c73bb67d7f8db22de47ccbce64d0e6f4091ab409e06191f565", + "format": 1 + }, + { + "name": "docs/modules/ome_smart_fabric.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c21899815d4a0ac3d70d1d0735aa5fc5e72648dab6044abe8227247e1ebf351d", + "format": 1 + }, + { + "name": "docs/modules/ome_smart_fabric_uplink.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d7004088e81fd324b804615b266a79bb4db1371ec5e96ebc9228ded55041ebb", + "format": 1 + }, + { + "name": "docs/modules/ome_template.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79763ff81bfead903a906964d1108ed21097319860976aa8719065266c55af08", + "format": 1 + }, + { + "name": "docs/modules/ome_template_identity_pool.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c188ae92f5a2972e9fb594f0424deb1f79d30cfca6b06d35e10f395d5f53ad02", + "format": 1 + }, + { + "name": "docs/modules/ome_template_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c0227f0fe04b75993ae186171002f5f9e54bf015880c3982f7ae69683c84d7a", + "format": 1 + }, + { + "name": "docs/modules/ome_template_network_vlan.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8dc5270d4a4486699adbf1a62fcc92cc044cf7922802b4d46a8bc999fe9e7d6e", + "format": 1 + }, + { + "name": 
"docs/modules/ome_user.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7df833bd369a4b367c592ae97d7bab2ea489a1be2266cfcc953749ee922d2baf", + "format": 1 + }, + { + "name": "docs/modules/ome_user_info.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d1fcfc5ad0dff3d3bd5fbe179de9301a45af464bd7cfd8cd65a58a2ef96c2fe", + "format": 1 + }, + { + "name": "docs/modules/redfish_event_subscription.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80daa0edd8aad83fa2aa24f60930db7ded8acc68155da8c0ae7e3e1862d7e3b7", + "format": 1 + }, + { + "name": "docs/modules/redfish_firmware.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f46c91e2c9ce4f584a25ade7454617ae74f3811540f2a9b39b14e5192da25378", + "format": 1 + }, + { + "name": "docs/modules/redfish_powerstate.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb3c460a2e44539da06de6bc5c01cc9c580e4cb1b4e1df1c7611ca640920bdd1", + "format": 1 + }, + { + "name": "docs/modules/redfish_storage_volume.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eaf09f31c67fa3ff0707779f894d7108fe306eaed30076e87eaa73231b88cd6c", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fecf89b56560e79560dba9f860201828a8df82323d02c3a6a4e5fbfaa0aed3a", + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a7ab22a7f198e1918e75a2750bf0697251cae5f7f71efc8caeeaae813074bc9", + "format": 1 + }, + { + "name": "playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/idrac", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/idrac/dellemc_idrac_storage_volume.yml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4e923ec8f3493b1a5d20303f00332d26cc5cb5c4b9b3a3e5da70bfb352ac1be", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "322fb5ab9587243f1b6437ceeed4779c5eb5494f854cda1ae098329c73d61a46", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0636a02c804ef71a43eae32813bd7bc8d8ccea7b11b4fa40d895492987c5f402", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28578e8a776d1f0fbfb31127e75e0ab58fc472c77b2fd3abba1344db5fb6c17e", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_get_system_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1a86c91fe04e4c081a4227e0042a557ab6556f3c01f5420921798235ef1ba45", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc31c68c48ac4bee5ecddb652ff404f84f79ad703d4af87986e6711bae3a6eca", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b12db44b841dd333d7f0d25f15b30ff98c0821142377656c2d23c5b47a73a2c", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/idrac_network.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94bda1741ed253a935332f581dc5672a261306f2f571c70183d6070898c20140", + "format": 1 + }, + { + "name": "playbooks/idrac/deprecated/idrac_timezone_ntp.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "60326f75e44105ea46591fb5cdcfada01c291c43e296dc06a0da35f7597f081c", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_attributes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eccd3c328599cbcf02f39f7b3b48246d2a7b201f90d6f3af6c07a08c04867e7e", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_bios.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "082c12b369fa03c9a40c1c945f539e7d65ac50066bcd380256479b370c84996a", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_boot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2137b8237ddf8764aba72b884d53f4fa6aa7c728e848dc4c9c85b33bda2f5d2c", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_boot_virtual_media_workflow.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f06808baeed1b3d6e2875c11c7d972a8d8a35e6a0a6b1949fd26e95601bbcdd", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_certificates.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5f5c85475347df18d3a2966b1b61a1d252b0276e7dc5bac66d16f50866a896b", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_firmware.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cd01e8c28b213252d3cd833f76c9db3da03d1394ed29c762183bf52fa141de1", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_firmware_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb0aeea474dc6cd72d00a85b319f233e467c214d88a8345d226d0cb5228e9e2f", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99fc8d52c8e7dc90298ee3230de3db5c00a39e00d18103fd6b9a71a2401c3f96", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_lifecycle_controller_jobs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac0275f30fc430f75856502da81fbc34b8af2e353d886dac4de10a66243bcd15", + "format": 
1 + }, + { + "name": "playbooks/idrac/idrac_lifecycle_controller_logs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99f8a8ae26678d1d7a84d4d99e387971d7c6c756ce1d7fe5933d3630926c6f93", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_lifecycle_controller_status_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e30a3612d0c4518b5c2848ffb4e530817ed1fff4fe5dc7b29841342af7817b3c", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_os_deployment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5cf86fd3833eb3e5fcbd19a053ac17bd5a78b2beb720318f2f69b68a99dc6d3", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_redfish_storage_controller.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52d7e3f086ad6e5df4d5d30d43e42ad0dfad6e9423ea514c800ff36c7715bfcb", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b27d21824a5be3572c3cce6626e1c422510d4a5c03bc22c631998f08a285ec07", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_reset.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c6c3a2132bb6071d1282f87980cc4ebac0345a2c2ea95024032bcd007dc2879", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_reset_result_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a44b8833cf72fb84947759b7d73c7c3114748a738e6da7b2b3874a717a2251ba", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_server_config_profile.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07699a1ad62ca26961f1411576dc0707ccc5569ad2e31dfb619b9d81880a4b6c", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_syslog.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de87c5144721e93561b13196929340e9fc44878aca57ce51650e103e04a96634", + "format": 1 + }, + { + "name": 
"playbooks/idrac/idrac_system_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f471523a4b5190be049e9d12cfc08e78da059c0f8fd58b760f6dcc57c52afc7", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_user.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b22a4a0e57cd943dcf4d9e5a2ef2e99c1e0e8a5c472a3f9f9a6b3bd74cbf67ba", + "format": 1 + }, + { + "name": "playbooks/idrac/idrac_virtual_media.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2d60a70a720f44cd85081cdbb5b5f42eaae581ac8e391ccfb3111bdd246bba1", + "format": 1 + }, + { + "name": "playbooks/ome", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/application", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_alerts_smtp.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "544c2f793802573b9f19bbdfd1ce4ae24a6e25bde7b17f576b2730a0cb7f63f9", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_alerts_syslog.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a776cb3ce6ae73720e3b550c0255b703eeb0e1c159dfd5037dcf016bf716b479", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_certificate.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f23b249bf4e4439bfa6a0a1ef54e2b1c20ad1ef5e3c9926d75eaf8f2def66ae", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_console_preferences.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5ee4e96f76ca81645647da6d68f65bd4b159e2124aebc989a266dbb3309d61e", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_address.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef48ea765db99c34ba07c1be262208bdd050a2f2eeb764fcc9615a15e7feb6dd", + "format": 1 + }, + { 
+ "name": "playbooks/ome/application/ome_application_network_address_with_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3656f48e615f71c05a8d38a7526fbbbe51bd376e09e86a2ff5103f650e1c1336", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_proxy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "360c0590089be634f0b4e45470737a040f81d1aa9fa14b2123fc965e7c9f6f04", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_settings.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e3f0479c2adab5a95bf5c932e47b13ce4ecc54de305e5d16be8fc1486a5ecc0", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_time.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd1db03d4ebefc98e7184c0695b741eb9171e4e2f7f15692e435c9acb834ccc4", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_time_zone_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d30a435e8ec0030ca04fee849029f392ef17633308f065a868dc0ee2d51128e7", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_webserver.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8a1c0390466bc5b526a92e6bd6f8c1c83f879d316002989b64e50bcaaa76af5", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e35ab885e81de689a47db1b9d39e7006ee14822e200c1c2479deefb61c2ba90", + "format": 1 + }, + { + "name": "playbooks/ome/application/ome_application_security_settings.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44ac28893c235740bb266a81004ca7704f35a008174f089053f2acd5cb1f1226", + "format": 1 + }, + { + "name": "playbooks/ome/compliance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "playbooks/ome/compliance/ome_configuration_compliance_baseline.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca546c9b11fd07d4e3a86135083f9460cc29343095efce34e132bb8577dd0276", + "format": 1 + }, + { + "name": "playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "357998e3518962f59d84c8b8d4bde12dfef469974ffbc36f55722a3c21393b15", + "format": 1 + }, + { + "name": "playbooks/ome/compliance/ome_configuration_compliance_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f30f74f501c6b2cab2c82581964f619bae2d59d01d80b43cd4c20aaeafb0ae2", + "format": 1 + }, + { + "name": "playbooks/ome/firmware", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/component_reports_filtering", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4f4fba305e8ba3b9687ccd7165bc0fefc8fe37aef496eae36408d82c01bd4c3", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "161590e42d75ddc7ab16a8d90c0f6ffa8a43b391c04c636f7e3b81c4fde7cd41", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/ome_firmware_baseline.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25d3dcb15e46c2c88873163dfe1a01ff37c0dda93cd951c078b3c96b4753e5c1", + "format": 1 + }, + { + "name": 
"playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cddff7a98e07d0e2d24433e757d0f430b0e11aed2f4aa66ace94b25bbe9aef19", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f902f16153e996b6fb701d1ade662509238e76e6f0ba84f05cd79216af275c6d", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71287900811e03a38e685b904e6e09b989b626ec598f2fdec5904997a6282358", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/catalog", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/firmware/catalog/ome_firmware_catalog.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d449dc412c5b1d6b3bd8efd3f0c333ad5a6597d512fcfc13c0b43e979aa86db3", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/ome_firmware.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "737487a1c9a931ea1afe538951ade5b79771cacc9b7661949d5f80d15ba1319a", + "format": 1 + }, + { + "name": "playbooks/ome/firmware/ome_firmware_with_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d04f917b100916a94dfcaad72fcb9a5cc94bfa2bea7aae1f8adfdc76a71f5d09", + "format": 1 + }, + { + "name": "playbooks/ome/ome_active_directory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac51ececa4b91a8e6365da60f6ce5c5a11a49232872d53f542edb41871521d3c", + "format": 1 + }, + { + "name": "playbooks/ome/ome_chassis_slots.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3906545b9e6ad6b47992b4d861750e93fcdacebafe4aa198d1cafed1d3d39f3", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_group.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "57f5bbb13a97f70c4b455879f054fe91f687256b0cd2d3966ced9c50b3147b3c", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3f5581491a4f964d974de1f0f46eb145d6396aae2ae3d213aeb25420792a7a9", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_local_access_configuration.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d98875b5dda6bd407f8bc834349419e89ea35b2d602ebf8c809e37b6917f392", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_location.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68f148f9362f3414166ad37b147ba314480a69ed0734900ce968dfb9cc4cbe4d", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_mgmt_network.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18f3a9b956e41112b3ade4e47dda3a80499633111c159be5cdb15ae980c13172", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_network_services.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ef5220d73ae9b58102ddebd5b8090e91a75afdb3fc9fcd3b7095c56fb7755b0", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_power_settings.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c95532bb6ec7d855a8b4aeb16836150d74b944f6018b0e17e4f6a825391e31df", + "format": 1 + }, + { + "name": "playbooks/ome/ome_device_quick_deploy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfd6cbe0ac40ba5f7e729c7eba16014ff3fd42a0a7992f05a5f4f8422ba8b355", + "format": 1 + }, + { + "name": "playbooks/ome/ome_devices.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "611add2a18b80c04b6470bc768459117d0b2c8ab76456cc0a2613f4244a06796", + "format": 1 + }, + { + "name": "playbooks/ome/ome_diagnostics.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05b3c6d7f35cd3b463b01fa3548efa203f0ba070cc7ff19347a5181c0e3922e", 
+ "format": 1 + }, + { + "name": "playbooks/ome/ome_discovery.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e22c8dc2476de8eab891577e8da856ccb49c9c5e5c41b2a13f7430a7a754c23", + "format": 1 + }, + { + "name": "playbooks/ome/ome_domain_user_groups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44fda4cdcbbf62a75180e9513f9ca67ce65478a1ede4a323c73acac12f6cd015", + "format": 1 + }, + { + "name": "playbooks/ome/ome_group_device_action.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d40958dea3987aadfcc269ff813f8fd9f784f0651132954ca063ddc5b4c46ccd", + "format": 1 + }, + { + "name": "playbooks/ome/ome_groups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57211cd828202d0aaf0b402bdb0517c606f589b3d6c4b25f73041b3e08ddbcd1", + "format": 1 + }, + { + "name": "playbooks/ome/ome_identity_pool.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c150987a26461e26fafae4376d23aa0781dea40f8d2970f8bf3a07b0be3244c", + "format": 1 + }, + { + "name": "playbooks/ome/ome_job_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e9d6427fee7e3c8ba92e51df45daf3dfab6a870ee460a51596c70db9d20503c", + "format": 1 + }, + { + "name": "playbooks/ome/ome_network_port_breakout.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a754a0c3de07eb3509967c427cbc461051ca185f771c0d0987a5c9e00c1f4c9", + "format": 1 + }, + { + "name": "playbooks/ome/ome_network_port_breakout_job_traking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c46ea16722e826c7db8517caa61623b917794d3601886b45077cb58abe58ea8", + "format": 1 + }, + { + "name": "playbooks/ome/ome_network_vlan.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f4fa99f16d190b0f4885a52547ded02f23f3133edc4aaf6b4628410fa60dfb8", + "format": 1 + }, + { + "name": "playbooks/ome/ome_network_vlan_info.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2f0d58633b8fb443663af749de213075f5e94ae69663d0c6ecd12ddcf133d92d", + "format": 1 + }, + { + "name": "playbooks/ome/ome_server_interface_profile_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6ed36fb1d3f8bebccd89b848fbb374f26a79132eaf33e298705ca16085cba17", + "format": 1 + }, + { + "name": "playbooks/ome/ome_server_interface_profile_workflow.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09ac99eedc5d5d88280c9ec2cbc3eadb2dfa88fd8fab9a6098dcf8ae6183cf34", + "format": 1 + }, + { + "name": "playbooks/ome/ome_server_interface_profiles.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "881a358b39e8378718af6e682ed10e666079a3994924af5d1d96d5b5cb20919d", + "format": 1 + }, + { + "name": "playbooks/ome/ome_smart_fabric.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ee25defbd00757d41a854400eda3cabe2e64e0df88bb8ed0fc011cdbc5a247f", + "format": 1 + }, + { + "name": "playbooks/ome/ome_smart_fabric_uplink.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc88f3b1dea0d8e3100913370ee019e362a1e80ba1435254284a459d03986a50", + "format": 1 + }, + { + "name": "playbooks/ome/ome_template_identity_pool.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "884cefd59aace543ad64ba50025fdb798a37c21989264b8dee86fbceb590fcbd", + "format": 1 + }, + { + "name": "playbooks/ome/powerstate", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/powerstate/ome_powerstate.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3a69ad92917c874fa543aed3403b5d2558252d891b47fe6dea9db527456ccfa", + "format": 1 + }, + { + "name": "playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4d5b3d2830d9f98c494f97c75c6645f283aff5eab1371acc75df6a9f80cee8e", + "format": 1 + }, + { + "name": 
"playbooks/ome/profile", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/profile/ome_profile.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb6fca02f836fe844b3af07802a8c9161ee0ade5b1d67ae40d5b16a4cd17682d", + "format": 1 + }, + { + "name": "playbooks/ome/profile/ome_profile_assign_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "063b91c58e92b166063205d412ab82b37230dd8e4ffda760373e9eb4e28b0384", + "format": 1 + }, + { + "name": "playbooks/ome/profile/ome_profile_migrate_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6801e62c9ac8c1f44fa9ea203c66f1e3a0cbcfbc3d76341954fd2d6d783b5cab", + "format": 1 + }, + { + "name": "playbooks/ome/profile/ome_profile_unassign_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f49b8e2766a212e544d1c50b5e5212443be9c9bcb45ceffc7e53618fdc8a0395", + "format": 1 + }, + { + "name": "playbooks/ome/template", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8738432b573fbc779bd22e434c68cb1d87cbc0948ff4a718657469b620ad55a9", + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_create_modify_lcd_display.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b723fb3b8e855c02630df9a5b4550bfd86dc8df22e8a1436a01b171404497d34", + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe843587c05f8478ee9c0277febf97d18bae114680dddabf7279ed7ffba41bf6", + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_info_with_filter.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28e95655a53e5e79a8629a52baa3c9659fc480a4f3406550f8d48bee8d7edbb3", + 
"format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_lcd_display_string_deploy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b448ba7018559dee692e9fdbfd3b1d40949d3c751af3b81731d814e2ed3f711", + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_network_vlan.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbb9b0c74a22c30a93958302c7dfe1390d96fa3540f274cefda7e59780469432", + "format": 1 + }, + { + "name": "playbooks/ome/template/ome_template_with_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e9e6697191b9135c4e75f94021f7432c948126398f79fb37d8d3996004aa9a2", + "format": 1 + }, + { + "name": "playbooks/ome/user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/ome/user/ome_user.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65b37b05b2bcbf5d41f1a6466df0f07e72fc65b143feb30c07c6d97a3c9218d5", + "format": 1 + }, + { + "name": "playbooks/ome/user/ome_user_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d75ebdd03fc9c5d1ae875f9ad341ff686ff06fea4e51f56df4d75afa9b82ac2", + "format": 1 + }, + { + "name": "playbooks/redfish", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/redfish/firmware", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/redfish/firmware/redfish_firmware.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ccf1a697f3f96f8463001be793b23e2c511226a40033d56f5bdcfe4d1a82bff", + "format": 1 + }, + { + "name": "playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8b6dade0af8249dfe1b4bba5eedea3751ca67a9331e94e00027633a1422f106", + "format": 1 + }, + { + "name": 
"playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7908ad83d6f8be21b4136e88cdd0526e1b7e165de80313da9b981e9be4b940f7", + "format": 1 + }, + { + "name": "playbooks/redfish/redfish_event_subscription.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "129aed58783f80733247d34c27a507546985b6b2d97793f4dc03021022fbf166", + "format": 1 + }, + { + "name": "playbooks/redfish/redfish_powerstate.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d15e424913842eeaef979265128a62ce872cde5b1f95933773f86597f728ca86", + "format": 1 + }, + { + "name": "playbooks/redfish/storage", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/redfish/storage/redfish_storage_volume.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8d0bfd51d4fe7176b4bc50169f7ef67d8f56a806fbf32a022df648ae0d137ae", + "format": 1 + }, + { + "name": "playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce3a0ee4237d2835d73bc50a308d1bca27f086cf9b7e2e83de6d038f3f766d3a", + "format": 1 + }, + { + "name": "playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afc62ace65f0cfcca325e6c82db1ab2fe75a0d44d79be5d2437e16275da99fba", + "format": 1 + }, + { + "name": "playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "990cfcc7f5f58fa573a0018cfd52f9e10ef1fb851b8c37d9c1dc4b49c3a9aa4a", + "format": 1 + }, + { + "name": "playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5839ad66c1e6e5eea70cdf830d87a9b4476663012c820f66b8b68d6ba555750", + "format": 1 + }, + { + "name": 
"plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8714f96c428a41767a9a643e047e5c72ca3fa5e0d3b7129bbb611187904c3c9", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/doc_fragments/idrac_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31e6a083ae597d771eae6213edf24ce2e729f3c35b20dc669f787a4bb43a7009", + "format": 1 + }, + { + "name": "plugins/doc_fragments/network_share_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98d8877a3ceb891a40e3ecb802f3843c6775aba98a9e2760f01bf913ed69a964", + "format": 1 + }, + { + "name": "plugins/doc_fragments/ome_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b4468bf5d1469949eae6d6bc6a8b4f861353e7ab7b392ad63ac51c0e7e1caf4", + "format": 1 + }, + { + "name": "plugins/doc_fragments/omem_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90d5031fb1cad35dd5b4450ccc8682f09709ae17d2871c24694190e8fb2efcba", + "format": 1 + }, + { + "name": "plugins/doc_fragments/oment_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fcdde0c2d0d36bb9eb6ab67a4c5a897096b2e45d87a6d3b70d58bc5947569e5", + "format": 1 + }, + { + "name": "plugins/doc_fragments/redfish_auth_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d321d8ac3b53cf3196a98682f445482e0de8de3740613113e58e7cc62a80af4", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/dellemc_idrac.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2318a704f1d71853ce4d50180c758378d21ffa37a6bf88dfda2182b17b634428", + "format": 1 + }, + { + "name": "plugins/module_utils/idrac_redfish.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c1fc94a9adb9dce2477b746d05482a9fc1de9c236e4f3a8dc095a2ac5a62f32", + "format": 1 + }, + { + "name": "plugins/module_utils/ome.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23fbd69b9f491d36896c0cd996e6514ef74bed23fc4057ed1580eb57bc52147a", + "format": 1 + }, + { + "name": "plugins/module_utils/redfish.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94570535cf3dd3f0381ebac589c272030211329043813cabea6fc805b7178a3e", + "format": 1 + }, + { + "name": "plugins/module_utils/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80a1eb37b3d39259af40ba8c206f40ea9c19d069b594ee8873106576a9a620bf", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_configure_idrac_eventing.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ceb170c26435678f63273556f894583b64ab733868c8033fdbee39ef0788cc2a", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_configure_idrac_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b07cbe10a36aaecb250fe8351499f05ffca31472815f6f73b40d904f306184bb", + "format": 1 + }, + { + 
"name": "plugins/modules/dellemc_get_firmware_inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59a8f7f805ca38912b39b0c41e28108afc34eab2cdf485e2b33b1464002f8770", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_get_system_inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcd3478f09ce0a00cec8c43409f536be51eef85e3e13350e618ef612f35bfef4", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_idrac_lc_attributes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3774739c8c7a941ced8b22cf985ca09b9133af3b927cee98960ca4feca9bdfe", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_idrac_storage_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc8ad88f9f12af0efd986284122a2d25063540d5247f41ea1bbed43a23bec5f2", + "format": 1 + }, + { + "name": "plugins/modules/dellemc_system_lockdown_mode.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe9b423e68e3e81fe2b98ea82a10c33fa25f1e81d0e27ee8e4bb1a627a7569d", + "format": 1 + }, + { + "name": "plugins/modules/idrac_attributes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31f9b22864a15832ab4882f0cae0f9699204b0ec0b2702c78c8e64aeb247d512", + "format": 1 + }, + { + "name": "plugins/modules/idrac_bios.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ca114a526185fb5d09074bc38101d362f83edc55b794ea604b062b7c9b7617c", + "format": 1 + }, + { + "name": "plugins/modules/idrac_boot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3214788e021feeb9001fbc3ec6badf14e178e29f2be9825b5983c9987b0d9a07", + "format": 1 + }, + { + "name": "plugins/modules/idrac_certificates.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c44feb0cfdcc6485f8c0bba85444269ca555c28c08773665700e74f9513e007d", + "format": 1 + }, + { + "name": "plugins/modules/idrac_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "fa03d293a819d57ab6ee9ff5c799aa16c24ff1b950fe201a3c73eedb91a1ae41", + "format": 1 + }, + { + "name": "plugins/modules/idrac_firmware_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43090db00165687d027122b2035c9353a1c4f9945171907442f83efa2cbbb7d7", + "format": 1 + }, + { + "name": "plugins/modules/idrac_lifecycle_controller_job_status_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29cc921c4cacd2dd7094ba803d6168595b3e3db3d3c6deaef0cf70f2357248c0", + "format": 1 + }, + { + "name": "plugins/modules/idrac_lifecycle_controller_jobs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "480a35714b5ae18d8c902c470b9c46e72b8cfa2e40518722ded7127e2c96c7ac", + "format": 1 + }, + { + "name": "plugins/modules/idrac_lifecycle_controller_logs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1883741f413c7c5b7670bb152f76e4672cf25abaf44409cb1e9d45cd8af8df38", + "format": 1 + }, + { + "name": "plugins/modules/idrac_lifecycle_controller_status_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3eb8b534fa96b830fd73ac8b8c8d88b6b6c8b3aaa0a0c47e982c49b9dd25e55", + "format": 1 + }, + { + "name": "plugins/modules/idrac_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9913f4317fafacc168e30ea3e7e677bc87098f01e6b878c6513b3b35a74ae4fc", + "format": 1 + }, + { + "name": "plugins/modules/idrac_os_deployment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7fcf34fe7d1bbb017765842e399827402cb3001891f0b80619726b8f5b75d15", + "format": 1 + }, + { + "name": "plugins/modules/idrac_redfish_storage_controller.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be4045a15b9013f38a90ba84e303fbde55f7ef08dc9c9a068e1a55e14cd40998", + "format": 1 + }, + { + "name": "plugins/modules/idrac_reset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"83356cad46959040735dc8210e53a70ba252aecbf4c4eaf08d2c7f7ec0abed4f", + "format": 1 + }, + { + "name": "plugins/modules/idrac_server_config_profile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26c2126af51f3017c525d1c4324d7ada615b6ebb7d0cbc7eeab911863d0d13b1", + "format": 1 + }, + { + "name": "plugins/modules/idrac_syslog.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3785b5d4739f47dd34c9a766b5e705310a67fd7f8cc731e5f75773c63e6845ab", + "format": 1 + }, + { + "name": "plugins/modules/idrac_system_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb989a436c357d2a5275838f727fe2c19be39fb47b1352a08be5a0771f41dc31", + "format": 1 + }, + { + "name": "plugins/modules/idrac_timezone_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77bb42b0c2ffd8597dd1bc4d855e38aab9535562c394fc703abf1dbb0ae2c09a", + "format": 1 + }, + { + "name": "plugins/modules/idrac_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4343e4bf3645750d1d8e35ad511c3d3c5eacde15469cca1aea5648097ba6834", + "format": 1 + }, + { + "name": "plugins/modules/idrac_virtual_media.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fd6086e474f1344834c5a6bd85f4619125733840b5b0194b7a020e7f899a13a", + "format": 1 + }, + { + "name": "plugins/modules/ome_active_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a6e1062ac7734548dcab7d3175572cfa2d1a3a6b322511b316ab1d6af6fe33d", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_alerts_smtp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82edb362407d109837d2695564b40b766caeddb96720d093b56e0b801aee7db5", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_alerts_syslog.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f27c3801fe3ea34752dd2d205d0458599e2e37d51189c216bda2c897c4adac23", + "format": 1 + }, + { + "name": 
"plugins/modules/ome_application_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce57def988dc3d482b4be0f482ec7d43076ede102f9dee391ad1ec63e1dab9a1", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_console_preferences.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1504367066c5ba86568df43a31258637c52e950d40a134f401ecfb555bb0e4ef", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_network_address.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "564853dbfd3706607e5dd89d9eeaf1be01aa87e2feeb8d4ac89672e3bdec5c28", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_network_proxy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96dcb54a8ea5fa8a34b7b8b3c846d7725788fbd71833890506ee3d4e20045650", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_network_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "315eb7f6c14a82286497e9204c5a1c8430fb93d3496e85e5c865f2f0f25e8327", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_network_time.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d011173df4e1d7d9b1215d49a3695cf28d3225eaccc457c0bf2183829d24ba18", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_network_webserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "034b29abff49479cfefa20ba569d09f13b700c1cecf301334ed66f8611ec903f", + "format": 1 + }, + { + "name": "plugins/modules/ome_application_security_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f13f076503572a0720d756cdd21125e904b886b4aef50d630cdbc78e9c8bfebd", + "format": 1 + }, + { + "name": "plugins/modules/ome_chassis_slots.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c3934ac69f6a2d3b5c4cc7c1e97b0542eed1886f807cb534feb7c2e87a9dc73", + "format": 1 + }, + { + "name": 
"plugins/modules/ome_configuration_compliance_baseline.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3dc4e8b58d37a525e9e0520e22b7012e9169833bbd31cd6d2410b143703f792e", + "format": 1 + }, + { + "name": "plugins/modules/ome_configuration_compliance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9aef3c20ecd24903646033f2b5230b79cca997ea86ca73a1c2fc370cba5b669", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c4ee487708d0b773fa38e831a76f71ae094effe0736d91705f86598506c8e42", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6104b52e807ef7772aae114878af1b1016616a8a74c8e9dbcb7d8f19a7500d13", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_local_access_configuration.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6ac6f03a4157a1cddfc9e6990364b2847b035e5fe87dd5bbbbcc1b12959a26a", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_location.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6476d5af6166b0cd9e5dbc7887a95247658081eb090f9b2aea47010a30334e9", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_mgmt_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c2a132a8e7f55be9d1bc16097ae7f5f66f293d446deb53612747a704bb2e0eb", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_network_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "355d08452f5c2ed3b50052b48d08d4fc52f967d8f3add1db2dc1526682dfecb7", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_power_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b77d544ee955b28494a1a86c8db48557624a58a3714376795b41aeea5da347fe", + "format": 1 + }, + { + "name": "plugins/modules/ome_device_quick_deploy.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "c13650b999e2ec1c5fe9937579081a2a1f97e9b6b3740954574220f3e702f09d", + "format": 1 + }, + { + "name": "plugins/modules/ome_devices.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26b6db0d1eda5962466408a52329b943651c149f5ffb1bfec87c3355834d070c", + "format": 1 + }, + { + "name": "plugins/modules/ome_diagnostics.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0bde9547a9320d763bf7e9bf2343a2529bae27f196b009ee2fd176a9827391a", + "format": 1 + }, + { + "name": "plugins/modules/ome_discovery.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c103d3c7d685088923393b0878b2b53ffbe9fc4f64a954e082bdace0c34616f", + "format": 1 + }, + { + "name": "plugins/modules/ome_domain_user_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "263a02ed96c8ab2f109291493ceab109efaa9f69d4dbc2bbfb8f0a78f6cd43a4", + "format": 1 + }, + { + "name": "plugins/modules/ome_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe6bda6df29da0f03efcb1e8e9ba3c8a4601be4723edd7dc898da7cbd988444e", + "format": 1 + }, + { + "name": "plugins/modules/ome_firmware_baseline.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "045d19af4d6e166a8e4a23c7aef9304b6a7b1de2c1c8f4230ebf86358532d5b6", + "format": 1 + }, + { + "name": "plugins/modules/ome_firmware_baseline_compliance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca3c12a956151dd75036d5f8cb11ef2478d0d0c10e4fc3cc5d545e6f302385c1", + "format": 1 + }, + { + "name": "plugins/modules/ome_firmware_baseline_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48e72d3d61c747396545aa29ade0e4efb946f00b05c2d7ca865c7cbd090e2498", + "format": 1 + }, + { + "name": "plugins/modules/ome_firmware_catalog.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d58b5ac3f9a9122260f068fb8754368d5e18fbf6e1f9e73e710ad29a843cb0c", + 
"format": 1 + }, + { + "name": "plugins/modules/ome_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05847ecea350efe57fc3a54881cb5450997aad795ceae0d795a37d5eaa62a862", + "format": 1 + }, + { + "name": "plugins/modules/ome_identity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f4c0a0b8f9d532f957bf8064eb3738c958a3b254485d2511b2fa7165be101e0", + "format": 1 + }, + { + "name": "plugins/modules/ome_job_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "413bb6a7e717a290cff61bacc4773513ca0624a8803742f5bbe1c78ffa805b8d", + "format": 1 + }, + { + "name": "plugins/modules/ome_network_port_breakout.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9acfd349291413a4edfc5750772e38a8c67bc724c7d9ec000cb4305d2ea6c368", + "format": 1 + }, + { + "name": "plugins/modules/ome_network_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9265ca6ae16c19449eff9250938e00203c7ad32aa5bac730175dbaac1f4201b", + "format": 1 + }, + { + "name": "plugins/modules/ome_network_vlan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27d41fa508d95a3355f9bb0db06cdbc5b30bada29b433cf4331783d9320fb9b0", + "format": 1 + }, + { + "name": "plugins/modules/ome_powerstate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b42c64d44be41fa1181e738343fc0ca10cf7b697556139594d59b9a8a8d5efc9", + "format": 1 + }, + { + "name": "plugins/modules/ome_profile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff602c4917fd764dc2166c8c5db3156d2edd6485e85cc509b2dd58701c71364b", + "format": 1 + }, + { + "name": "plugins/modules/ome_server_interface_profile_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd019157af898d93fcfa244bb0ca2c7d8fb0cb740977f5ff60444f7fead493f4", + "format": 1 + }, + { + "name": "plugins/modules/ome_server_interface_profiles.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "bf98010dfb823d0845e276973896aedcae48ee1a79669e7e00c2b6b4f788ee3c", + "format": 1 + }, + { + "name": "plugins/modules/ome_smart_fabric.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae5fa8026a21d5eb90c18a6284d2ffdf0f82a2ac3513575b39800f4471d1791c", + "format": 1 + }, + { + "name": "plugins/modules/ome_smart_fabric_uplink.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7bfc89fbd055371a3488f64107a80bb92a0b3d610e76fcf94d7a777ef27202dc", + "format": 1 + }, + { + "name": "plugins/modules/ome_template.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1c169dba48c6563e1d30fec5334f67fc7f0771e48311fca90de7c6da05de0b2", + "format": 1 + }, + { + "name": "plugins/modules/ome_template_identity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94d2c5e2a7db82c8119b8651e35d26f0e791e944257608d79ef6fe5be48e2025", + "format": 1 + }, + { + "name": "plugins/modules/ome_template_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8975be230630553feb5243db9e01d054735f27caaa680bdfcb31a6c7f2892b51", + "format": 1 + }, + { + "name": "plugins/modules/ome_template_network_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80b6eddf62c19f45f7d4bc992d1d6f0e704b7885b8cfffe53c85b24938bd7c35", + "format": 1 + }, + { + "name": "plugins/modules/ome_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "399d69ddd06d2b39875a38cd10bc062721b0780c135970b5e205a4aac8d910fd", + "format": 1 + }, + { + "name": "plugins/modules/ome_user_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4916a4f9ae03fe5f5de7f3de1ecfc1df0797d437288b3b8d13bd94d9cec6cfd8", + "format": 1 + }, + { + "name": "plugins/modules/redfish_event_subscription.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae1a0df42858a2276f00a574c71e05565070aa789ff9b0645d8bdcacf84c0359", + "format": 1 + }, + { + "name": 
"plugins/modules/redfish_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "792d527767437fa41784d6f458c9638bbd3858e6d4b5285c15aa8f88e31e7ed7", + "format": 1 + }, + { + "name": "plugins/modules/redfish_powerstate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d94dd14da6dc9c2c63e630201382eb2c6fae7e725b561cd4d9aa5d652822824d", + "format": 1 + }, + { + "name": "plugins/modules/redfish_storage_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98d36b7e595fac936671741fb344c1c5f7e85211a9028d4215dd9497ec9872a9", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee48376dbe970009538ca8b62c580b70cb7c6f8085e1b0ff2474cd252dfd71bd", + "format": 1 + }, + { + "name": "requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "730cd2af3466c3077bd9e8f684da582f2ed7d5d43cacb7281446046ad108d26a", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67b0f799f12726b149fd9677334c10b059e231bf2fa7150d55e9b4d5fd242062", + "format": 1 + }, + { + "name": "tests/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84332312381ea3f7b69a0d14fdb69eafe91071bd64fdc007c82b1cd17b0b68eb", + "format": 1 + }, + { + "name": "tests/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef1902714bbfbe85a80e4ce867174506597fec7b7867402bcbd1011d0fb32673", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77ca00eb5c1ea0580f4e5d12778ac5029c3bb190d5355d5e8ffae6a3a53b97fb", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_ome.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37c80e12462773e83d956cf3a3f8617b05963c88e269da05315b00647da9f8c2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "960b3c2958d995b6074a108530432a7ab2597d47fe73a7b16ee160198a29b775", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/conftest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5646cb89ba9ea0220ca9ab036024052501028194d70145c435afb769a3494852", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92185c0bcbe6d290c52f7a1ee53e28f25385fcc945f32c3144fac277eccdc545", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31c7b0ce637329346756df7d707ef66ec3b516ffe37ad05410a609392863c6be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "192b51bc6c5d5b120f6bed0654157a75d12ef0e7adaa5ce7ccf4c25dfb29d4d9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_get_system_inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "342e7173304f3621bbfcc9b8fdbd8766a14cfb9fe16b6bbaf99f41f5920b0ca6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91b13c79c1af8aa457d6bc98a074a5d6b0150bf33aefac418a86e29fba52da3e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "6d1e840a6652513c91b19cb21c6c35a38bc75df95cef68fc40caa437fe5d518b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcb93f93630fc1ced41ddf55471a6fb5f826768e2732f004d18344f81999da87", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_attributes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f859e8548c7e75db09982441ff4acb324c52f98021f9a4ed916a6c1b32723cdb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_bios.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33822ba4405b88f1a76869fd98c72fbc9e59727e0517e13f90b0f24e43072232", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_boot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d517d38965a354afaad821408a0978ae1d8e2a24bc39f95c94b4651d9fd5e519", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_certificates.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a02e9ca6da9dd543e87e1b02fb7323efc3bd16517d144964e888a7642a5aa99c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88890ddae0fe9a5ea035d56fca9b672b901d1a80a17a6d822f7eaa7be78adbe6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_firmware_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1da9b9002bb4b546225a66e57164938f8969d4fc1e7368c8f8b87062b0ed39ca", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acb928f15232a99435d45e6e684196e236d712d6a2e7c27e30d3650195f051de", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "40f7f1b4c42cbb536c38792f03dddf62dc62a2c3c51ca9c20f1339675c4989e5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b805dc930fde093b383a4ff27d6aa7ef12f378506ebb5d940cd15225c36acc64", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57decc6ea27edfc32a8229433b1194ccfc59376f8891b0eac045525204c84a90", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7abff9586412878c5e681cb012f95800045ae16ca4ad5f698bcb7d5f87647fd6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_os_deployment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50b2386c930591f9337bd433807a88269e9a1a85ae73492852388bce34d5529d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ef7a9070d3f865d5b84d55c00681dab6078d368d496e6cf6172927a946e59ef", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_reset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fc21d1e73ed80f994fd7fd9964357cbf532bf8ec632399569bf37519bef88c2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_server_config_profile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "277b5672b6891e6d80a3ab14b29b9b095173a842e33e4a1949a540d4f392a11f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_syslog.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8dc717a1ebfd0ccc1328769d917479cb2d3cc398c67e271348f7e1184596c00e", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_idrac_system_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1ade1cb0073aecac90d23f082d461074b5bf63af4f0af77e9dc98cec85cde97", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_timezone_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33f6bb244cd45cbb59227e509d668bad071d06160d52b6319fac2d271b67d745", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46c9919f488c45f869999e0c840eeacfb65ed956e5e5ac205aa436f3a96eec76", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_idrac_virtual_media.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41fba36528bc9af58e7462e3bec64abb938ce399e214134ee8b44894a28deda4", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_active_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9db993dfd0c18c5eb3777adeb6e955151b2f179cc7b0e00419646aef923c2fab", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_alerts_smtp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7afa094e8262bdbd5fb9e9aaf875f82b3bf810a9237b01b04ec071771bd132da", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_alerts_syslog.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e6544e27d7b35884ea54cc11816f1713f8a9222220c1b591f3a89ddfc8f5607", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0280f7afa09beaad65dfb503782bace0a6f04ff767447970c1950b216b4982d7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_console_preferences.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d9caa4436405ad6911180207944159f0db1118781e6b0c1763ca50cd332313ed", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_network_address.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15297916606687affd3c076c580f0c56572415e85728bbe23fedd1a0c6806be2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_network_proxy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba4cd21055ebb897c785bf1687f912882d577b4813d3054b164f0c13a519e8fe", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_network_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8b8136508a8e29d4c9ffd399fa65ac64c85c5e42a141ea5d12b5c74c02483a4", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_network_time.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d5459e09e63dacb0cd57368db8f3fb7d90ee83ea4c58707557708480e390237", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_network_webserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3d502a99e19c97418dde416d97c4acb8bc37800b43a648ebb546b7e6373c7f9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_application_security_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a28065c767dadb2abf22fb897cd041c14da1e25749f678a29c58ae7e268a42f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_chassis_slots.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9416329532e9c2b8a49292f17457cfd03bbe20357b4762e30e59985b00e3225", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bae45e27cd4bebba88c9c63c5207ce8026abcd6d1049de3e7210a8291497142", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_ome_configuration_compliance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09aec29c2fba54c5647c9f31eb7ae4593c545b6cb5f0c7e57f509c18353523b2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18b7c5a817f2aeda11912f9fa3a8d5d422201f2f44746f6879fd5c8fe378d74b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c041b4e641a484fe40b34ad4cf922891bfe04405dd39134a37829792650963c6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_local_access_configuration.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "281c3b191312c53b58a9fa44004c90471e9fbf99313674e148c779cd6f2f2b71", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_location.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "239b6a2129a8be0d24445fe02c651822082ff9f9f3628ef84eb899c6ce70630b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_mgmt_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a8e0119234dceafca94eb8e1513bd6bc841cdafb91be67d211d6db09fdb31f9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_network_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09d03bf7553daf2d1bb54674150f0362e37b8b900f9807f0ddf1b22ed9a950b6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_power_settings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dde98180f35d1fda81f26fb545d3fdec1936c6164445767f98906944ba2846a3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_device_quick_deploy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c2eccc9c43322c647a17eda27c85e874226ff98ec9ff25d68ce925ee67b311ad", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_devices.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ef1681f638521e22f07bfd3926e6e79f16eec1074ccbb84e15abc52974a1abd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_diagnostics.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c5654d0976ffb20d78aa32c04312233790b0f810673db2bc46e0e90dfa56494", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_discovery.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bbe3fffa0bc1d54658fcac812d787feb3ee5de3869341c2037dc1197faa32c3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_domain_user_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0df53445f6faaafc19973b47d9f6d643fd3b6cda327df3f34ec185cae7d3790e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17e93cead0aad7aad613fa61ab9eb5b5c3e0eb2157247ad006a32596ab5e7017", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_firmware_baseline.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "976f2556e6e2be1b42ebe07fa7e4410418551243050a4ed40f1c905e18acfdd9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d8f1ea2ad7ef1ac0c41e59726581910afe3bb9c2bbcd3e41ac6506ec4d0bfd2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_firmware_baseline_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3282786c382feb202b2ae38de4a51603f1f9e1fc762e3460bb7b01ed91068312", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_firmware_catalog.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "efeef89081bb08744cbf92322a30ae18e01bee531c2d383ff32c4b13ce27bfd5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fe26ff99f76dc75ede732ee72ea1bd1a07d8fb8066fb0c96d0a7f06929d56d7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_identity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "335ccc1edd47cc0153682dc450ed579339d3b0d0f7e7adc9cacdcefd058d8d7b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_job_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ada75733f8a26f1e792bb93a337bf227c78f9ef2509060825329fcc55e9cd7f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_network_port_breakout.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7a9f682f5b6132c7b4cf54681e248fdd6cee77c729b5951098e78168e702004", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_network_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57fc1a04a1dbea86e0b2c436f8f5f678d70332c50e7c70af96604a09bfddf652", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_network_vlan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8536f0b33ee05b6973f38e77c2a50446186b99b424192bdfc593d1b6c8197326", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_powerstate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a187e509fdf9269072ffd282da85693af212cd85817f874ea5062783c3e2f887", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_profile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d23c1d700c72b0663cd5620d64d81f3a59bf22e07914a6763e477c32a9781c2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_server_interface_profile_info.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "2c23a8f4a91c1e3af9f5f951e61ff6f1b9d3f4e13b4d1ab2ee35d8d36ca1f167", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_server_interface_profiles.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5beec3e90640353ae9ea8de8300c5f8d3df704e8c6f17c559074c6c7f7460a42", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_smart_fabric.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea1d81efc9c430d920c9ab37abd1deb39bb9db13072867fd7dcf65818a9b7d8d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f7041c4e812a11db9b4ca755729910d8d07199eb6d409f9665dbafc6150627ac", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_template.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00074a9ce2a71a0d38af3bcff76f25e11f37e5f54af1d8830223db15a12c9857", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_template_identity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da23834fc8cf6bd9d27ecfa4a5a07765a568dee144224defdc25344ccfef1c3c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_template_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf7dcd18048f2ddcd377ba3eb024b18d17b7df0731283eb5c5be80f4d7caacdb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_template_network_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97daf83dcd98ab11b99af9a79583cb74c21cd23291d7c61af8383cea043cfe04", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f5f6782d929465d30fb064978558615967eae893165a1f2df440dbcadc7c6b7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ome_user_info.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ac605d2d50cf2cd3d064bb11519585c970b846f3be3a5c45a418cd96181b83a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_redfish_event_subscription.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9da2edc940d5473608e1655f3d22fe0fba52ee05b68bf9d0293eb972c2663018", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_redfish_firmware.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38a6470438ce17ee89338e5663590cf990ca250607863d92fbbb72ac902d8889", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_redfish_powerstate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "795fd10edde86cb737936299817cfd5f81f4bfc44af76bd1a26db9f514975c39", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_redfish_storage_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b67db8d0c6d281a7a671d00e90c1a52c861baff11906f072979c0666e9f0aae7", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/LICENSE b/ansible_collections/dellemc/openmanage/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/openmanage/MANIFEST.json b/ansible_collections/dellemc/openmanage/MANIFEST.json new file mode 100644 index 00000000..c99e320f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/MANIFEST.json @@ -0,0 +1,45 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "openmanage", + "version": "6.3.0", + "authors": [ + "Jagadeesh N V ", + "Felix Stephen ", + "Sachin Apagundi ", + "Husniya Hameed ", + "Abhishek Sinha ", + "Kritika Bhateja " + ], + "readme": "README.md", + "tags": [ + "dellemc", + "openmanage", + "infrastructure", + "idrac", + "ome", + "openmanage_enterprise", + "redfish", + "redfish_api", + "poweredge_servers", + "automation", + "devops" + ], + "description": "Dell OpenManage Ansible Modules allows data center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration, deployment, and update of Dell PowerEdge Servers and modular infrastructure by leveraging the management automation capabilities in-built into the Integrated Dell Remote Access Controller (iDRAC), OpenManage Enterprise and OpenManage Enterprise Modular.", + "license": [], + "license_file": "LICENSE", + "dependencies": {}, + "repository": "https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections", + "documentation": "https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/docs", + "homepage": "https://github.com/dell/dellemc-openmanage-ansible-modules", + "issues": "https://github.com/dell/dellemc-openmanage-ansible-modules/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82aa686e05007eff6aad01f6bc9016f567b6949b57492af62a6d84371138e9da", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/README.md b/ansible_collections/dellemc/openmanage/README.md new file mode 100644 index 00000000..66f04c77 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/README.md @@ -0,0 +1,56 @@ +# Dell OpenManage Ansible Modules + +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1%20adopted-ff69b4.svg)](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/CODE_OF_CONDUCT.md) +[![License](https://img.shields.io/github/license/dell/dellemc-openmanage-ansible-modules)](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/LICENSE) +[![Python version](https://img.shields.io/badge/python-3.9.6+-blue.svg)](https://www.python.org/downloads/) +[![Ansible version](https://img.shields.io/badge/ansible-2.13.0+-blue.svg)](https://pypi.org/project/ansible/) +[![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/dell/dellemc-openmanage-ansible-modules?include_prereleases&label=latest&style=flat-square)](https://github.com/dell/dellemc-openmanage-ansible-modules/releases) + +Dell OpenManage Ansible Modules allows data center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration, deployment, and update of Dell PowerEdge Servers and modular infrastructure by leveraging the management automation capabilities in-built into the Integrated Dell Remote Access Controller (iDRAC), OpenManage Enterprise (OME) and OpenManage Enterprise Modular (OMEM). + +OpenManage Ansible Modules simplifies and automates provisioning, deployment, and updates of PowerEdge servers and modular infrastructure. It allows system administrators and software developers to introduce the physical infrastructure provisioning into their software provisioning stack, integrate with existing DevOps pipelines and manage their infrastructure using version-controlled playbooks, server configuration profiles, and templates in line with the Infrastructure-as-Code (IaC) principles. 
+ +## Table of Contents + + * [Code of Conduct](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/CODE_OF_CONDUCT.md) + * [Committer Guide](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/COMMITTER_GUIDE.md) + * [Contributing Guide](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/CONTRIBUTING.md) + * [Maintainers](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/MAINTAINERS.md) + * [Support](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/SUPPORT.md) + * [Security](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/SECURITY.md) + * [Documentation](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/DOCUMENTATION.md) + * [Execution Environment](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/EXECUTION_ENVIRONMENT.md) + * [Additional Information](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/ADDITIONAL_INFORMATION.md) + +## Supported Platforms + * iDRAC7 based Dell PowerEdge Servers with firmware versions 2.63.60.62 and above. + * iDRAC8 based Dell PowerEdge Servers with firmware versions 2.82.82.82 and above. + * iDRAC9 based Dell PowerEdge Servers with firmware versions 5.10.50.00 and above. + * Dell OpenManage Enterprise versions 3.8.3 and above. + * Dell OpenManage Enterprise Modular versions 1.40.20 and above. + +## Prerequisites + * [Ansible >= 2.13.2](https://github.com/ansible/ansible) + * Python >= 3.9.6 + * To run the iDRAC modules, install OpenManage Python Software Development Kit (OMSDK) + using either ```pip install omsdk --upgrade``` or ```pip install -r requirements.txt```. 
+ OMSDK can also be installed from [Dell OpenManage Python SDK](https://github.com/dell/omsdk) + * Operating System + * Red Hat Enterprise Linux (RHEL) 8.6 and 9.0 + * SUSE Linux Enterprise Server (SLES) 15 SP3 and 15 SP4 + * Ubuntu 22.04 and 20.04.04 + +## Installation + +* From [galaxy](https://galaxy.ansible.com/dellemc/openmanage): +```ansible-galaxy collection install dellemc.openmanage``` + + - For offline installation on the Ansible control machine, download the required tar archive version of the collection from [Dell OpenManage collection](https://galaxy.ansible.com/dellemc/openmanage) and run the command given below: + ```ansible-galaxy collection install dellemc-openmanage-.tar.gz``` + +* From [github](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections): +Install the collection from the github repository using the latest commit on the branch 'collections' +```ansible-galaxy collection install git+https://github.com/dell/dellemc-openmanage-ansible-modules.git,collections``` + +## About +Dell OpenManage Ansible Modules is 100% open source and community-driven. All components are available under [GPL-3.0 license](https://www.gnu.org/licenses/gpl-3.0.html) on GitHub. 
diff --git a/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..73891787 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml @@ -0,0 +1,340 @@ +objects: {} +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + dellemc_configure_idrac_eventing: + description: Configures the iDRAC eventing related attributes + name: dellemc_configure_idrac_eventing + namespace: '' + version_added: 1.0.0 + dellemc_configure_idrac_services: + description: Configures the iDRAC services related attributes + name: dellemc_configure_idrac_services + namespace: '' + version_added: 1.0.0 + dellemc_get_firmware_inventory: + description: Get Firmware Inventory + name: dellemc_get_firmware_inventory + namespace: '' + version_added: 1.0.0 + dellemc_get_system_inventory: + description: Get the PowerEdge Server System Inventory + name: dellemc_get_system_inventory + namespace: '' + version_added: 1.0.0 + dellemc_idrac_lc_attributes: + description: Enable or disable Collect System Inventory on Restart (CSIOR) property + for all iDRAC/LC jobs + name: dellemc_idrac_lc_attributes + namespace: '' + version_added: 1.0.0 + dellemc_idrac_storage_volume: + description: Configures the RAID configuration attributes + name: dellemc_idrac_storage_volume + namespace: '' + version_added: 2.0.0 + dellemc_system_lockdown_mode: + description: Configures system lockdown mode for iDRAC + name: dellemc_system_lockdown_mode + namespace: '' + version_added: 1.0.0 + idrac_bios: + description: Configure the BIOS attributes + name: idrac_bios + namespace: '' + version_added: 2.1.0 + idrac_firmware: + description: Firmware update from a repository on a network share (CIFS, NFS, + HTTP, HTTPS, FTP) + name: idrac_firmware + namespace: '' + version_added: 2.1.0 + idrac_firmware_info: + 
description: Get Firmware Inventory + name: idrac_firmware_info + namespace: '' + version_added: 3.0.0 + idrac_lifecycle_controller_job_status_info: + description: Get the status of a Lifecycle Controller job + name: idrac_lifecycle_controller_job_status_info + namespace: '' + version_added: 2.1.0 + idrac_lifecycle_controller_jobs: + description: Delete the Lifecycle Controller Jobs + name: idrac_lifecycle_controller_jobs + namespace: '' + version_added: 2.1.0 + idrac_lifecycle_controller_logs: + description: Export Lifecycle Controller logs to a network share or local path. + name: idrac_lifecycle_controller_logs + namespace: '' + version_added: 2.1.0 + idrac_lifecycle_controller_status_info: + description: Get the status of the Lifecycle Controller + name: idrac_lifecycle_controller_status_info + namespace: '' + version_added: 2.1.0 + idrac_network: + description: Configures the iDRAC network attributes + name: idrac_network + namespace: '' + version_added: 2.1.0 + idrac_os_deployment: + description: Boot to a network ISO image + name: idrac_os_deployment + namespace: '' + version_added: 2.1.0 + idrac_redfish_storage_controller: + description: Configures the storage controller settings + name: idrac_redfish_storage_controller + namespace: '' + version_added: 2.1.0 + idrac_reset: + description: Reset iDRAC + name: idrac_reset + namespace: '' + version_added: 2.1.0 + idrac_server_config_profile: + description: Export or Import iDRAC Server Configuration Profile (SCP) + name: idrac_server_config_profile + namespace: '' + version_added: 2.1.0 + idrac_syslog: + description: Enable or disable the syslog on iDRAC + name: idrac_syslog + namespace: '' + version_added: 2.1.0 + idrac_system_info: + description: Get the PowerEdge Server System Inventory + name: idrac_system_info + namespace: '' + version_added: 3.0.0 + idrac_timezone_ntp: + description: Configures time zone and NTP on iDRAC + name: idrac_timezone_ntp + namespace: '' + version_added: 2.1.0 + idrac_user: + 
description: Configure settings for user accounts + name: idrac_user + namespace: '' + version_added: 2.1.0 + ome_active_directory: + description: Configure Active Directory groups to be used with Directory Services + on OpenManage Enterprise + name: ome_active_directory + namespace: '' + version_added: 4.0.0 + ome_application_certificate: + description: This module allows to generate a CSR and upload the certificate + name: ome_application_certificate + namespace: '' + version_added: 2.1.0 + ome_application_network_address: + description: Updates the network configuration on OpenManage Enterprise + name: ome_application_network_address + namespace: '' + version_added: 2.1.0 + ome_application_network_proxy: + description: Updates the proxy configuration on OpenManage Enterprise + name: ome_application_network_proxy + namespace: '' + version_added: 2.1.0 + ome_application_network_time: + description: Updates the network time on OpenManage Enterprise + name: ome_application_network_time + namespace: '' + version_added: 2.1.0 + ome_application_network_webserver: + description: Updates the Web server configuration on OpenManage Enterprise + name: ome_application_network_webserver + namespace: '' + version_added: 2.1.0 + ome_chassis_slots: + description: Rename sled slots on OpenManage Enterprise Modular + name: ome_chassis_slots + namespace: '' + version_added: 3.6.0 + ome_configuration_compliance_baseline: + description: Create, modify, and delete a configuration compliance baseline + and remediate non-compliant devices on OpenManage Enterprise + name: ome_configuration_compliance_baseline + namespace: '' + version_added: 3.2.0 + ome_configuration_compliance_info: + description: Device compliance report for devices managed in OpenManage Enterprise + name: ome_configuration_compliance_info + namespace: '' + version_added: 3.2.0 + ome_device_group: + description: Add devices to a static device group on OpenManage Enterprise + name: ome_device_group + namespace: '' + 
version_added: 3.3.0 + ome_device_info: + description: Retrieves the information of devices inventoried by OpenManage + Enterprise + name: ome_device_info + namespace: '' + version_added: 2.0.0 + ome_diagnostics: + description: Export technical support logs(TSR) to network share location + name: ome_diagnostics + namespace: '' + version_added: 3.6.0 + ome_discovery: + description: Create, modify, or delete a discovery job on OpenManage Enterprise + name: ome_discovery + namespace: '' + version_added: 3.3.0 + ome_domain_user_groups: + description: Create, modify, or delete an Active Directory user group on OpenManage + Enterprise and OpenManage Enterprise Modular. + name: ome_domain_user_groups + namespace: '' + version_added: 4.0.0 + ome_firmware: + description: Firmware update of PowerEdge devices and its components through + OpenManage Enterprise + name: ome_firmware + namespace: '' + version_added: 2.0.0 + ome_firmware_baseline: + description: Create, modify, or delete a firmware baseline on OpenManage Enterprise + or OpenManage Enterprise Modular + name: ome_firmware_baseline + namespace: '' + version_added: 2.0.0 + ome_firmware_baseline_compliance_info: + description: Retrieves baseline compliance details on OpenManage Enterprise + name: ome_firmware_baseline_compliance_info + namespace: '' + version_added: 2.0.0 + ome_firmware_baseline_info: + description: Retrieves baseline details from OpenManage Enterprise + name: ome_firmware_baseline_info + namespace: '' + version_added: 2.0.0 + ome_firmware_catalog: + description: Create, modify, or delete a firmware catalog on OpenManage Enterprise + or OpenManage Enterprise Modular + name: ome_firmware_catalog + namespace: '' + version_added: 2.0.0 + ome_groups: + description: Manages static device groups on OpenManage Enterprise + name: ome_groups + namespace: '' + version_added: 3.5.0 + ome_identity_pool: + description: Manages identity pool settings on OpenManage Enterprise + name: ome_identity_pool + namespace: '' 
+ version_added: 2.1.0 + ome_job_info: + description: Get job details for a given job ID or an entire job queue on OpenMange + Enterprise + name: ome_job_info + namespace: '' + version_added: 2.0.0 + ome_network_port_breakout: + description: This module allows to automate the port portioning or port breakout + to logical sub ports + name: ome_network_port_breakout + namespace: '' + version_added: 2.1.0 + ome_network_vlan: + description: Create, modify & delete a VLAN + name: ome_network_vlan + namespace: '' + version_added: 2.1.0 + ome_network_vlan_info: + description: Retrieves the information about networks VLAN(s) present in OpenManage + Enterprise + name: ome_network_vlan_info + namespace: '' + version_added: 2.1.0 + ome_powerstate: + description: Performs the power management operations on OpenManage Enterprise + name: ome_powerstate + namespace: '' + version_added: 2.1.0 + ome_profile: + description: Create, modify, delete, assign, unassign and migrate a profile + on OpenManage Enterprise + name: ome_profile + namespace: '' + version_added: 3.1.0 + ome_smart_fabric: + description: Create, modify or delete a fabric on OpenManage Enterprise Modular + name: ome_smart_fabric + namespace: '' + version_added: 2.1.0 + ome_smart_fabric_uplink: + description: Create, modify or delete a uplink for a fabric on OpenManage Enterprise + Modular + name: ome_smart_fabric_uplink + namespace: '' + version_added: 2.1.0 + ome_template: + description: Create, modify, deploy, delete, export, import and clone a template + on OpenManage Enterprise + name: ome_template + namespace: '' + version_added: 2.0.0 + ome_template_identity_pool: + description: Attach or detach an identity pool to a requested template on OpenManage + Enterprise + name: ome_template_identity_pool + namespace: '' + version_added: 2.0.0 + ome_template_info: + description: Retrieves template details from OpenManage Enterprise + name: ome_template_info + namespace: '' + version_added: 2.0.0 + 
ome_template_network_vlan: + description: Set tagged and untagged vlans to native network card supported + by a template on OpenManage Enterprise + name: ome_template_network_vlan + namespace: '' + version_added: 2.0.0 + ome_user: + description: Create, modify or delete a user on OpenManage Enterprise + name: ome_user + namespace: '' + version_added: 2.0.0 + ome_user_info: + description: Retrieves details of all accounts or a specific account on OpenManage + Enterprise + name: ome_user_info + namespace: '' + version_added: 2.0.0 + redfish_firmware: + description: To perform a component firmware update using the image file available + on the local or remote system + name: redfish_firmware + namespace: '' + version_added: 2.1.0 + redfish_powerstate: + description: Manage device power state + name: redfish_powerstate + namespace: '' + version_added: 2.1.0 + redfish_storage_volume: + description: Manages the storage volume configuration + name: redfish_storage_volume + namespace: '' + version_added: 2.1.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 4.0.0 diff --git a/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml b/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml new file mode 100644 index 00000000..f1b64d48 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml @@ -0,0 +1,993 @@ +ancestor: null +releases: + 2.1.0: + changes: + release_summary: The `Dell EMC OpenManage Ansible Modules `_ + are available on Ansible Galaxy as a collection. + release_date: '2020-07-29' + 2.1.1: + changes: + deprecated_features: + - The dellemc_configure_bios module is deprecated and replaced with the idrac_bios + module. + - The dellemc_configure_idrac_network module is deprecated and replaced with + the idrac_network module. + - The dellemc_configure_idrac_timezone module is deprecated and replaced with + the idrac_timezone_ntp module. 
+ - The dellemc_delete_lc_job and dellemc_delete_lc_job_queue modules are deprecated + and replaced with the idrac_lifecycle_controller_jobs module. + - The dellemc_export_lc_logs module is deprecated and replaced with the idrac_lifecycle_controller_logs + module. + - The dellemc_get_lc_job_status module is deprecated and replaced with the idrac_lifecycle_controller_job_status_info + module. + - The dellemc_get_lcstatus module is deprecated and replaced with the idrac_lifecycle_controller_status_info + module. + - The dellemc_idrac_reset module is deprecated and replaced with the idrac_reset + module. + - The dellemc_setup_idrac_syslog module is deprecated and replaced with the + idrac_syslog module. + major_changes: + - Standardization of ten iDRAC ansible modules based on ansible guidelines. + - Support for OpenManage Enterprise Modular. + release_summary: Support for OpenManage Enterprise Modular and other enhancements. + modules: + - description: Configure the BIOS attributes + name: idrac_bios + namespace: '' + - description: Get the status of a Lifecycle Controller job + name: idrac_lifecycle_controller_job_status_info + namespace: '' + - description: Delete the Lifecycle Controller Jobs + name: idrac_lifecycle_controller_jobs + namespace: '' + - description: Export Lifecycle Controller logs to a network share or local path. 
+ name: idrac_lifecycle_controller_logs + namespace: '' + - description: Get the status of the Lifecycle Controller + name: idrac_lifecycle_controller_status_info + namespace: '' + - description: Configures the iDRAC network attributes + name: idrac_network + namespace: '' + - description: Reset iDRAC + name: idrac_reset + namespace: '' + - description: Enable or disable the syslog on iDRAC + name: idrac_syslog + namespace: '' + - description: Configures time zone and NTP on iDRAC + name: idrac_timezone_ntp + namespace: '' + release_date: '2020-08-26' + 2.1.2: + changes: + bugfixes: + - Documentation improvement request `#140 `_ + - Executing dellemc_configure_idrac_users twice fails the second attempt `#100 + `_ + - dellemc_change_power_state fails if host is already on `#132 `_ + - dellemc_change_power_state not idempotent `#115 `_ + - dellemc_configure_idrac_users error `#26 `_ + - dellemc_configure_idrac_users is unreliable - errors `#113 `_ + - idrac_server_config_profile improvement requested (request) `#137 `_ + - ome_firmware_catalog.yml example errors `#145 `_ + deprecated_features: + - The dellemc_change_power_state module is deprecated and replaced with the + redfish_powerstate module. + - The dellemc_configure_idrac_users module is deprecated and replaced with the + idrac_user module. + minor_changes: + - The idrac_server_config_profile module supports a user provided file name + for an export operation. + release_summary: The dellemc_change_power_state and dellemc_configure_idrac_users + modules are standardized as per ansible guidelines. 8 GitHub issues are fixed. + modules: + - description: Configure settings for user accounts + name: idrac_user + namespace: '' + - description: Manage device power state + name: redfish_powerstate + namespace: '' + release_date: '2020-09-23' + 2.1.3: + changes: + release_summary: Network configuration service related modules ome_network_vlan, + ome_network_port_breakout and ome_network_vlan_info are added. 
+ modules: + - description: This module allows to automate the port portioning or port breakout + to logical sub ports + name: ome_network_port_breakout + namespace: '' + - description: Create, modify & delete a VLAN + name: ome_network_vlan + namespace: '' + - description: Retrieves the information about networks VLAN(s) present in OpenManage + Enterprise + name: ome_network_vlan_info + namespace: '' + release_date: '2020-10-29' + 2.1.4: + changes: + known_issues: + - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation + of multiple uplinks of the same name even though this is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified.' + release_summary: Fabric management related modules ome_smart_fabric and ome_smart_fabric_uplink + are added. + modules: + - description: Create, modify or delete a fabric on OpenManage Enterprise Modular + name: ome_smart_fabric + namespace: '' + - description: Create, modify or delete a uplink for a fabric on OpenManage Enterprise + Modular + name: ome_smart_fabric_uplink + namespace: '' + release_date: '2020-11-25' + 2.1.5: + changes: + bugfixes: + - Identity pool does not reset when a network VLAN is added to a template in + the ome_template_network_vlan module. `#169 `_ + - Missing parameter added in ome_smart_fabric_uplink module documenation. `#181 + `_ + known_issues: + - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation + of multiple uplinks of the same name even though this is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified.' + - 'Issue 2(187956): If an invalid job_id is provided, idrac_lifecycle_controller_job_status_info + returns an error message. This error message does not contain information + about the exact issue with the invalid job_id.' 
+ - 'Issue 3(188267): While updating the iDRAC firmware, the idrac_firmware module + completes execution before the firmware update job is completed. An incorrect + message is displayed in the task output as ''DRAC WSMAN endpoint returned + HTTP code ''400'' Reason ''Bad Request''''. This issue may occur if the target + iDRAC firmware version is less than 3.30.30.30' + minor_changes: + - The idrac_server_config_profile module supports IPv6 address format. + release_summary: The idrac_firmware module is enhanced to include checkmode + support and job tracking. + release_date: '2020-12-30' + 3.0.0: + changes: + bugfixes: + - GitHub issue fix - Module dellemc_idrac_storage_volume.py broken. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/212) + - GitHub issue fix - ome_smart_fabric Fabric management is not supported on + the specified system. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/179) + - 'Known issue fix #187956: If an invalid job_id is provided, the idrac_lifecycle_controller_job_status_info + module returns an error message with the description of the issue.' + - 'Known issue fix #188267: No error message is displayed when the target iDRAC + with firmware version less than 3.30.30.30 is updated.' + - Sanity fixes as per ansible guidelines to all modules. + deprecated_features: + - The ``dellemc_get_firmware_inventory`` module is deprecated and replaced with + ``idrac_firmware_info``. + - The ``dellemc_get_system_inventory`` module is deprecated and replaced with + ``idrac_system_info``. + known_issues: + - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation + of multiple uplinks of the same name even though this is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified.' + major_changes: + - Removed the existing deprecated modules. 
+ minor_changes: + - Coding Guidelines, Contributor Agreement, and Code of Conduct files are added + to the collection. + - New deprecation changes for ``dellemc_get_system_inventory`` and ``dellemc_get_firmware_inventory`` + ignored for ansible 2.9 sanity test. + - The modules are standardized as per ansible guidelines. + release_summary: Deprecations, issue fixes, and standardization of modules as + per ansible guidelines. + release_date: '2021-01-25' + 3.1.0: + changes: + bugfixes: + - ome_firmware_baseline_compliance_info - OMEnt firmware baseline compliance + info pagination support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/171) + - ome_network_proxy - OMEnt network proxy check mode support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/187) + known_issues: + - ome_smart_fabric - Issue(185322) Only three design types are supported by + OpenManage Enterprise Modular but the module successfully creates a fabric + when the design type is not supported. + - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though this + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + release_summary: OpenManage Enterprise profiles management support added. + modules: + - description: Create, modify, delete, assign, unassign and migrate a profile + on OpenManage Enterprise + name: ome_profile + namespace: '' + release_date: '2021-02-24' + 3.2.0: + changes: + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. 
+ - ome_configuration_compliance_info - Issue(195592) Module may error out with + the message ``unable to process the request because an error occurred``. If + the issue persists, report it to the system administrator. + - ome_smart_fabric - Issue(185322) Only three design types are supported by + OpenManage Enterprise Modular but the module successfully creates a fabric + when the design type is not supported. + - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though this + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + minor_changes: + - ome_template - Allows to deploy a template on device groups. + release_summary: Configuration compliance related modules added + modules: + - description: Create, modify, and delete a configuration compliance baseline + and remediate non-compliant devices on OpenManage Enterprise + name: ome_configuration_compliance_baseline + namespace: '' + - description: Device compliance report for devices managed in OpenManage Enterprise + name: ome_configuration_compliance_info + namespace: '' + release_date: '2021-03-24' + 3.3.0: + changes: + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_configuration_compliance_info - Issue(195592) Module may error out with + the message ``unable to process the request because an error occurred``. If + the issue persists, report it to the system administrator. + - ome_smart_fabric - Issue(185322) Only three design types are supported by + OpenManage Enterprise Modular but the module successfully creates a fabric + when the design type is not supported. 
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
+ minor_changes:
+ - ome_firmware_baseline - Allows to retrieve the device even if it is not in the
+ first 50 device IDs
+ release_summary: OpenManage Enterprise device group and device discovery support
+ added
+ modules:
+ - description: Add devices to a static device group on OpenManage Enterprise
+ name: ome_device_group
+ namespace: ''
+ - description: Create, modify, or delete a discovery job on OpenManage Enterprise
+ name: ome_discovery
+ namespace: ''
+ release_date: '2021-04-28'
+ 3.4.0:
+ changes:
+ known_issues:
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
+ major_changes:
+ - ome_firmware_baseline - Module supports check mode, and allows the modification
+ and deletion of firmware baselines.
+ - ome_firmware_catalog - Module supports check mode, and allows the modification
+ and deletion of firmware catalogs.
+ minor_changes:
+ - ome_firmware_catalog - Added support for repositories available on the Dell
+ support site.
+ - ome_template_network_vlan - Added the input option which allows to apply the
+ modified VLAN settings immediately on the associated modular-system servers. 
+ release_summary: OpenManage Enterprise firmware baseline and firmware catalog + modules updated to support checkmode. + release_date: '2021-05-26' + 3.5.0: + changes: + bugfixes: + - Handled invalid share and unused imports cleanup for iDRAC modules (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/268) + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though this + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + major_changes: + - idrac_server_config_profile - Added support for exporting and importing Server + Configuration Profile through HTTP/HTTPS share. + - ome_device_group - Added support for adding devices to a group using the IP + addresses of the devices and group ID. + release_summary: Support for managing static device groups on OpenManage Enterprise. + modules: + - description: Manages static device groups on OpenManage Enterprise + name: ome_groups + namespace: '' + release_date: '2021-06-28' + 3.6.0: + changes: + bugfixes: + - dellemc_idrac_storage_volume - Module fails if the BlockSize, FreeSize, or + Size state of the physical disk is set to "Not Available". + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. 
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though this + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + release_summary: Support for configuring device slot name and export SupportAssist + device collections from OpenManage Enterprise and OpenManage Enterprise Modular. + modules: + - description: Rename sled slots on OpenManage Enterprise Modular + name: ome_chassis_slots + namespace: '' + - description: Export technical support logs(TSR) to network share location + name: ome_diagnostics + namespace: '' + release_date: '2021-07-28' + 4.0.0: + changes: + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though this + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + release_summary: Support for configuring active directory user group on OpenManage Enterprise and OpenManage Enterprise Modular. 
+ modules: + - description: Configure Active Directory groups to be used with Directory Services + on OpenManage Enterprise and OpenManage Enterprise Modular + name: ome_active_directory + namespace: '' + - description: Create, modify, or delete an Active Directory user group on OpenManage + Enterprise and OpenManage Enterprise Modular + name: ome_domain_user_groups + namespace: '' + release_date: '2021-08-27' + 4.1.0: + changes: + major_changes: + - ome_firmware - Added option to stage the firmware update and support for selecting components and devices for baseline-based firmware update. + minor_changes: + - ome_template_network_vlan - Enabled check_mode support. + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though it + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + release_summary: Support for Redfish event subscriptions and enhancements to ome_firmware module. + modules: + - description: Manage Redfish Subscriptions + name: redfish_event_subscription + namespace: '' + release_date: '2021-09-28' + 4.2.0: + changes: + known_issues: + - idrac_user - Issue(192043) Module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. 
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does + not allow the creation of multiple uplinks of the same name even though it + is supported by OpenManage Enterprise Modular. If an uplink is created using + the same name as an existing uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) The ome_device_power_settings module + errors out with the following message if the value provided for the + parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to + complete the request because PowerCap does not exist or is not applicable + for the resource URI.`` + release_summary: Support to configure OME Modular devices network, power, and location settings. + modules: + - description: Configure network settings of devices on OpenManage Enterprise Modular + name: ome_device_mgmt_network + namespace: '' + - description: Configure device location settings on OpenManage Enterprise Modular + name: ome_device_location + namespace: '' + - description: Configure chassis power settings on OpenManage Enterprise Modular + name: ome_device_power_settings + namespace: '' + release_date: '2021-10-27' + 4.3.0: + changes: + known_issues: + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. 
Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module errors out with the + following message if the value provided for the parameter ``power_cap`` is + not within the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support to configure network services, syslog forwarding, and SMTP settings. + modules: + - description: Configure chassis network services settings on OpenManage Enterprise Modular + name: ome_device_network_services + namespace: '' + - description: This module allows to configure SMTP or email configurations + name: ome_application_alerts_smtp + namespace: '' + - description: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular + name: ome_application_alerts_syslog + namespace: '' + release_date: '2021-11-26' + 4.4.0: + changes: + bugfixes: + - ome_device_location - The issue that applies values of the location settings only in lowercase is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/341) + minor_changes: + - ome_firmware - The module is enhanced to support check mode and idempotency (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/274) + - ome_template - An example task is added to create a compliance template from reference device (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/339) + known_issues: + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language 
parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module errors out with the + following message if the value provided for the parameter ``power_cap`` is + not within the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support to configure login security, session inactivity timeout, and local access settings. 
+ modules: + - description: Configure the login security properties + name: ome_application_security_settings + namespace: '' + - description: This module allows you to configure the session inactivity timeout settings + name: ome_application_network_settings + namespace: '' + - description: Configure local access settings on OpenManage Enterprise Modular + name: ome_device_local_access_configuration + namespace: '' + release_date: '2021-12-24' + 5.0.0: + changes: + major_changes: + - All modules now support SSL over HTTPS and socket level timeout. + breaking_changes: + - HTTPS SSL certificate validation is a **breaking change** and will require modification in the + existing playbooks. Please refer to `SSL Certificate Validation `_ section in the `README.md `_ for modification to existing playbooks. + bugfixes: + - idrac_bios - The issue while configuring boot sources is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/347) + known_issues: + - ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message + if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. 
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module errors out with the + following message if the value provided for the parameter ``power_cap`` is + not within the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: HTTPS SSL support for all modules and quick deploy settings. + modules: + - description: Configure Quick Deploy settings on OpenManage Enterprise Modular + name: ome_device_quick_deploy + namespace: '' + release_date: '2022-01-27' + 5.0.1: + changes: + major_changes: + - All modules can read custom or organizational CA signed certificate from the environment variables. + Please refer to `SSL Certificate Validation `_ section in the `README.md `_ for modification to existing playbooks or setting environment variable. 
+ bugfixes: + - The ome_application_network_time and ome_application_network_proxy modules are breaking due + to the changes introduced for SSL validation.(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/360) + - All playbooks require modification because the validate_certs argument is set to True by default + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/357) + known_issues: + - ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message + if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. 
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ - ome_device_power_settings - Issue(212679) - The module errors out with the
+ following message if the value provided for the parameter ``power_cap`` is
+ not within the supported range of 0 to 32767, ``Unable to complete the request
+ because PowerCap does not exist or is not applicable for the resource URI.``
+ release_summary: Support to provide custom or organizational CA signed certificate for SSL validation from the environment variable.
+ release_date: '2022-02-11'
+ 5.1.0:
+ changes:
+ bugfixes:
+ - idrac_firmware - Issue (220130) The socket.timeout issue that occurs
+ during the wait_for_job_completion() job is fixed.
+ minor_changes:
+ - ome_identity_pool - The module is enhanced to support check mode and
+ idempotency.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/328)
+ - ome_template_identity_pool - The module is enhanced to support check
+ mode and idempotency.
+ - ome_application_network_address - The module is enhanced to support
+ check mode and idempotency.
+ - redfish_event_subscription - The module is enhanced to support check
+ mode and idempotency.
+ - ome_identity_pool - The iSCSI Initiator and Initiator IP Pool
+ attributes are not mandatory to create an identity pool.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/329)
+ - ome_device_info - The module is enhanced to return a blank list when
+ devices or baselines are not present in the system.
+ - ome_firmware_baseline_compliance_info - The module is enhanced to
+ return a blank list when devices or baselines are not present in the system. 
+ - ome_firmware_baseline_info - The module is enhanced to return a blank + list when devices or baselines are not present in the system. + known_issues: + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. 
+ - ome_device_power_settings - Issue(212679) - The module errors out with the + following message if the value provided for the parameter ``power_cap`` is + not within the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support for OpenManage Enterprise Modular server interface management. + modules: + - description: Configures server interface profiles on OpenManage Enterprise Modular. + name: ome_server_interface_profiles + namespace: '' + - description: Retrieves the information of server interface profile on OpenManage Enterprise Modular. + name: ome_server_interface_profile_info + namespace: '' + release_date: '2022-02-24' + 5.2.0: + changes: + minor_changes: + - ome_template - The module is enhanced to support check mode and + idempotency. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/255) + - ome_template - The module is enhanced to support modifying a template + based on the attribute names instead of the ID. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/358) + - ome_profile - The module is enhanced to support check mode and + idempotency. + - ome_profile - The module is enhanced to support modifying a profile + based on the attribute names instead of the ID. + - ome_diagnostics - The module is enhanced to support check mode and + idempotency. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/345) + - ome_diagnostics - This module is enhanced to extract log from + lead chassis. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/310) + - idrac_redfish_storage_controller - This module is enhanced to support + the following settings with check mode and idempotency - UnassignSpare, + EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, + ConvertToNonRAID, ChangePDStateToOnline, ChangePDStateToOffline. 
+ known_issues: + - ome_application_console_preferences - Issue(224690) - The module does + not display a proper error message when an unsupported value is provided + for the parameters report_row_limit, email_sender_settings, and + metric_collection_settings, and the value is applied on OpenManage Enterprise. + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. 
If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support to configure console preferences on OpenManage Enterprise. + modules: + - description: Configures console preferences on OpenManage Enterprise. + name: ome_application_console_preferences + namespace: '' + release_date: '2022-03-29' + 5.3.0: + changes: + minor_changes: + - redfish_storage_volume - The module is enhanced to support check mode and idempotency. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/245) + - ome_smart_fabric_uplink - The module is enhanced to support idempotency. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/253) + - ome_diagnostics - The module is enhanced to support debug logs. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/308) + - ome_diagnostics - Added "supportassist_collection" as a choice for the log_type argument + to export SupportAssist logs. + (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/309) + known_issues: + - ome_application_console_preferences - Issue(224690) - The module does + not display a proper error message when an unsupported value is provided + for the parameters report_row_limit, email_sender_settings, and + metric_collection_settings, and the value is applied on OpenManage Enterprise. + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. 
+ - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. 
+ - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Added check mode and idempotency support for redfish_storage_volume + and idempotency support for ome_smart_fabric_uplink. For ome_diagnostics, added + support for debug logs and added supportassist_collection as a choice for the log_type + argument to export SupportAssist logs. + release_date: '2022-04-26' + 5.4.0: + changes: + major_changes: + - idrac_server_config_profile - The module is enhanced to support export, + import, and preview the SCP configuration using Redfish and added support + for check mode. + known_issues: + - ome_application_console_preferences - Issue(224690) - The module does + not display a proper error message when an unsupported value is provided + for the parameters report_row_limit, email_sender_settings, and + metric_collection_settings, and the value is applied on OpenManage Enterprise. + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. 
+ - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support for export, import, and preview the Server Configuration + Profile (SCP) configuration using Redfish and added support for check mode. + release_date: '2022-05-26' + 5.5.0: + changes: + minor_changes: + - redfish_firmware - This module is updated to use the Job Service URL instead of + Task Service URL for job tracking. + - idrac_redfish_storage_controller - This module is updated to use the Job Service URL + instead of Task Service URL for job tracking. 
+ - idrac_server_config_profile - This module is updated to use the Job Service URL + instead of Task Service URL for job tracking. + bugfixes: + - ome_application_console_preferences - Issue(224690) - The module does + not display a proper error message when an unsupported value is provided + for the parameters report_row_limit, email_sender_settings, and + metric_collection_settings, and the value is applied on OpenManage Enterprise + - idrac_server_config_profile - Issue(234817) – When an XML format is exported + using the SCP, the module breaks while waiting for the job completion. + known_issues: + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. 
+ - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support to generate certificate signing request, import, and export + certificates on iDRAC. + modules: + - description: Configure certificates for iDRAC. + name: idrac_certificates + namespace: '' + release_date: '2022-06-29' + 6.0.0: + changes: + major_changes: + - The share parameters are deprecated from the following modules - idrac_network, + idrac_timezone_ntp, dellemc_configure_idrac_eventing, dellemc_configure_idrac_services, + dellemc_idrac_lc_attributes, dellemc_system_lockdown_mode. + - Added collection metadata for creating execution environments. + - Refactored the Markdown (MD) files and content for better readability. + known_issues: + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. 
+ - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Added collection metadata for creating execution environments, + deprecation of share parameters, and support for configuring iDRAC attributes + using idrac_attributes module. 
+ modules: + - description: Configure the iDRAC attributes + name: idrac_attributes + namespace: '' + release_date: '2022-07-28' + 6.1.0: + changes: + major_changes: + - ome_devices - Support for performing device-specific operations on OpenManage Enterprise. + - idrac_boot - Support for configuring the boot settings on iDRAC. + - ome_device_group - The module is enhanced to support the removal of devices from a static device group. + minor_changes: + - ome_configuration_compliance_info - The module is enhanced to report single device compliance information. + known_issues: + - ome_device_quick_deploy - Issue(216352) - The module does not display a + proper error message if an unsupported value is provided for the + ipv6_prefix_length and vlan_id parameters. + - ome_device_local_access_configuration - Issue(217865) - The module does not + display a proper error message if an unsupported value is provided for the + user_defined and lcd_language parameters. + - ome_device_local_access_configuration - Issue(215035) - The module reports + ``Successfully updated the local access setting`` if an unsupported value is + provided for the parameter timeout_limit. However, this value is not + actually applied on OpenManage Enterprise Modular. + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - ome_application_alerts_smtp - Issue(212310) - The module does not provide a + proper error message if the destination_address is more than 255 characters. 
+ - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support for device-specific operations on OpenManage Enterprise and configuring boot settings on iDRAC. + modules: + - description: Perform device-specific operations on target devices + name: ome_devices + namespace: '' + - description: Configure the boot order settings. + name: idrac_boot + namespace: '' + release_date: '2022-08-26' + 6.2.0: + changes: + major_changes: + - idrac_bios - The module is enhanced to support clear pending BIOS attributes, + reset BIOS to default settings, and configure BIOS attribute using Redfish. + known_issues: + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. 
+ - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. + - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Added clear pending BIOS attributes, reset BIOS to default settings, + and configure BIOS attribute using Redfish enhancements for idrac_bios. + release_date: '2022-09-28' + 6.3.0: + changes: + major_changes: + - idrac_virtual_media - This module allows to configure Remote File Share settings. + - idrac_redfish_storage_controller - This module is enhanced to support + LockVirtualDisk operation. + known_issues: + - ome_device_network_services - Issue(212681) - The module does not provide a + proper error message if unsupported values are provided for the parameters- + port_number, community_name, max_sessions, max_auth_retries, and idle_timeout. + - ome_application_alerts_syslog - Issue(215374) - The module does not provide a + proper error message if the destination_address is more than 255 characters. + - idrac_user - Issue(192043) The module may error out with the message ``unable + to perform the import or export operation because there are pending attribute + changes or a configuration job is in progress``. Wait for the job to complete + and run the task again. 
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation + of multiple uplinks of the same name even though it is supported by OpenManage + Enterprise Modular. If an uplink is created using the same name as an existing + uplink, the existing uplink is modified. + - ome_device_power_settings - Issue(212679) - The module displays the following + message if the value provided for the parameter ``power_cap`` is not within + the supported range of 0 to 32767, ``Unable to complete the request + because PowerCap does not exist or is not applicable for the resource URI.`` + release_summary: Support for LockVirtualDisk operation and to configure Remote File + Share settings using idrac_virtual_media module. + modules: + - description: Configure the virtual media settings. + name: idrac_virtual_media + namespace: '' + release_date: '2022-10-28' + diff --git a/ansible_collections/dellemc/openmanage/changelogs/config.yaml b/ansible_collections/dellemc/openmanage/changelogs/config.yaml new file mode 100644 index 00000000..cfc04bfa --- /dev/null +++ b/ansible_collections/dellemc/openmanage/changelogs/config.yaml @@ -0,0 +1,31 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Dell EMC OpenManage Ansible Modules +trivial_section_name: trivial +use_fqcn: true diff --git 
a/ansible_collections/dellemc/openmanage/docs/ADDITIONAL_INFORMATION.md b/ansible_collections/dellemc/openmanage/docs/ADDITIONAL_INFORMATION.md new file mode 100644 index 00000000..b2c8b47a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/ADDITIONAL_INFORMATION.md @@ -0,0 +1,20 @@ + +# Additional Information + +## Release cadence +* OpenManage Ansible Modules releases follow a monthly release cycle. On the last week of every month, + the updated modules are posted to this repository. + +## Versioning +* This product's releases follow [semantic versioning](https://semver.org/). + +## Deprecation +* OpenManage Ansible Modules deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). diff --git a/ansible_collections/dellemc/openmanage/docs/BRANCHING.md b/ansible_collections/dellemc/openmanage/docs/BRANCHING.md new file mode 100644 index 00000000..5e6d554b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/BRANCHING.md @@ -0,0 +1,10 @@ + +TBD \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/docs/CODE_OF_CONDUCT.md b/ansible_collections/dellemc/openmanage/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..45d257b2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[INSERT CONTACT METHOD]. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/ansible_collections/dellemc/openmanage/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/openmanage/docs/COMMITTER_GUIDE.md new file mode 100644 index 00000000..e04756b8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/COMMITTER_GUIDE.md @@ -0,0 +1,41 @@ + + +# Committer Guidelines + +These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and not necessarily employees of Dell. + +These guidelines apply to everyone and as Committers you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse it. 
+ +If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to not commit or lose these privileges altogether. + +## General Rules + +### Don't + +* Break the build. +* Commit directly. +* Compromise backward compatibility. +* Disrespect your Community Team members. Help them grow. +* Think it is someone else's job to test your code. Write tests for all the code you produce. +* Forget to keep things simple. +* Create technical debt. Fix-in-place and make it the highest priority above everything else. + +### Do + +* Always follow the defined coding guideline +* Keep the design of your software clean and maintainable. +* Squash your commits, avoid merges. +* Write tests for all your deliverables. +* Automate everything. +* Maintain a high code coverage, equal to or greater than 90%. +* Keep an open communication with other Committers. +* Ask questions. +* Document your contributions and remember to keep it simple. diff --git a/ansible_collections/dellemc/openmanage/docs/CONTRIBUTING.md b/ansible_collections/dellemc/openmanage/docs/CONTRIBUTING.md new file mode 100644 index 00000000..3b1a1fdb --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/CONTRIBUTING.md @@ -0,0 +1,197 @@ + + +# How to Contribute + +Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](CODE_OF_CONDUCT.md). 
+ +## Table of Contents + +* [Become a contributor](#Become-a-contributor) +* [Contributor agreement](#Contributor-agreement) +* [Submitting issues](#Submitting-issues) +* [Triage issues](#Triage-issues) +* [Your first contribution](#Your-first-contribution) +* [Branching](#Branching) +* [Signing your commits](#Signing-your-commits) +* [Pull requests](#Pull-requests) +* [Code reviews](#Code-reviews) +* [Code Style](#Code-style) + +## Become a contributor + +You can contribute to this project in several ways. Here are some examples: + +* Contribute to the project documentation and codebase. +* Report and triage bugs. +* Feature requests +* Write technical documentation and blog posts, for users and contributors. +* Help others by answering questions about this project. + +## Contributor Agreement +All contributions shall be made under the Developer Certification of Origin ("DCO") (see http://elinux.org/Developer_Certificate_Of_Origin) which is reproduced below. Specifically, the Git commit message for the contribution should contain the following tag information signifying use of the DCO: + + +"Signed-off-by: [Your Name] [youremail@company.com]" + + +________________________________________ +### Developer's Certificate of Origin 1.1 +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or + +(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or + +(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 
+ +(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. + +## Submitting issues + +All issues related to the associated Ansible modules, roles, playbooks, regardless of the service/repository the issue belongs to (see table above), should be submitted [here](https://github.com/dell/dellemc-openmanage-ansible-modules/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted. + +### Report bugs + +We aim to track and document everything related to the repository via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process. + +Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/dellemc-openmanage-ansible-modules/issues) for similar issues. + +Report a bug by submitting a [bug report](https://github.com/dell/dellemc-openmanage-ansible-modules/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug. + +When opening a Bug please include the following information to help with debugging: + +1. Version of relevant software: this software, Python version, Dell Server/Storage Platform, etc. +2. Details of the issue explaining the problem: what, when, where +3. The expected outcome that was not met (if any) +4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__ + +An Issue __must__ be created before submitting any pull request. 
Any pull request that is created should be linked to an Issue. + +### Feature request + +If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/dellemc-openmanage-ansible-modules/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A). + +### Answering questions + +If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/dellemc-openmanage-ansible-modules/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A) + +We'd love your help answering questions being asked by other CSM users. + +## Triage issues + +Triage helps ensure that issues resolve quickly by: + +* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +* Giving a contributor the information they need before they commit to resolving an issue. +* Lowering the issue count by preventing duplicate issues. +* Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with _issue triage_. The Dell Ansible community will thank you for saving them time by spending some of yours. + +Read more about the ways you can [Triage issues](ISSUE_TRIAGE.md). + +## Your first contribution + +Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`. + +* [Beginner-friendly](https://github.com/dell/dellemc-openmanage-ansible-modules/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete. 
+* [Help wanted](https://github.com/dell/dellemc-openmanage-ansible-modules/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity. + +When you're ready to contribute, it's time to create a pull request. + +## Testing +See [here](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/tests/README.md) for further information on testing. + +## Debugging +To debug OpenManage Ansible Modules using IDE, see [here](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/docs/DEBUG.md) + +## Branching + +* [Branching Strategy](BRANCHING.md) + +## Signing your commits + +We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions. + +GitHub will prevent a pull request from being merged if there are any unsigned commits. + +### Signing a commit + +GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key. + +Make sure you have your user name and e-mail set. This will be required for your signed commit to be properly verified. 
Check the following references: + +* Setting up your github user name [reference](https://help.github.com/articles/setting-your-username-in-git/) +* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/) + +Once Git and your GitHub account have been properly configured, you can add the -S flag to the git commits: + +```console +$ git commit -S -m "your commit message" +# Creates a signed commit +``` + +### Commit message format + +This repository uses the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/) + +## Pull Requests + +If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it. + +To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines: + +* Title and description match the implementation. +* Commits within the pull request follow the formatting guidelines. +* The pull request closes one related issue. +* The pull request contains necessary tests that verify the intended behavior. +* If your pull request has conflicts, rebase your branch onto the main branch. + +If the pull request fixes a bug: + +* The pull request description must include `Fixes #`. +* To avoid regressions, the pull request should include tests that replicate the fixed bug. + +The owning team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become a part of the Git commit body. + +We use the pull request title when we generate change logs for releases. 
As such, we strive to make the title as informative as possible. + +Make sure that the title for your pull request uses the same format as the subject line in the commit message. + +### Quality Gates for pull requests + +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the code repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](SUPPORT.md). + +#### Code build/test/coverage + +[GitHub action](https://github.com/dell/dellemc-openmanage-ansible-modules/actions) that runs unit tests and checks that the code coverage of each package meets a configured threshold (currently 90%). An error is flagged if a given pull request does not meet the test coverage threshold and blocks the pull request from being merged. + +## Code Reviews + +All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. + +A pull request must satisfy the following for it to be merged: + +* A pull request will require at least 2 maintainer approvals. +* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document. +* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again. + +## Code Style + +For the Python code in the repository, we expect the code styling outlined in [Ansible Python guide](https://docs.ansible.com/ansible/latest/dev_guide/developing_python_3.html).
In addition to this, we have the following supplements: +* Contributions should adhere to ansible Coding standard guidelines as we follow these standards. +* Should include [test](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/tests/) scripts for your changes. +* Do not submit a contribution request on our deprecated modules. They are just meant for backward compatibility. + +### Handle Errors +TBD diff --git a/ansible_collections/dellemc/openmanage/docs/DEBUG.md b/ansible_collections/dellemc/openmanage/docs/DEBUG.md new file mode 100644 index 00000000..c433d1f9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/DEBUG.md @@ -0,0 +1,58 @@ + +# How to Perform Debugging + +The following steps enables you to debug OpenManage Ansible Modules from an IDE either from local Linux or from a remote debugger on Windows. + +1. Install OpenManage Ansible Modules from Galaxy `ansible-galaxy collection install dellemc.openmanage`. On Ubuntu this defaults to `$HOME/.ansible/collections/ansible_collections/dellemc/openmanage/` The problem with this is that this location is not in PYTHONPATH which will cause problems with the debugger. +2. To resolve python path issues, move the `openmanage ` collection to align with the rest of Dell's code which is in PYTHONPATH with `sudo mv $HOME/.ansible/collections/ansible_collections/dellemc/openmanage/ /usr/local/lib/python3.X/dist-packages/ansible_collections/dellemc/`. The path may be different on your system, but it should be placed with your other python packages. + Alternatively, you can add the directory `$HOME/.ansible/collections/ansible_collections/dellemc/openmanage/` to PYTHONPATH with `export PYTHONPATH=$PYTHONPATH:$HOME/.ansible/collections`. + 1. The location may be different for but the key is `openmanage` must be accessible within the `ansible_collections.dellemc` namespace. That is to say, the path should look like `/ansible_collections/dellemc/openmanage` + 2. 
Sometimes, `from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError` still reports an error. This error can be ignored safely. + 3. Some IDEs may require a restart in order to rescan the available packages for import statements to resolve. +3. Create a file with any name. We will use `args.json`. Fill it with the arguments you wish to provide to the module: + + { + "ANSIBLE_MODULE_ARGS": { + "idrac_ip": "192.168.1.63", + "idrac_user": "root", + "idrac_password": "password", + "share_name": "some_share", + "share_user": "some_username", + "share_password": "some_password" + } + } + +For more information about injecting arguments, see the Ansible [docs](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#exercising-module-code-locally). + +4. Execute one of the modules by running `python -m ansible.modules.<module_name> /tmp/args.json`. + +How to run this in an IDE is described below. + +## PyCharm + +### On Remote Windows + +The following steps help to run and debug OpenManage Ansible Modules installed on the Linux VM using a remote debugger configured on Windows PyCharm IDE. + +1. Download a copy of [the code](https://github.com/dell/dellemc-openmanage-ansible-modules) and open the folder with PyCharm. +2. Go to File->Settings->Project:<project_name>->Python Interpreter +3. Click the gear and then click add +4. Use `SSH Interpreter` and then add the Linux box mentioned above or another remote target of your choice. + +### On Local Linux + +1. You will need to configure the IDE to use the `args.json` file you created above. In PyCharm do this by going to Run->Edit Configurations. In `Parameters` add `<path>\args.json`. This will pass the JSON file as an argument to the module when it runs. You should now be able to debug the module directly. +2.
It is also possible to pass the arguments within the Python script itself by updating the `main` function with: + + basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {"idrac_ip": "192.168.0.1", "idrac_user": "username", "idrac_password": "password"}})) + set_module_args(args) + +3. If you would like to set PYTHONPATH with PyCharm you can do that by going to Run->Edit Configurations->Environment Variables and add 'PYTHONPATH=$PYTHONPATH:$HOME/.ansible/collections/'. diff --git a/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md b/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md new file mode 100644 index 00000000..b89afba7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md @@ -0,0 +1,40 @@ + +## Playbooks and Tutorials +* For the latest sample playbooks and examples, see [playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks). +* For the tutorials and sample use cases, see the tutorials available at [developer.dell.com](https://developer.dell.com/). + +## Module documentations +- For the OpenManage Ansible collection documentation, see [Documentation](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/docs). This documentation page is updated for every major and minor (patch release) and has the latest collection documentation. +- OpenManage Ansible collection is an Ansible certified collection and also available as part of the Ansible Community Releases version v3.0.0 and later. Consequently, the documentation can also be accessed at [Ansible Collection Documentation](https://docs.ansible.com/ansible/latest/collections/dellemc/openmanage/index.html#plugins-in-dellemc-openmanage). +> **_NOTE_**: There might be a scenario where the documentation available at [Ansible Collection Documentation](https://docs.ansible.com/ansible/latest/collections/dellemc/openmanage/index.html#plugins-in-dellemc-openmanage) is not the latest version. 
This is due to differences in the release timelines for Ansible community release and OpenManage Ansible collection. +- To view the documentation for a module, use the command ```ansible-doc```. For example, + ```$ ansible-doc dellemc.openmanage.<module-name>``` + +## SSL Certificate Validation +**By default, SSL certificate validation is enabled in all modules to enforce secure communication.** + +### Enable SSL certificate validation + - Generate and upload the custom or organizational CA signed certificates on the iDRACs, OpenManage Enterprise, and OpenManage Enterprise-Modular, as required. + - For iDRAC, see the section `SSL server certificates` in the `Integrated Dell Remote Access Controller Users Guide`. + - For OpenManage Enterprise, see the section `Security Certificates` in the `OpenManage Enterprise Users Guide`. + - For OpenManage Enterprise Modular, see the section `Managing certificates` in the `OpenManage Enterprise Modular for PowerEdge MX7000 Chassis Users Guide`. + - After you have uploaded the custom or organizational CA signed certificate to iDRAC or OME or OME-M, you must have the CA file or bundle available on your Ansible controller. For example, copy the CA file or bundle in the following path: /usr/share/ssl-certs/ + > **_NOTE_**: Ensure that the user running the Ansible modules has permission to access the certificate file or bundle. + - You can then use either of the following methods to specify the custom or organization CA certificate file or bundle path to the module: + - In your playbook tasks, set the `ca_path` argument to the file path of your custom or organization CA certificate file or bundle. + ```ca_path: /usr/share/ssl-certs/ca-cert.pem``` + - Use any of the following environment variables to specify the custom or organization CA certificate file or bundle path. The modules read the environment variable in the following order of preference: ```REQUESTS_CA_BUNDLE```, ```CURL_CA_BUNDLE```, ```OMAM_CA_BUNDLE```.
+ > **_NOTE_**: Use the following command to set the environment variable with the custom or organization CA certificate file or bundle: + ```export REQUESTS_CA_BUNDLE=/usr/share/ssl-certs/ca-cert.pem``` + +### Ignore SSL certificate validation +It is common to run a test environment without a proper SSL certificate configuration. To disable the certificate validation for a module, set the validate_certs module argument to ```False``` in the playbook. + diff --git a/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md b/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md new file mode 100644 index 00000000..a688c5c2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md @@ -0,0 +1,347 @@ +# Build execution environment with Dell OpenManage Ansible Modules +Creating automation execution environments using the OpenManage Ansible Modules enables your automation teams to define, build, and update their automation environment themselves. Execution environments provide a common language to communicate automation dependency between automation developers, architects, and platform administrators. + +In this tutorial, you will learn how to build the execution environment image, push the image to a registry, and then create the execution environment in Ansible Automation Platform. + +## Why Ansible Automation Platform over Galaxy + +While Ansible Galaxy is good for testing the latest and greatest developer content, it is difficult to find the author who uploaded the content and if the content is supported. Whereas Ansible Automation Platform has bundles of modules, plugins, roles, and documentation from Red Hat. The Ansible Automation Platform provides the following benefits: + +- Red Hat Certified content. +- The content can be directly used in your own Ansible playbooks. +- Private Ansible Automation hub can be used within the organization to publish and collaborate. 
+- Premium support enables you to get help directly from Red Hat if you have any issue with an official Red Hat collection or certified partner collection. +- Red Hat subscription provides free and unlimited access to any content available. + +## Why AWX +Ansible AWX provides an open-source version of Ansible Automation Platform and is the foundation on which Ansible Automation Platform was developed. With Ansible AWX, you have all the enterprise features for an unlimited number of nodes. However, one drawback to note is that Ansible AWX undergoes minimal testing and quality engineering testing. + +## Workflow +In this tutorial, you will learn how to: +1. [Build custom execution environment image.](#build-custom-execution-environment-image) +2. [Use Ansible Runner to verify the execution environment (Optional).](#use-ansible-runner-to-verify-the-execution-environment) +3. [Upload the execution environment to a registry.](#upload-the-execution-environment-to-a-registry) +4. [Create execution environment in Ansible Automation Platform.](#create-execution-environment-in-ansible-automation-platform) + +## Build custom execution environment image +Build a custom image with the required OpenManage Ansible collections ([dellemc.openmanage](https://github.com/dell/dellemc-openmanage-ansible-modules) ) and libraries (omsdk and netaddr), and then upload it to a registry of your choice. In this tutorial, you will learn how to create a Docker image. + +1. Create the following files in your local directory: + - *execution_environment.yml* + - *requirement.yml* + - *requirements.txt* +2. For installing OpenManage collections and their dependencies, copy the metadata from the [dellemc.openmanage](https://github.com/dell/dellemc-openmanage-ansible-modules) GitHub repository. 
+ +The following are the sample files: + +**execution_environment.yml** + +```yaml +version: 1 +dependencies: + galaxy: requirements.yml + python: requirements.txt +``` + +**requirements.yml** +```yaml +collections: + - name: dellemc.openmanage +``` + +**requirements.txt** +```yaml +omsdk +netaddr>=0.7.19 +``` + +3. Build the Docker image using the following syntax: + +`ansible-builder build -f <path>/execution-environment.yml --container-runtime=<container-runtime> -c build_context --tag <registry>/<username>/<image-name>:<tag>` + + In this tutorial, the following command is used to build the Docker image with the name "*execution_environment*". + +```yaml +ansible-builder build -f execution-environment.yml --container-runtime=docker -c build_context --tag docker.io/delluser/execution_environment:<tag> +docker build -f context/Dockerfile -t docker.io/delluser/execution_environment context +Complete! The build context can be found at: /context +```
+ idrac_system_info: + idrac_ip: "{{ inventory_hostname }}" + idrac_user: "{{ user }}" + idrac_password: "{{ password }}" + validate_certs: False + +``` +4. Run the playbook using the following command: + +```yaml +ansible-runner run --process-isolation --process-isolation-executable docker --container-image docker.io/delluser/execution_environment -p sysinfo.yml ./runner-example/ -v +No config file found; using defaults + +PLAY [Get system inventory] **************************************************** + +TASK [Get system inventory.] *************************************************** + +ok: [192.168.0.1] => { ..sysdetails..} +META: ran handlers +META: ran handlers + +PLAY RECAP ********************************************************************* +192.168.0.1 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` +After the execution, a complete trace of execution information is saved in a directory structure as shown below: + +```yaml +# tree runner-example/ +runner-example/ +├── artifacts +│ └── 53810baa-15de-4dd6-93a1-35a28eb89070 +│ ├── ansible_version.txt +│ ├── collections.json +│ ├── command +│ ├── env.list +│ ├── fact_cache +│ ├── job_events +│ │ ├── 1-592da7d5-b64f-4121-a91f-b33f28f6b0da.json +│ │ ├── 2-0242ac11-0007-b479-84c9-000000000006.json +│ │ ├── 3-0242ac11-0007-b479-84c9-000000000008.json +│ │ ├── 4-6d132edf-994c-4bf4-b9b2-dd6fa6ba834f.json +│ │ ├── 5-22b7e7a4-5244-4d3c-bbb7-395980feaee1.json +│ │ └── 6-c7e089be-6494-4b6e-8379-cf435e108aa6.json +│ ├── rc +│ ├── status +│ ├── stderr +│ └── stdout +├── inventory +│ └── hosts +└── project +└── sysinfo.yml +``` +## Upload the execution environment to a registry + +Now that you have built the image, you can upload the execution environment image to a registry. The following steps describe how to upload the image to a Docker registry. You can upload the image to a registry of your choice (https://quay.io or https://docker.io). + +1. Log in to docker.io. 
+```yaml +docker login docker.io +``` +2. To view the list of images, run the following command: + +```yaml +docker image list +``` +Output: + +```yaml +REPOSITORY TAG IMAGE ID CREATED SIZE +docker.io/delluser/execution_environment latest 6ea6337881f5 36 seconds ago 908MB + bab8f0c1f372 3 hours ago 959MB + 26e61b6f31b6 3 hours ago 779MB +``` +3. Upload the image to the repository using the following command: + +```yaml +docker push docker.io/delluser/execution_environment +``` +Output: + +```yaml +Using default tag: latest +The push refers to repository [docker.io/delluser/execution_environment] +6a938007b4eb: Pushed +c1a7a8b69adb: Pushed +75f55eeed6f1: Pushed +7da4273e9d6b: Pushed +d8672b46fe52: Layer already exists +daf6e68722b8: Layer already exists +e258e2d51ae2: Layer already exists +134616f736b1: Layer already exists +34ac022ee9b6: Layer already exists +e7423a18eff2: Layer already exists +4d851e75ba42: Layer already exists +38adeed967d9: Layer already exists +78fc855ac59c: Layer already exists +d0f9b1e225dd: Layer already exists +5d4daec00137: Layer already exists +dd423f7aa20e: Layer already exists +1ce7e8b08eb8: Layer already exists +5fa5c1c78a8e: Layer already exists +e0808177f5c4: Layer already exists +aadc47c09f66: Layer already exists +101e6c349551: Layer already exists +latest: digest: sha256:7be5110235abf72e0547cac016a506d59313addefc445d35e5dff68edb0a9ad6 size: 4726 + 26e61b6f31b6 3 hours ago 779MB + +``` + +## Create execution environment in Ansible Automation Platform +Now that you uploaded the image to a registry, you can now create the execution environment in Ansible Automation Platform. + +### Add execution environment + +1. Log in to Ansible Automation Platform. +2. On the navigation pane, click **Administration > Execution Environments**. +2. On the **Execution Environments** page, click **Add**. +3. On the **Create new execution environment** page, enter the following details, and click **Save**. 
+ - **Name**: Enter a name for the execution environment (required). + - **Image**: Enter the image name (required). The image name requires its full location (repo), the registry, image name, and version tag + - **Pull**: From the **Pull** drop-down list, select **Only pull the image if not present before running**. + - **Description**: optional. + - **Organization**: optionally assign the organization to specifically use this execution environment. To make the execution environment available for use across multiple organizations, leave this field blank. + - **Registry credential**: If the image has a protected container registry, provide the credential to access it. + + +### Create Projects + +A Project is a logical collection of Ansible playbooks. + +1. On the navigation pane, click **Resources > Projects**. +2. On the **Projects** page, click **Add**. +3. On the **Create New Project** page, do the following, and click **Save**. + - From the **Source Control Credential Type** drop-down list, select the source control type. For example, you can select "GIT". + - In the **Source Control URL**, specify the source control URL. That is your repository link. + +### Create Credential Types +This tutorial uses a custom credential type. You can create credential types depending on your data center environment. For more information, see [Credential Types](https://docs.ansible.com/automation-controller/4.0.0/html/userguide/credentials.html#credential-types). + +To create a credential type: + +1. On the navigation pane, click **Administration > Credential Types**. +2. On the **Credential Types** page, click **Add**. +2. On the **Create Credential Types** page, enter the name, and then specify the **Input configuration** and **Injector configuration**. +3. Click **Save**. + +This tutorial uses a custom credential type. The following are the input configuration and injector configuration used in this tutorial. 
+ +**Input configuration:** + +```yaml +fields: + - id: username + type: string + label: Username + - id: password + type: string + label: Password + secret: true +required: + - username + - password +``` + +**Injector configuration:** + +```yaml +extra_vars: + user: '{{ username }}' + password: '{{ password }}' +``` +#### Create Credentials + +1. On the navigation pane, click **Resources > Credentials**. +2. On the **Credentials** page, click **Add**. +3. On the **Create New Credential** page, enter the name of the credential and select the credential type. +4. Click **Save**. + +**Note:** In this tutorial, the custom credential type that we created in the section [Create Credential Types](#create-credential-types) is used. + +## Create Inventories +1. On the navigation pane, click **Resources > Inventories**. +2. On the **Inventories** page, click **Add**. +3. On the **Create New Inventory** page, enter the details and click **Save**. +4. Add groups and hosts to the inventory. + +## Create Job Templates + +1. On the navigation pane, click **Resources > Templates**. +2. On the **Templates** page, click **Add** and select the new job template. +3. On the **Create New Job Template** page, enter the name, inventory, project, execution environment, playbook, and credentials. +4. Click **Save**. +5. To run the template, on the **Details** page, click **Launch**. + +To check the job status, on the navigation pane, select **Views > Jobs**. The following is a sample output. + +```yaml +PLAY [Get system inventory] **************************************************** + +TASK [Get system inventory.]
*************************************************** + +ok: [192.168.0.1] => { ..sysdetails..} +META: ran handlers +META: ran handlers + +PLAY RECAP ********************************************************************* +192.168.0.1 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +## Troubleshoot +You can add an Ansible python interpreter to a Template or Inventory. + +`ansible_python_interpreter: /usr/bin/python` + +```yaml +ansible_python_interpreter: /usr/bin/python3.8 +``` + +## Documentation references +- [https://www.redhat.com/en/technologies/management/ansible](https://www.redhat.com/en/technologies/management/ansible) +- [https://www.redhat.com/en/blog/what-ansible-automation-hub-and-why-should-you-use-it](https://www.redhat.com/en/blog/what-ansible-automation-hub-and-why-should-you-use-it) +- [https://becloudready.com/ansible-awx-vs-ansible-tower-the-key-to-automation/](https://becloudready.com/ansible-awx-vs-ansible-tower-the-key-to-automation/) + + + + + + + + + + + + diff --git a/ansible_collections/dellemc/openmanage/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/openmanage/docs/ISSUE_TRIAGE.md new file mode 100644 index 00000000..e565b398 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/ISSUE_TRIAGE.md @@ -0,0 +1,195 @@ + + +# Triage issues + +The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it. + +> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic. + +The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project. 
+ +Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +## 1. Find issues that need triage + +The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label. + +## 2. Ensure the issue contains basic information + +Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors to provide standard information that must be included for each type of template or type of issue. + +### Standard issue information that must be included + +The following section describes the various issue templates and the expected content. + +#### Bug reports + +Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or maybe related to the reported problem: + + - OS/Version: [e.g. RHEL 8.5] + - Python Version [e.g. 3.9] + - Ansible Version [e.g. 2.13] + - Any other additional information... + +#### Feature requests + +Should explain what feature that the author wants to be added and why that is needed. + +#### Ask a Question requests + +In general, if the issue description and title is perceived as a question no more information is needed. 
+ +### Good practices + +To make it easier for everyone to understand and find issues they're searching for it's suggested as a general rule of thumbs to: + +- Make sure that issue titles are named to explain the subject of the issue, has a correct spelling and doesn't include irrelevant information and/or sensitive information. +- Make sure that issue descriptions doesn't include irrelevant information. +- Make sure that issues do not contain sensitive information. +- Make sure that issues have all relevant fields filled in. +- Do your best effort to change title and description or request suggested changes by adding a comment. + +> **Note:** Above rules are applicable to both new and existing issues. + +### Dealing with missing information + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label issue with `triage/needs-information`. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label issue with `triage/needs-information`. + +If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +If you receive a notification with additional information provided but you are not anymore on issue triage and you feel you do not have time to handle it, you should delegate it to the current person on issue triage. + +## 3. Categorizing an issue + +### Duplicate issues + +Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. 
If you think you know there is an existing issue, but can't find it, please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue: + +1. Add a comment `duplicate of #` +2. Add the `triage/duplicate` label + +### Bug reports + +If it's not perfectly clear that it's an actual bug, quickly try to reproduce it. + +**It's a bug/it can be reproduced:** + +1. Add a comment describing detailed steps for how to reproduce it, if applicable. +2. If you know that maintainers won't be able to put any resources into it for some time then label the issue with `help wanted` and optionally `beginner friendly` together with pointers on which code to update to fix the bug. This should signal to the community that we would appreciate any help we can get to resolve this. +3. Move on to [prioritizing the issue](#4-prioritization-of-issues). + +**It can't be reproduced:** + +1. Either [ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate it more thoroughly. Provide details in a comment. +2. Either [delegate further investigations](#investigation-of-issues) to someone else. Provide details in a comment. + +**It works as intended/by design:** + +1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue. +2. Label the issue `triage/works-as-intended`. +3. Remove the `needs-triage` label. + +**It does not work as intended/by design:** + +### Feature requests + +1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label and close the issue +2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label. + +## 4. 
Prioritization of issues +[ANUPAM TO REVIEW THIS AND PROVIDE NECESSARY INPUT] +In general bugs and feature request issues should be labeled with a priority. + +This is the most difficult thing with triaging issues since it requires a lot of knowledge, context and experience before being able to think of and start feel comfortable adding a certain priority label. + +The key here is asking for help and discuss issues to understand how more experienced project members think and reason. By doing that you learn more and eventually be more and more comfortable with prioritizing issues. + +In case there is an uncertainty around the prioritization of an issue, please ask the maintainers for help. + +| Label | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority right now. | +| `priority/high` | Must be worked on soon, ideally in time for the next release. | +| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. | + +### Critical priority + +1. If an issue has been categorized and any of the following criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority right now. + + - Results in any data loss + - Critical security or performance issues + - Problem that makes a feature unusable + - Multiple users experience a severe problem affecting their business, users etc. + +2. Label the issue `priority/critical`. +3. Escalate the problem to the maintainers. +4. Assign or ask a maintainer for help assigning someone to make this issue their top priority right now. +5. Add the issue to the next upcoming release milestone. + +### High priority + +1. Label the issue `priority/high`. +2. Add the issue to the next upcoming release milestone. +3. 
Prioritize it or assign someone to work on it now or very soon. +4. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +### Low priority + +1. If the issue is deemed possibly useful but a low priority, label the issue `priority/low`. +2. The amount of interest in the issue will determine if the priority changes to be higher. +3. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +## 5. Requesting help from the community + +Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that help from the community is appreciated and needed in case an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged. + +In many cases the issue author or community as a whole is more suitable to contribute changes since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and made an effort to get it to work and/or reached out to the community to get the missing information. + +1. Kindly and politely add a comment to signal to users subscribed to updates of the issue. + - Explain that the issue would be nice to get resolved, but it isn't prioritized to be worked on by maintainers for the foreseeable future. + - If possible or applicable, try to help contributors getting started by adding pointers and references to what code/files need to be changed and/or ideas of a good way to solve/implement the issue. +2. Label the issue with `help wanted`. +3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on. 
+ +## Investigation of issues + +When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. Depending on the perceived severity and/or number of upvotes, the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it. + +Even if you don't have the time or knowledge to investigate an issue we highly recommend that you upvote the issue if you happen to have the same problem. If you have further details that may help investigating the issue please provide as much information as possible. + +## External pull requests + +Part of issue triage should also be triaging of external PRs. Main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten. + +1. Check new external PRs which do not have a reviewer. +1. Check if there is a link to an existing issue. +1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one. +1. Assign a reviewer based on who was handling the linked issue or what code or feature the PR touches (look at who was the last to make changes there if all else fails). + +## GitHub issue management workflow + +The following section describes the triage workflow for new GitHub issues that get created. + +### GitHub Issue: Bug + +This workflow starts off with a GitHub issue of type bug being created. + +1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template +2. 
By default a bug will be created with the `type/bug` and `needs-triage` labels diff --git a/ansible_collections/dellemc/openmanage/docs/MAINTAINERS.md b/ansible_collections/dellemc/openmanage/docs/MAINTAINERS.md new file mode 100644 index 00000000..77156bd3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/MAINTAINERS.md @@ -0,0 +1,18 @@ + + + +# Maintainers + +* @sachin-apa +* @felix-88 +* @jagadeeshnv +* @rajshekarp87 +* @anupamaloke \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/docs/README.md b/ansible_collections/dellemc/openmanage/docs/README.md new file mode 100644 index 00000000..a96bf0da --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/README.md @@ -0,0 +1,93 @@ + +# OpenManage Ansible Modules Documentation + +### iDRAC Modules +- [dellemc_configure_idrac_eventing](modules/dellemc_configure_idrac_eventing.rst) +- [dellemc_configure_idrac_services](modules/dellemc_configure_idrac_services.rst) +- [dellemc_get_firmware_inventory](modules/dellemc_get_firmware_inventory.rst) +- [dellemc_get_system_inventory](modules/dellemc_get_system_inventory.rst) +- [dellemc_idrac_lc_attributes](modules/dellemc_idrac_lc_attributes.rst) +- [dellemc_idrac_storage_volume](modules/dellemc_idrac_storage_volume.rst) +- [dellemc_system_lockdown_mode](modules/dellemc_system_lockdown_mode.rst) +- [idrac_attributes](modules/idrac_attributes.rst) +- [idrac_bios](modules/idrac_bios.rst) +- [idrac_certificates](modules/idrac_certificates.rst) +- [idrac_firmware](modules/idrac_firmware.rst) +- [idrac_firmware_info](modules/idrac_firmware_info.rst) +- [idrac_lifecycle_controller_jobs](modules/idrac_lifecycle_controller_jobs.rst) +- [idrac_lifecycle_controller_job_status_info](modules/idrac_lifecycle_controller_job_status_info.rst) +- [idrac_lifecycle_controller_logs](modules/idrac_lifecycle_controller_logs.rst) +- [idrac_lifecycle_controller_status_info](modules/idrac_lifecycle_controller_status_info.rst) +- 
[idrac_network](modules/idrac_network.rst) +- [idrac_os_deployment](modules/idrac_os_deployment.rst) +- [idrac_redfish_storage_controller](modules/idrac_redfish_storage_controller.rst) +- [idrac_reset](modules/idrac_reset.rst) +- [idrac_server_config_profile](modules/idrac_server_config_profile.rst) +- [idrac_syslog](modules/idrac_syslog.rst) +- [idrac_system_info](modules/idrac_system_info.rst) +- [idrac_timezone_ntp](modules/idrac_timezone_ntp.rst) +- [idrac_user](modules/idrac_user.rst) +- [idrac_virtual_media](modules/idrac_virtual_media.rst) +- [redfish_event_subscription](modules/redfish_event_subscription.rst) +- [redfish_firmware](modules/redfish_firmware.rst) +- [redfish_powerstate](modules/redfish_powerstate.rst) +- [redfish_storage_volume](modules/redfish_storage_volume.rst) + +### OpenManage Enterprise Modules +- [ome_active_directory](modules/ome_active_directory.rst) +- [ome_application_alerts_smtp](modules/ome_application_alerts_smtp.rst) +- [ome_application_alerts_syslog](modules/ome_application_alerts_syslog.rst) +- [ome_application_certificate](modules/ome_application_certificate.rst) +- [ome_application_console_preferences](modules/ome_application_console_preferences.rst) +- [ome_application_network_address](modules/ome_application_network_address.rst) +- [ome_application_network_proxy](modules/ome_application_network_proxy.rst) +- [ome_application_network_settings](modules/ome_application_network_settings.rst) +- [ome_application_network_time](modules/ome_application_network_time.rst) +- [ome_application_network_webserver](modules/ome_application_network_webserver.rst) +- [ome_application_security_settings](modules/ome_application_security_settings.rst) +- [ome_chassis_slots](modules/ome_chassis_slots.rst) +- [ome_configuration_compliance_baseline](modules/ome_configuration_compliance_baseline.rst) +- [ome_configuration_compliance_info](modules/ome_configuration_compliance_info.rst) +- [ome_device_group](modules/ome_device_group.rst) +- 
[ome_device_info](modules/ome_device_info.rst) +- [ome_device_local_access_configuration](modules/ome_device_local_access_configuration.rst) +- [ome_device_location](modules/ome_device_location.rst) +- [ome_device_mgmt_network](modules/ome_device_mgmt_network.rst) +- [ome_device_network_services](modules/ome_device_network_services.rst) +- [ome_device_power_settings](modules/ome_device_power_settings.rst) +- [ome_device_quick_deploy](modules/ome_device_quick_deploy.rst) +- [ome_diagnostics](modules/ome_diagnostics.rst) +- [ome_discovery](modules/ome_discovery.rst) +- [ome_domain_user_groups](modules/ome_domain_user_groups.rst) +- [ome_firmware](modules/ome_firmware.rst) +- [ome_firmware_baseline](modules/ome_firmware_baseline.rst) +- [ome_firmware_baseline_compliance_info](modules/ome_firmware_baseline_compliance_info.rst) +- [ome_firmware_baseline_info](modules/ome_firmware_baseline_info.rst) +- [ome_firmware_catalog](modules/ome_firmware_catalog.rst) +- [ome_groups](modules/ome_groups.rst) +- [ome_identity_pool](modules/ome_identity_pool.rst) +- [ome_job_info](modules/ome_job_info.rst) +- [ome_network_port_breakout](modules/ome_network_port_breakout.rst) +- [ome_network_vlan](modules/ome_network_vlan.rst) +- [ome_network_vlan_info](modules/ome_network_vlan_info.rst) +- [ome_powerstate](modules/ome_powerstate.rst) +- [ome_profile](modules/ome_profile.rst) +- [ome_server_interface_profile_info](modules/ome_server_interface_profile_info.rst) +- [ome_server_interface_profiles](modules/ome_server_interface_profiles.rst) +- [ome_smart_fabric](modules/ome_smart_fabric.rst) +- [ome_smart_fabric_uplink](modules/ome_smart_fabric_uplink.rst) +- [ome_template](modules/ome_template.rst) +- [ome_template_identity_pool](modules/ome_template_identity_pool.rst) +- [ome_template_info](modules/ome_template_info.rst) +- [ome_template_network_vlan](modules/ome_template_network_vlan.rst) +- [ome_user](modules/ome_user.rst) +- [ome_user_info](modules/ome_user_info.rst) + diff --git 
a/ansible_collections/dellemc/openmanage/docs/SECURITY.md b/ansible_collections/dellemc/openmanage/docs/SECURITY.md new file mode 100644 index 00000000..f5c52c85 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/SECURITY.md @@ -0,0 +1,20 @@ + + + +# Security Policy +TBD + +## Reporting a Vulnerability + +Have you discovered a security vulnerability in this project? +We ask you to alert the maintainers by sending an email, describing the issue, impact, and fix - if applicable. + +You can reach the OpenManageAnsible Maintainers at OpenManageAnsible@dell.com \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/docs/SUPPORT.md b/ansible_collections/dellemc/openmanage/docs/SUPPORT.md new file mode 100644 index 00000000..f17b98be --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/SUPPORT.md @@ -0,0 +1,15 @@ + + +# Support + + * To report any issue, create an issue [here](https://github.com/dell/dellemc-openmanage-ansible-modules/issues). + * If any requirements have not been addressed, then create an issue [here](https://github.com/dell/dellemc-openmanage-ansible-modules/issues). + * To provide feedback to the development team, send an email to **OpenManageAnsible@Dell.com**. diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst new file mode 100644 index 00000000..d0e59981 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst @@ -0,0 +1,218 @@ +.. _dellemc_configure_idrac_eventing_module: + + +dellemc_configure_idrac_eventing -- Configures the iDRAC eventing related attributes +==================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the iDRAC eventing related attributes. 
+ + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + This option is deprecated and will be removed in the later version. + + + share_user (optional, str, None) + (deprecated)Network share user in the format 'user@domain' or 'domain\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for Network Share. + + This option is deprecated and will be removed in the later version. + + + destination_number (optional, int, None) + Destination number for SNMP Trap. + + + destination (optional, str, None) + Destination for SNMP Trap. + + + snmp_v3_username (optional, str, None) + SNMP v3 username for SNMP Trap. + + + snmp_trap_state (optional, str, None) + Whether to Enable or Disable SNMP alert. + + + email_alert_state (optional, str, None) + Whether to Enable or Disable Email alert. + + + alert_number (optional, int, None) + Alert number for Email configuration. + + + address (optional, str, None) + Email address for SNMP Trap. + + + custom_message (optional, str, None) + Custom message for SNMP Trap reference. + + + enable_alerts (optional, str, None) + Whether to Enable or Disable iDRAC alerts. + + + authentication (optional, str, None) + Simple Mail Transfer Protocol Authentication. + + + smtp_ip_address (optional, str, None) + SMTP IP address for communication. 
+ + + smtp_port (optional, str, None) + SMTP Port number for access. + + + username (optional, str, None) + Username for SMTP authentication. + + + password (optional, str, None) + Password for SMTP authentication. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure the iDRAC eventing attributes + dellemc.openmanage.dellemc_configure_idrac_eventing: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination_number: "2" + destination: "1.1.1.1" + snmp_v3_username: "None" + snmp_trap_state: "Enabled" + email_alert_state: "Disabled" + alert_number: "1" + address: "alert_email@company.com" + custom_message: "Custom Message" + enable_alerts: "Disabled" + authentication: "Enabled" + smtp_ip_address: "192.168.0.1" + smtp_port: "25" + username: "username" + password: "password" + + + +Return Values +------------- + +msg (always, str, Successfully configured the iDRAC eventing settings.) 
+ Successfully configured the iDRAC eventing settings. + + +eventing_status (success, dict, AnsibleMapping([('CompletionTime', '2020-04-02T02:43:28'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_12345123456'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Configures the iDRAC eventing attributes. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . + *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst new file mode 100644 index 00000000..02e803fe --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst @@ -0,0 +1,215 @@ +.. _dellemc_configure_idrac_services_module: + + +dellemc_configure_idrac_services -- Configures the iDRAC services related attributes +==================================================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the iDRAC services related attributes. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + This option is deprecated and will be removed in the later version. + + + share_user (optional, str, None) + (deprecated)Network share user in the format 'user@domain' or 'domain\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for Network Share. + + This option is deprecated and will be removed in the later version. + + + enable_web_server (optional, str, None) + Whether to Enable or Disable webserver configuration for iDRAC. 
+ + + ssl_encryption (optional, str, None) + Secure Socket Layer encryption for webserver. + + + tls_protocol (optional, str, None) + Transport Layer Security for webserver. + + + https_port (optional, int, None) + HTTPS access port. + + + http_port (optional, int, None) + HTTP access port. + + + timeout (optional, str, None) + Timeout value. + + + snmp_enable (optional, str, None) + Whether to Enable or Disable SNMP protocol for iDRAC. + + + snmp_protocol (optional, str, None) + Type of the SNMP protocol. + + + community_name (optional, str, None) + SNMP community name for iDRAC. It is used by iDRAC to validate SNMP queries received from remote systems requesting SNMP data access. + + + alert_port (optional, int, 162) + The iDRAC port number that must be used for SNMP traps. The default value is 162, and the acceptable range is between 1 to 65535. + + + discovery_port (optional, int, 161) + The SNMP agent port on the iDRAC. The default value is 161, and the acceptable range is between 1 to 65535. + + + trap_format (optional, str, None) + SNMP trap format for iDRAC. + + + ipmi_lan (optional, dict, None) + Community name set on iDRAC for SNMP settings. + + + community_name (optional, str, None) + This option is used by iDRAC when it sends out SNMP and IPMI traps. The community name is checked by the remote system to which the traps are sent. + + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Configure the iDRAC services attributes + dellemc.openmanage.dellemc_configure_idrac_services: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + enable_web_server: "Enabled" + http_port: 80 + https_port: 443 + ssl_encryption: "Auto_Negotiate" + tls_protocol: "TLS_1_2_Only" + timeout: "1800" + snmp_enable: "Enabled" + snmp_protocol: "SNMPv3" + community_name: "public" + alert_port: 162 + discovery_port: 161 + trap_format: "SNMPv3" + ipmi_lan: + community_name: "public" + + + +Return Values +------------- + +msg (always, str, Successfully configured the iDRAC services settings.) + Overall status of iDRAC service attributes configuration. + + +service_status (success, dict, AnsibleMapping([('CompletionTime', '2020-04-02T02:43:28'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_12345123456'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Details of iDRAC services attributes configuration. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . 
+ *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst new file mode 100644 index 00000000..14b84431 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst @@ -0,0 +1,107 @@ +.. _dellemc_get_firmware_inventory_module: + + +dellemc_get_firmware_inventory -- Get Firmware Inventory +======================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Get Firmware Inventory. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Get Installed Firmware Inventory + dellemc.openmanage.dellemc_get_firmware_inventory: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + + + + +Status +------ + + +- This module will be removed in version + . + *[deprecated]* + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst new file mode 100644 index 00000000..3babb032 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst @@ -0,0 +1,107 @@ +.. _dellemc_get_system_inventory_module: + + +dellemc_get_system_inventory -- Get the PowerEdge Server System Inventory +========================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Get the PowerEdge Server System Inventory. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. 
note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Get System Inventory + dellemc.openmanage.dellemc_get_system_inventory: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + + + + +Status +------ + + +- This module will be removed in version + . + *[deprecated]* + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst new file mode 100644 index 00000000..0459b5a4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst @@ -0,0 +1,153 @@ +.. _dellemc_idrac_lc_attributes_module: + + +dellemc_idrac_lc_attributes -- Enable or disable Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs +=========================================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module is responsible for enabling or disabling of Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + This option is deprecated and will be removed in the later version. + + + share_user (optional, str, None) + (deprecated)Network share user in the format 'user@domain' or 'domain\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. 
+ + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for Network Share. + + This option is deprecated and will be removed in the later version. + + + csior (optional, str, Enabled) + Whether to Enable or Disable Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Set up iDRAC LC Attributes + dellemc.openmanage.dellemc_idrac_lc_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + csior: "Enabled" + + + +Return Values +------------- + +msg (always, str, Successfully configured the iDRAC LC attributes.) + Overall status of iDRAC LC attributes configuration. + + +lc_attribute_status (success, dict, AnsibleMapping([('CompletionTime', '2020-03-30T00:06:53'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_1234512345'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs is configured. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . 
+ *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst new file mode 100644 index 00000000..d15aee67 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst @@ -0,0 +1,281 @@ +.. _dellemc_idrac_storage_volume_module: + + +dellemc_idrac_storage_volume -- Configures the RAID configuration attributes +============================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module is responsible for configuring the RAID attributes. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, view) + ``create``, performs create volume operation. + + ``delete``, performs remove volume operation. + + ``view``, returns storage view. + + + span_depth (optional, int, 1) + Number of spans in the RAID configuration. + + *span_depth* is required for ``create`` and its value depends on *volume_type*. + + + span_length (optional, int, 1) + Number of disks in a span. + + *span_length* is required for ``create`` and its value depends on *volume_type*. + + + number_dedicated_hot_spare (optional, int, 0) + Number of Dedicated Hot Spare. + + + volume_type (optional, str, RAID 0) + Provide the the required RAID level. + + + disk_cache_policy (optional, str, Default) + Disk Cache Policy. + + + write_cache_policy (optional, str, WriteThrough) + Write cache policy. + + + read_cache_policy (optional, str, NoReadAhead) + Read cache policy. + + + stripe_size (optional, int, 65536) + Stripe size value to be provided in multiples of 64 * 1024. 
+ + + controller_id (optional, str, None) + Fully Qualified Device Descriptor (FQDD) of the storage controller, for example 'RAID.Integrated.1-1'. Controller FQDD is required for ``create`` RAID configuration. + + + media_type (optional, str, None) + Media type. + + + protocol (optional, str, None) + Bus protocol. + + + volume_id (optional, str, None) + Fully Qualified Device Descriptor (FQDD) of the virtual disk, for example 'Disk.virtual.0:RAID.Slot.1-1'. This option is used to get the virtual disk information. + + + volumes (optional, list, None) + A list of virtual disk specific iDRAC attributes. This is applicable for ``create`` and ``delete`` operations. + + For ``create`` operation, name and drives are applicable options, other volume options can also be specified. + + The drives is a required option for ``create`` operation and accepts either location (list of drive slot) or id (list of drive fqdd). + + For ``delete`` operation, only name option is applicable. + + See the examples for more details. + + + capacity (optional, float, None) + Virtual disk size in GB. + + + raid_reset_config (optional, str, False) + This option represents whether a reset config operation needs to be performed on the RAID controller. Reset Config operation deletes all the virtual disks present on the RAID controller. + + + raid_init_operation (optional, str, None) + This option represents initialization configuration operation to be performed on the virtual disk. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create single volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "create" + controller_id: "RAID.Slot.1-1" + volumes: + - drives: + location: [5] + + - name: Create multiple volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + raid_reset_config: "True" + state: "create" + controller_id: "RAID.Slot.1-1" + volume_type: "RAID 1" + span_depth: 1 + span_length: 2 + number_dedicated_hot_spare: 1 + disk_cache_policy: "Enabled" + write_cache_policy: "WriteBackForce" + read_cache_policy: "ReadAhead" + stripe_size: 65536 + capacity: 100 + raid_init_operation: "Fast" + volumes: + - name: "volume_1" + drives: + id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"] + - name: "volume_2" + volume_type: "RAID 5" + span_length: 3 + span_depth: 1 + drives: + location: [7,3,5] + disk_cache_policy: "Disabled" + write_cache_policy: "WriteBack" + read_cache_policy: "NoReadAhead" + stripe_size: 131072 + capacity: "200" + raid_init_operation: "None" + + - name: View all volume details + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "view" + + - name: View specific volume details + dellemc.openmanage.dellemc_idrac_storage_volume: + 
idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "view" + controller_id: "RAID.Slot.1-1" + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + + - name: Delete single volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "delete" + volumes: + - name: "volume_1" + + - name: Delete multiple volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "delete" + volumes: + - name: "volume_1" + - name: "volume_2" + + + +Return Values +------------- + +msg (always, str, Successfully completed the view storage volume operation) + Overall status of the storage configuration operation. + + +storage_status (success, dict, AnsibleMapping([('Id', 'JID_XXXXXXXXX'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageId', 'XXX123'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Storage configuration job and progress details from the iDRAC. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst new file mode 100644 index 00000000..66d9c7b8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst @@ -0,0 +1,153 @@ +.. 
_dellemc_system_lockdown_mode_module: + + +dellemc_system_lockdown_mode -- Configures system lockdown mode for iDRAC +========================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to Enable or Disable System lockdown Mode. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (optional, str, None) + (deprecated) Network share or a local path. + + This option is deprecated and will be removed in the later version. + + + share_user (optional, str, None) + (deprecated) Network share user in the format 'user@domain' or 'domain\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated) Network share user password. This option is mandatory for CIFS Network Share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated) Local mount path of the network share with read-write permission for ansible user. This option is mandatory for Network Share. + + This option is deprecated and will be removed in the later version. + + + lockdown_mode (True, str, None) + Whether to Enable or Disable system lockdown mode. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Check System Lockdown Mode + dellemc.openmanage.dellemc_system_lockdown_mode: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lockdown_mode: "Disabled" + + + +Return Values +------------- + +msg (always, str, Successfully completed the lockdown mode operations.) + Lockdown mode of the system is configured. + + +system_lockdown_status (success, dict, AnsibleMapping([('Data', AnsibleMapping([('StatusCode', 200), ('body', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Successfully Completed Request'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.0.Success'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')])])]))])), ('Message', 'none'), ('Status', 'Success'), ('StatusCode', 200), ('retval', True)])) + Storage configuration job and progress details from the iDRAC. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. 
If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . + *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst new file mode 100644 index 00000000..b0512bd1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst @@ -0,0 +1,271 @@ +.. _idrac_attributes_module: + + +idrac_attributes -- Configure the iDRAC attributes. +=================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the iDRAC attributes. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_attributes (optional, dict, None) + Dictionary of iDRAC attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry. + + For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is .# (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is .. (for Example, 'SNMP.1.AgentCommunity'). + + + system_attributes (optional, dict, None) + Dictionary of System attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry. 
To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry. + + For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is .# (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is .. (for Example, 'ThermalSettings.1.ThermalProfile'). + + + lifecycle_controller_attributes (optional, dict, None) + Dictionary of Lifecycle Controller attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry. + + For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is .# (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is .. (for Example, 'LCAttributes.1.AutoUpdate'). + + + resource_id (optional, str, None) + Redfish ID of the resource. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports ``check_mode``. + - For iDRAC7 and iDRAC8 based servers, the value provided for the attributes are not be validated. Ensure appropriate values are passed. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure iDRAC attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: public + + - name: Configure System attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + ThermalSettings.1.ThermalProfile: Sound Cap + + - name: Configure Lifecycle Controller attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Enabled + + - name: Configure the iDRAC attributes for email alert settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + EmailAlert.1.CustomMsg: Display Message + EmailAlert.1.Enable: Enabled + EmailAlert.1.Address: test@test.com + + - name: Configure the iDRAC attributes for SNMP alert settings. 
+ dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMPAlert.1.Destination: 192.168.0.2 + SNMPAlert.1.State: Enabled + SNMPAlert.1.SNMPv3Username: username + + - name: Configure the iDRAC attributes for SMTP alert settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + RemoteHosts.1.SMTPServerIPAddress: 192.168.0.3 + RemoteHosts.1.SMTPAuthentication: Enabled + RemoteHosts.1.SMTPPort: 25 + RemoteHosts.1.SMTPUserName: username + RemoteHosts.1.SMTPPassword: password + + - name: Configure the iDRAC attributes for webserver settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + WebServer.1.SSLEncryptionBitLength: 128-Bit or higher + WebServer.1.TLSProtocol: TLS 1.1 and Higher + + - name: Configure the iDRAC attributes for SNMP settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.SNMPProtocol: All + SNMP.1.AgentEnable: Enabled + SNMP.1.TrapFormat: SNMPv1 + SNMP.1.AlertPort: 162 + SNMP.1.AgentCommunity: public + + - name: Configure the iDRAC LC attributes for collecting system inventory. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: Enabled + + - name: Configure the iDRAC system attributes for LCD configuration. 
+ dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + LCD.1.Configuration: Service Tag + LCD.1.vConsoleIndication: Enabled + LCD.1.FrontPanelLocking: Full-Access + LCD.1.UserDefinedString: custom string + + - name: Configure the iDRAC attributes for Timezone settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + Time.1.TimeZone: CST6CDT + NTPConfigGroup.1.NTPEnable: Enabled + NTPConfigGroup.1.NTP1: 192.168.0.5 + NTPConfigGroup.1.NTP2: 192.168.0.6 + NTPConfigGroup.1.NTP3: 192.168.0.7 + + - name: Configure all attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: test + SNMP.1.AgentEnable: Enabled + SNMP.1.DiscoveryPort: 161 + system_attributes: + ServerOS.1.HostName: demohostname + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Disabled + + + +Return Values +------------- + +msg (always, str, Successfully updated the attributes.) + Status of the attribute update operation. + + +invalid_attributes (on invalid attributes or values., dict, AnsibleMapping([('LCAttributes.1.AutoUpdate', 'Invalid value for Enumeration.'), ('LCAttributes.1.StorageHealthRollupStatus', 'Read only Attribute cannot be modified.'), ('SNMP.1.AlertPort', 'Not a valid integer.'), ('SNMP.1.AlertPorty', 'Attribute does not exist.'), ('SysLog.1.PowerLogInterval', 'Integer out of valid range.'), ('ThermalSettings.1.AirExhaustTemp', 'Invalid value for Enumeration.')])) + Dict of invalid attributes provided. 
+ + +error_info (when attribute value is invalid., dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', "The value 'false' for the property LCAttributes.1.BIOSRTDRequested is of a different type than the property can accept."), ('MessageArgs', ['false', 'LCAttributes.1.BIOSRTDRequested']), ('MessageArgs@odata.count', 2), ('MessageId', 'Base.1.12.PropertyValueTypeError'), ('RelatedProperties', ['#/Attributes/LCAttributes.1.BIOSRTDRequested']), ('RelatedProperties@odata.count', 1), ('Resolution', 'Correct the value for the property in the request body and resubmit the request if the operation failed.'), ('Severity', 'Warning')])]), ('code', 'Base.1.12.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))])) + Error information of the operation. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Husniya Abdul Hameed (@husniya-hameed) +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst new file mode 100644 index 00000000..72f6bc73 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst @@ -0,0 +1,341 @@ +.. _idrac_bios_module: + + +idrac_bios -- Modify and clear BIOS attributes, reset BIOS settings and configure boot sources +============================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to modify the BIOS attributes. Also clears pending BIOS attributes and resets BIOS to default settings. + +Boot sources can be enabled or disabled. Boot sequence can be configured. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- omsdk >= 1.2.490 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + + share_user (optional, str, None) + (deprecated)Network share user name. Use the format 'user@domain' or domain//user if user is part of a domain. This option is mandatory for CIFS share. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS share. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for network shares. + + + apply_time (optional, str, Immediate) + Apply time of the *attributes*. + + This is applicable only to *attributes*. + + ``Immediate`` Allows the user to immediately reboot the host and apply the changes. *job_wait* is applicable. + + ``OnReset`` Allows the user to apply the changes on the next reboot of the host server. + + ``AtMaintenanceWindowStart`` Allows the user to apply at the start of a maintenance window as specified in *maintenance_window*. A reboot job will be scheduled. + + ``InMaintenanceWindowOnReset`` Allows to apply after a manual reset but within the maintenance window as specified in *maintenance_window*. + + + maintenance_window (optional, dict, None) + Option to schedule the maintenance window. + + This is required when *apply_time* is ``AtMaintenanceWindowStart`` or ``InMaintenanceWindowOnReset``. + + + start_time (True, str, None) + The start time for the maintenance window to be scheduled. + + The format is YYYY-MM-DDThh:mm:ss + + is the time offset from UTC that the current timezone set in iDRAC in the format: +05:30 for IST. + + + duration (True, int, None) + The duration in seconds for the maintenance window. + + + + attributes (optional, dict, None) + Dictionary of BIOS attributes and value pair. Attributes should be part of the Redfish Dell BIOS Attribute Registry. 
Use https://*idrac_ip*/redfish/v1/Systems/System.Embedded.1/Bios to view the Redfish URI. + + This is mutually exclusive with *boot_sources*, *clear_pending*, and *reset_bios*. + + + boot_sources (optional, list, None) + (deprecated)List of boot devices to set the boot sources settings. + + *boot_sources* is mutually exclusive with *attributes*, *clear_pending*, and *reset_bios*. + + *job_wait* is not applicable. The module waits till the completion of this task. + + This feature is deprecated, please use :ref:`idrac_boot ` for configuring boot sources. + + + clear_pending (optional, bool, None) + Allows the user to clear all pending BIOS attributes changes. + + ``true`` will discard any pending changes to bios attributes or remove job if in scheduled state. + + This operation will not create any job. + + ``false`` will not perform any operation. + + This is mutually exclusive with *boot_sources*, *attributes*, and *reset_bios*. + + ``Note`` Any BIOS job scheduled due to boot sources configuration will not be cleared. + + + reset_bios (optional, bool, None) + Resets the BIOS to default settings and triggers a reboot of host system. + + This is applied to the host after the restart. + + This operation will not create any job. + + ``false`` will not perform any operation. + + This is mutually exclusive with *boot_sources*, *attributes*, and *clear_pending*. + + When ``true``, this action will always report as changes found to be applicable. + + + reset_type (optional, str, graceful_restart) + ``force_restart`` Forcefully reboot the host system. + + ``graceful_restart`` Gracefully reboot the host system. + + This is applicable for *attributes*, and *reset_bios*. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This is applicable for *attributes* when *apply_time* is ``Immediate``. + + + job_wait_timeout (optional, int, 1200) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. 
+ + This option is applicable when *job_wait* is ``True``. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - omsdk is required to be installed only for *boot_sources* operation. + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Configure generic attributes of the BIOS + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + attributes: + BootMode : "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + + - name: Configure PXE generic attributes + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + attributes: + PxeDev1EnDis: "Enabled" + PxeDev1Protocol: "IPV4" + PxeDev1VlanEnDis: "Enabled" + PxeDev1VlanId: 1 + PxeDev1Interface: "NIC.Embedded.1-1-1" + PxeDev1VlanPriority: 2 + + - name: Configure BIOS attributes at Maintenance window + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + apply_time: AtMaintenanceWindowStart + maintenance_window: + start_time: "2022-09-30T05:15:40-05:00" + duration: 600 + attributes: + BootMode : "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + + - name: Clear pending BIOS attributes + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + clear_pending: yes + + - name: Reset BIOS attributes to default settings. 
+ dellemc.openmanage.idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_pwd }}" + validate_certs: False + reset_bios: yes + + - name: Configure boot sources + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-2-3" + Enabled : true + Index : 0 + + - name: Configure multiple boot sources + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Enabled : true + Index : 0 + - Name : "NIC.Integrated.2-2-2" + Enabled : true + Index : 1 + - Name : "NIC.Integrated.3-3-3" + Enabled : true + Index : 2 + + - name: Configure boot sources - Enabling + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Enabled : true + + - name: Configure boot sources - Index + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Index : 0 + + + +Return Values +------------- + +status_msg (success, str, Successfully cleared pending BIOS attributes.) + Overall status of the bios operation. 
+ + +msg (success, dict, {'CompletionTime': '2020-04-20T18:50:20', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_873888162305', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True}) + Status of the job for *boot_sources* or status of the action performed on bios. + + +invalid_attributes (on invalid attributes or values., dict, {'NumLock': 'Invalid value for Enumeration.', 'SystemModelName': 'Read only Attribute cannot be modified.', 'AlertPort': 'Not a valid integer.', 'AssetTag': 'Attribute does not exist.', 'PowerLogInterval': 'Integer out of valid range.', 'AirExhaustTemp': 'Invalid value for Enumeration.'}) + Dict of invalid attributes provided. + + +error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}}) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst new file mode 100644 index 00000000..3825c0a6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst @@ -0,0 +1,282 @@ +.. _idrac_boot_module: + + +idrac_boot -- Configure the boot order settings. 
+================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the boot order settings. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + boot_options (optional, list, None) + Options to enable or disable the boot devices. + + This is mutually exclusive with *boot_order*, *boot_source_override_mode*, *boot_source_override_enabled*, *boot_source_override_target*, and *uefi_target_boot_source_override*. + + + boot_option_reference (optional, str, None) + FQDD of the boot device. + + This is mutually exclusive with *display_name*. + + + display_name (optional, str, None) + Display name of the boot source device. + + This is mutually exclusive with *boot_option_reference*. + + + enabled (True, bool, None) + Enable or disable the boot device. + + + + boot_order (optional, list, None) + This option allows to set the boot devices in the required boot order sequences. + + This is mutually exclusive with *boot_options*. + + + boot_source_override_mode (optional, str, None) + The BIOS boot mode (either Legacy or UEFI) to be used when *boot_source_override_target* boot source is booted from. + + ``legacy`` The system boots in non-UEFI (Legacy) boot mode to the *boot_source_override_target*. + + ``uefi`` The system boots in UEFI boot mode to the *boot_source_override_target*. + + This is mutually exclusive with *boot_options*. + + + boot_source_override_enabled (optional, str, None) + The state of the Boot Source Override feature. + + ``disabled`` The system boots normally. + + ``once`` The system boots (one time) to the *boot_source_override_target*. + + ``continuous`` The system boots to the target specified in the *boot_source_override_target* until this property is set to Disabled. 
+ + The state is set to ``once`` for the one-time boot override and ``continuous`` for the remain-active-until-canceled override. If the state is set to ``once``, the value is reset to ``disabled`` after the *boot_source_override_target* actions have completed successfully. + + Changes to this option do not alter the BIOS persistent boot order configuration. + + This is mutually exclusive with *boot_options*. + + + boot_source_override_target (optional, str, None) + The boot source override target device to use during the next boot instead of the normal boot device. + + ``pxe`` performs PXE boot from the primary NIC. + + ``floppy``, ``cd``, ``hdd``, ``sd_card`` performs boot from their devices respectively. + + ``bios_setup`` performs boot into the native BIOS setup. + + ``utilities`` performs boot from the local utilities. + + ``uefi_target`` performs boot from the UEFI device path found in *uefi_target_boot_source_override*. + + If the *boot_source_override_target* is set to a value other than ``none`` then the *boot_source_override_enabled* is automatically set to ``once``. + + Changes to this option do not alter the BIOS persistent boot order configuration. + + This is mutually exclusive with *boot_options*. + + + uefi_target_boot_source_override (optional, str, None) + The UEFI device path of the device from which to boot when *boot_source_override_target* is ``uefi_target``. + + *boot_source_override_enabled* cannot be set to ``continuous`` if *boot_source_override_target* is set to ``uefi_target`` because this setting is defined in UEFI as a one-time-boot setting. + + Changes to this option do not alter the BIOS persistent boot order configuration. + + This is required if *boot_source_override_target* is ``uefi_target``. + + This is mutually exclusive with *boot_options*. + + + reset_type (optional, str, graceful_restart) + ``none`` Host system is not rebooted and *job_wait* is not applicable. + + ``force_restart`` Forcefully reboot the Host system.
+ + ``graceful_restart`` Gracefully reboot the Host system. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This is applicable when *reset_type* is ``force_restart`` or ``graceful_restart``. + + + job_wait_timeout (optional, int, 900) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + resource_id (optional, str, None) + Redfish ID of the resource. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure the system boot options settings. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_options: + - display_name: Hard drive C + enabled: true + - boot_option_reference: NIC.PxeDevice.2-1 + enabled: true + + - name: Configure the boot order settings.
+ dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_order: + - Boot0001 + - Boot0002 + - Boot0004 + - Boot0003 + + - name: Configure the boot source override mode. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: cd + boot_source_override_enabled: once + + - name: Configure the UEFI target settings. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: uefi + boot_source_override_target: uefi_target + uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)" + + - name: Configure the boot source override mode as pxe. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: pxe + boot_source_override_enabled: continuous + + + +Return Values +------------- + +msg (success, str, Successfully updated the boot settings.) + Successfully updated the boot settings. + + +job (success, dict, {'ActualRunningStartTime': '2019-06-19T00:57:24', 'ActualRunningStopTime': '2019-06-19T01:00:27', 'CompletionTime': '2019-06-19T01:00:27', 'Description': 'Job Instance', 'EndTime': 'TIME_NA', 'Id': 'JID_609237056489', 'JobState': 'Completed', 'JobType': 'BIOSConfiguration', 'Message': 'Job completed successfully.', 'MessageArgs': [], 'MessageId': 'PR19', 'Name': 'Configure: BIOS.Setup.1-1', 'PercentComplete': 100, 'StartTime': '2019-06-19T00:55:05', 'TargetSettingsURI': None}) + Configured job details. 
+ + +boot (success, dict, {'BootOptions': {'Description': 'Collection of BootOptions', 'Members': [{'BootOptionEnabled': False, 'BootOptionReference': 'HardDisk.List.1-1', 'Description': 'Current settings of the Legacy Boot option', 'DisplayName': 'Hard drive C:', 'Id': 'HardDisk.List.1-1', 'Name': 'Legacy Boot option', 'UefiDevicePath': 'VenHw(D6C0639F-C705-4EB9-AA4F-5802D8823DE6)'}], 'Name': 'Boot Options Collection'}, 'BootOrder': ['HardDisk.List.1-1'], 'BootSourceOverrideEnabled': 'Disabled', 'BootSourceOverrideMode': 'Legacy', 'BootSourceOverrideTarget': 'None', 'UefiTargetBootSourceOverride': None}) + Configured boot settings details. + + +error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}}) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst new file mode 100644 index 00000000..fb7f4ead --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst @@ -0,0 +1,259 @@ +.. _idrac_certificates_module: + + +idrac_certificates -- Configure certificates for iDRAC +====================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to generate certificate signing request, import, and export certificates on iDRAC. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, generate_csr) + ``generate_csr``, generate CSR. This requires *cert_params* and *certificate_path*. This is applicable only for ``HTTPS`` + + ``import``, import the certificate file. This requires *certificate_path*. + + ``export``, export the certificate. This requires *certificate_path*. + + ``reset``, reset the certificate to default settings. This is applicable only for ``HTTPS``. + + + certificate_type (optional, str, HTTPS) + Type of the iDRAC certificate. + + ``HTTPS`` The Dell self-signed SSL certificate. + + ``CA`` Certificate Authority(CA) signed SSL certificate. + + ``CSC`` The custom signed SSL certificate. + + ``CLIENT_TRUST_CERTIFICATE`` Client trust certificate. + + + certificate_path (optional, path, None) + Absolute path of the certificate file if *command* is ``import``. + + Directory path with write permissions if *command* is ``generate_csr`` or ``export``. + + + passphrase (optional, str, None) + The passphrase string if the certificate to be imported is passphrase protected. + + + cert_params (optional, dict, None) + Certificate parameters to generate signing request. + + + common_name (True, str, None) + The common name of the certificate. + + + organization_unit (True, str, None) + The name associated with an organizational unit. For example department name. + + + locality_name (True, str, None) + The city or other location where the entity applying for certification is located. + + + state_name (True, str, None) + The state where the entity applying for certification is located. + + + country_code (True, str, None) + The country code of the country where the entity applying for certification is located. + + + email_address (True, str, None) + The email associated with the CSR. + + + organization_name (True, str, None) + The name associated with an organization. + + + subject_alt_name (optional, list, []) + The alternative domain names associated with the request. 
+ + + + resource_id (optional, str, None) + Redfish ID of the resource. + + + reset (optional, bool, True) + To reset the iDRAC after the certificate operation. + + This is applicable when *command* is ``import`` or ``reset``. + + + wait (optional, int, 300) + Maximum wait time for iDRAC to start after the reset, in seconds. + + This is applicable when *command* is ``import`` or ``reset`` and *reset* is ``True``. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above. + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Generate HTTPS certificate signing request + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycerts" + cert_params: + common_name: "sample.domain.com" + organization_unit: "OrgUnit" + locality_name: "Bangalore" + state_name: "Karnataka" + country_code: "IN" + email_address: "admin@domain.com" + organization_name: "OrgName" + subject_alt_name: + - 192.198.2.1 + + - name: Import a HTTPS certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "HTTPS" + certificate_path: "/path/to/cert.pem" + + - name: Export a HTTPS certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycert_dir" + + - name: Import a CSC certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "CSC" + certificate_path: "/path/to/cert.pem" + + - name: Export a Client trust certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "CLIENT_TRUST_CERTIFICATE" + certificate_path: "/home/omam/mycert_dir" + + + +Return Values +------------- + +msg (always, str, Successfully performed the operation generate_csr.) + Status of the certificate configuration operation. 
+ + +certificate_path (when I(command) is C(export) or C(generate_csr), str, /home/ansible/myfiles/cert.pem) + The csr or exported certificate file path + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst new file mode 100644 index 00000000..99c5f147 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst @@ -0,0 +1,224 @@ +.. _idrac_firmware_module: + + +idrac_firmware -- Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP) +==================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Update the Firmware by connecting to a network share (CIFS, NFS, HTTP, HTTPS, FTP) that contains a catalog of available updates. + +Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs. + +All applicable updates contained in the repository are applied to the system. + +This feature is available only with iDRAC Enterprise License. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (True, str, None) + Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported. + + + share_user (optional, str, None) + Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + + share_password (optional, str, None) + Network share user password. This option is mandatory for CIFS Network Share. + + + share_mnt (optional, str, None) + Local mount path of the network share with read-write permission for ansible user. + + This option is not applicable for HTTP, HTTPS, and FTP shares. + + + job_wait (optional, bool, True) + Whether to wait for job completion or not. + + + catalog_file_name (optional, str, Catalog.xml) + Catalog file name relative to the *share_name*. + + + ignore_cert_warning (optional, bool, True) + Specifies if certificate warnings are ignored when HTTPS share is used. If ``True`` option is set, then the certificate warnings are ignored. + + + apply_update (optional, bool, True) + If *apply_update* is set to ``True``, then the packages are applied. + + If *apply_update* is set to ``False``, no updates are applied, and a catalog report of packages is generated and returned. + + + reboot (optional, bool, False) + Provides the option to apply the update packages immediately or in the next reboot. + + If *reboot* is set to ``True``, then the packages are applied immediately. + + If *reboot* is set to ``False``, then the packages are staged and applied in the next reboot. + + Packages that do not require a reboot are applied immediately irrespective of *reboot*. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port.
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - Module will report success based on the iDRAC firmware update parent job status if there are no individual component jobs present. + - For server with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the module will return success with a proper message. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update firmware from repository on a NFS Share + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/share" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + + - name: Update firmware from repository on a CIFS Share + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "full_cifs_path" + share_user: "share_user" + share_password: "share_password" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + + - name: Update firmware from repository on a HTTP + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: 
"http://downloads.dell.com" + reboot: True + job_wait: True + apply_update: True + + - name: Update firmware from repository on a HTTPS + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://downloads.dell.com" + reboot: True + job_wait: True + apply_update: True + + - name: Update firmware from repository on a FTP + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "ftp://ftp.dell.com" + reboot: True + job_wait: True + apply_update: True + + + +Return Values +------------- + +msg (always, str, Successfully updated the firmware.) + Overall firmware update status. + + +update_status (success, dict, AnsibleMapping([('InstanceID', 'JID_XXXXXXXXXXXX'), ('JobState', 'Completed'), ('Message', 'Job completed successfully.'), ('MessageId', 'REDXXX'), ('Name', 'Repository Update'), ('JobStartTime', 'NA'), ('Status', 'Success')])) + Firmware Update job and progress details from the iDRAC. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst new file mode 100644 index 00000000..b6eda6ae --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst @@ -0,0 +1,121 @@ +.. _idrac_firmware_info_module: + + +idrac_firmware_info -- Get Firmware Inventory +============================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Get Firmware Inventory. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Get Installed Firmware Inventory + dellemc.openmanage.idrac_firmware_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + + +Return Values +------------- + +msg (always, str, Successfully fetched the firmware inventory details.) + Fetching the firmware inventory details. 
+ + +firmware_info (success, dict, AnsibleMapping([('Firmware', [AnsibleMapping([('BuildNumber', '0'), ('Classifications', '10'), ('ComponentID', '102573'), ('ComponentType', 'FRMW'), ('DeviceID', None), ('ElementName', 'Power Supply.Slot.1'), ('FQDD', 'PSU.Slot.1'), ('HashValue', None), ('IdentityInfoType', 'OrgID:ComponentType:ComponentID'), ('IdentityInfoValue', 'DCIM:firmware:102573'), ('InstallationDate', '2018-11-22T03:58:23Z'), ('InstanceID', 'DCIM:INSTALLED#0x15__PSU.Slot.1'), ('IsEntity', 'true'), ('Key', 'DCIM:INSTALLED#0x15__PSU.Slot.1'), ('MajorVersion', '0'), ('MinorVersion', '3'), ('RevisionNumber', '67'), ('RevisionString', None), ('Status', 'Installed'), ('SubDeviceID', None), ('SubVendorID', None), ('Updateable', 'true'), ('VendorID', None), ('VersionString', '00.3D.67'), ('impactsTPMmeasurements', 'false')])])])) + Details of the firmware. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst new file mode 100644 index 00000000..6b4cfd4e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst @@ -0,0 +1,127 @@ +.. 
_idrac_lifecycle_controller_job_status_info_module: + + +idrac_lifecycle_controller_job_status_info -- Get the status of a Lifecycle Controller job +========================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module shows the status of a specific Lifecycle Controller job using its job ID. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + job_id (True, str, None) + JOB ID in the format "JID_123456789012". + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Show status of a Lifecycle Control job + dellemc.openmanage.idrac_lifecycle_controller_job_status_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_1234567890" + + + +Return Values +------------- + +msg (always, str, Successfully fetched the job info.) + Overall status of the job facts operation. 
+ + +job_info (success, dict, AnsibleMapping([('ElapsedTimeSinceCompletion', '8742'), ('InstanceID', 'JID_844222910040'), ('JobStartTime', 'NA'), ('JobStatus', 'Completed'), ('JobUntilTime', 'NA'), ('Message', 'Job completed successfully.'), ('MessageArguments', 'NA'), ('MessageID', 'RED001'), ('Name', 'update:DCIM:INSTALLED#iDRAC.Embedded.1-1#IDRACinfo'), ('PercentComplete', '100'), ('Status', 'Success')])) + Displays the status of a Lifecycle Controller job. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst new file mode 100644 index 00000000..79bb43b2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst @@ -0,0 +1,136 @@ +.. _idrac_lifecycle_controller_jobs_module: + + +idrac_lifecycle_controller_jobs -- Delete the Lifecycle Controller Jobs +======================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Delete a Lifecycle Controller job using its job ID or delete all jobs. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + job_id (optional, str, None) + Job ID of the specific job to be deleted. + + All the jobs in the job queue are deleted if this option is not specified. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Delete Lifecycle Controller job queue + dellemc.openmanage.idrac_lifecycle_controller_jobs: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + - name: Delete Lifecycle Controller job using a job ID + dellemc.openmanage.idrac_lifecycle_controller_jobs: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_801841929470" + + + +Return Values +------------- + +msg (always, str, Successfully deleted the job.) + Status of the delete operation. + + +status (success, dict, AnsibleMapping([('Message', 'The specified job was deleted'), ('MessageID', 'SUP020'), ('ReturnValue', '0')])) + Details of the delete operation. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst new file mode 100644 index 00000000..f2d20a24 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst @@ -0,0 +1,161 @@ +.. _idrac_lifecycle_controller_logs_module: + + +idrac_lifecycle_controller_logs -- Export Lifecycle Controller logs to a network share or local path. +===================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Export Lifecycle Controller logs to a given network share or local path. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (True, str, None) + Network share or local path. + + CIFS, NFS network share types are supported. + + + share_user (optional, str, None) + Network share user in the format 'user@domain' or 'domain\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + + share_password (optional, str, None) + Network share user password. 
This option is mandatory for CIFS Network Share. + + + job_wait (optional, bool, True) + Whether to wait for the running job completion or not. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Export lifecycle controller logs to NFS share. + dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/nfsfileshare" + + - name: Export lifecycle controller logs to CIFS share. + dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: "share_user_name" + share_password: "share_user_pwd" + + - name: Export lifecycle controller logs to LOCAL path. 
+ dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/example/export_lc" + + + +Return Values +------------- + +msg (always, str, Successfully exported the lifecycle controller logs.) + Status of the export lifecycle controller logs job. + + +lc_logs_status (success, dict, AnsibleMapping([('ElapsedTimeSinceCompletion', '0'), ('InstanceID', 'JID_274774785395'), ('JobStartTime', 'NA'), ('JobStatus', 'Completed'), ('JobUntilTime', 'NA'), ('Message', 'LCL Export was successful'), ('MessageArguments', 'NA'), ('MessageID', 'LC022'), ('Name', 'LC Export'), ('PercentComplete', '100'), ('Status', 'Success'), ('file', '192.168.0.0:/nfsfileshare/190.168.0.1_20210728_133437_LC_Log.log'), ('retval', True)])) + Status of the export operation along with job details and file path. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst new file mode 100644 index 00000000..9757ab8e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst @@ -0,0 +1,122 @@ +.. 
_idrac_lifecycle_controller_status_info_module: + + +idrac_lifecycle_controller_status_info -- Get the status of the Lifecycle Controller +==================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Show status of the Lifecycle Controller + dellemc.openmanage.idrac_lifecycle_controller_status_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + + +Return Values +------------- + +msg (always, str, Successfully fetched the lifecycle controller status.) + Overall status of fetching lifecycle controller status. 
+ + +lc_status_info (success, dict, AnsibleMapping([('msg', AnsibleMapping([('LCReady', True), ('LCStatus', 'Ready')]))])) + Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst new file mode 100644 index 00000000..d565eae7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst @@ -0,0 +1,264 @@ +.. _idrac_network_module: + + +idrac_network -- Configures the iDRAC network attributes +======================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure iDRAC network settings. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + This option is deprecated and will be removed in the later version. + + + share_user (optional, str, None) + (deprecated)Network share user name. Use the format 'user@domain' or 'domain\user' if user is part of a domain. This option is mandatory for CIFS share. 
+ + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for network shares. + + This option is deprecated and will be removed in the later version. + + + setup_idrac_nic_vlan (optional, str, None) + Allows to configure VLAN on iDRAC. + + + register_idrac_on_dns (optional, str, None) + Registers iDRAC on a Domain Name System (DNS). + + + dns_idrac_name (optional, str, None) + Name of the DNS to register iDRAC. + + + auto_config (optional, str, None) + Allows to enable or disable auto-provisioning to automatically acquire domain name from DHCP. + + + static_dns (optional, str, None) + Enter the static DNS domain name. + + + vlan_id (optional, int, None) + Enter the VLAN ID. The VLAN ID must be a number from 1 through 4094. + + + vlan_priority (optional, int, None) + Enter the priority for the VLAN ID. The priority value must be a number from 0 through 7. + + + enable_nic (optional, str, None) + Allows to enable or disable the Network Interface Controller (NIC) used by iDRAC. + + + nic_selection (optional, str, None) + Select one of the available NICs. + + + failover_network (optional, str, None) + Select one of the remaining LOMs. If a network fails, the traffic is routed through the failover network. + + + auto_detect (optional, str, None) + Allows to auto detect the available NIC types used by iDRAC. + + + auto_negotiation (optional, str, None) + Allows iDRAC to automatically set the duplex mode and network speed. + + + network_speed (optional, str, None) + Select the network speed for the selected NIC. + + + duplex_mode (optional, str, None) + Select the type of data transmission for the NIC. 
+ + + nic_mtu (optional, int, None) + Maximum Transmission Unit of the NIC. + + + ip_address (optional, str, None) + Enter a valid iDRAC static IPv4 address. + + + enable_dhcp (optional, str, None) + Allows to enable or disable Dynamic Host Configuration Protocol (DHCP) in iDRAC. + + + enable_ipv4 (optional, str, None) + Allows to enable or disable IPv4 configuration. + + + dns_from_dhcp (optional, str, None) + Allows to enable DHCP to obtain DNS server address. + + + static_dns_1 (optional, str, None) + Enter the preferred static DNS server IPv4 address. + + + static_dns_2 (optional, str, None) + Enter the preferred static DNS server IPv4 address. + + + static_gateway (optional, str, None) + Enter the static IPv4 gateway address to iDRAC. + + + static_net_mask (optional, str, None) + Enter the static IP subnet mask to iDRAC. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Configure iDRAC network settings + dellemc.openmanage.idrac_network: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + register_idrac_on_dns: Enabled + dns_idrac_name: None + auto_config: None + static_dns: None + setup_idrac_nic_vlan: Enabled + vlan_id: 0 + vlan_priority: 1 + enable_nic: Enabled + nic_selection: Dedicated + failover_network: T_None + auto_detect: Disabled + auto_negotiation: Enabled + network_speed: T_1000 + duplex_mode: Full + nic_mtu: 1500 + ip_address: "192.168.0.1" + enable_dhcp: Enabled + enable_ipv4: Enabled + static_dns_1: "192.168.0.1" + static_dns_2: "192.168.0.1" + dns_from_dhcp: Enabled + static_gateway: None + static_net_mask: None + + + +Return Values +------------- + +msg (always, str, Successfully configured the idrac network settings.) + Successfully configured the idrac network settings. + + +network_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_856418531008'), ('@odata.type', '#DellJob.v1_0_2.DellJob'), ('CompletionTime', '2020-03-31T03:04:15'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_856418531008'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Status of the Network settings operation job. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . + *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst new file mode 100644 index 00000000..b3a18dce --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst @@ -0,0 +1,141 @@ +.. _idrac_os_deployment_module: + + +idrac_os_deployment -- Boot to a network ISO image +================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Boot to a network ISO image. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + share_name (True, str, None) + CIFS or NFS Network share. + + + share_user (optional, str, None) + Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + + share_password (optional, str, None) + Network share user password. This option is mandatory for CIFS Network Share. + + + iso_image (True, str, None) + Network ISO name. + + + expose_duration (optional, int, 1080) + It is the time taken in minutes for the ISO image file to be exposed as a local CD-ROM device to the host server. When the time expires, the ISO image gets automatically detached. 
+ + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Boot to Network ISO + dellemc.openmanage.idrac_os_deployment: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/nfsfileshare" + iso_image: "unattended_os_image.iso" + expose_duration: 180 + + + +Return Values +------------- + +msg (on error, str, Failed to boot to network iso) + Over all device information status. + + +boot_status (always, dict, AnsibleMapping([('DeleteOnCompletion', 'false'), ('InstanceID', 'DCIM_OSDConcreteJob:1'), ('JobName', 'BootToNetworkISO'), ('JobStatus', 'Success'), ('Message', 'The command was successful.'), ('MessageID', 'OSD1'), ('Name', 'BootToNetworkISO'), ('Status', 'Success'), ('file', '192.168.0.0:/nfsfileshare/unattended_os_image.iso'), ('retval', True)])) + Details of the boot to network ISO image operation. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst new file mode 100644 index 00000000..64f8f4ea --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst @@ -0,0 +1,425 @@ +.. _idrac_redfish_storage_controller_module: + + +idrac_redfish_storage_controller -- Configures the physical disk, virtual disk, and storage controller settings +=============================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the users to configure the settings of the physical disk, virtual disk, and storage controller. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, AssignSpare) + These actions may require a system reset, depending on the capabilities of the controller. + + ``ResetConfig`` - Deletes all the virtual disks and unassigns all hot spares on physical disks. *controller_id* is required for this operation. + + ``AssignSpare`` - Assigns a physical disk as a dedicated or global hot spare for a virtual disk. *target* is required for this operation. + + ``SetControllerKey`` - Sets the key on controllers, which is used to encrypt the drives in Local Key Management(LKM). *controller_id*, *key*, and *key_id* are required for this operation. + + ``RemoveControllerKey`` - Deletes the encryption key on the controller. *controller_id* is required for this operation. + + ``ReKey`` - Resets the key on the controller and it always reports as changes found when check mode is enabled. 
*controller_id*, *old_key*, *key_id*, and *key* are required for this operation. + + ``UnassignSpare`` - To unassign the Global or Dedicated hot spare. *target* is required for this operation. + + ``EnableControllerEncryption`` - To enable Local Key Management (LKM) or Secure Enterprise Key Manager (SEKM) on controllers that support encryption of the drives. *controller_id*, *key*, and *key_id* are required for this operation. + + ``BlinkTarget`` - Blinks the target virtual drive or physical disk and it always reports as changes found when check mode is enabled. *target* or *volume_id* is required for this operation. + + ``UnBlinkTarget`` - Unblink the target virtual drive or physical disk and it always reports as changes found when check mode is enabled. *target* or *volume_id* is required for this operation. + + ``ConvertToRAID`` - Converts the disk from non-Raid to Raid. *target* is required for this operation. + + ``ConvertToNonRAID`` - Converts the disk from Raid to non-Raid. *target* is required for this operation. + + ``ChangePDStateToOnline`` - To set the disk status to online. *target* is required for this operation. + + ``ChangePDStateToOffline`` - To set the disk status to offline. *target* is required for this operation. + + ``LockVirtualDisk`` - To encrypt the virtual disk. *volume_id* is required for this operation. + + + target (optional, list, None) + Fully Qualified Device Descriptor (FQDD) of the target physical drive. + + This is mandatory when *command* is ``AssignSpare``, ``UnassignSpare``, ``ChangePDStateToOnline``, ``ChangePDStateToOffline``, ``ConvertToRAID``, or ``ConvertToNonRAID``. + + If *volume_id* is not specified or empty, this physical drive will be assigned as a global hot spare when *command* is ``AssignSpare``. + + Notes: Global or Dedicated hot spare can be assigned only once for a physical disk, Re-assign cannot be done when *command* is ``AssignSpare``. 
+ + + volume_id (optional, list, None) + Fully Qualified Device Descriptor (FQDD) of the volume. + + Applicable if *command* is ``AssignSpare``, ``BlinkTarget``, ``UnBlinkTarget`` or ``LockVirtualDisk``. + + *volume_id* or *target* is required when the *command* is ``BlinkTarget`` or ``UnBlinkTarget``, if both are specified *target* is considered. + + To know the number of volumes to which a hot spare can be assigned, refer iDRAC Redfish API documentation. + + + controller_id (optional, str, None) + Fully Qualified Device Descriptor (FQDD) of the storage controller. For example-'RAID.Slot.1-1'. + + This option is mandatory when *command* is ``ResetConfig``, ``SetControllerKey``, ``RemoveControllerKey``, ``ReKey``, or ``EnableControllerEncryption``. + + + key (optional, str, None) + A new security key passphrase that the encryption-capable controller uses to create the encryption key. The controller uses the encryption key to lock or unlock access to the Self-Encrypting Drive (SED). Only one encryption key can be created for each controller. + + This is mandatory when *command* is ``SetControllerKey``, ``ReKey``, or ``EnableControllerEncryption`` and when *mode* is ``LKM``. + + The length of the key can be a maximum of 32 characters in length, where the expanded form of the special character is counted as a single character. + + The key must contain at least one character from each of the character classes: uppercase, lowercase, number, and special character. + + + key_id (optional, str, None) + This is a user supplied text label associated with the passphrase. + + This is mandatory when *command* is ``SetControllerKey``, ``ReKey``, or ``EnableControllerEncryption`` and when *mode* is ``LKM``. + + The length of *key_id* can be a maximum of 32 characters in length and should not have any spaces. + + + old_key (optional, str, None) + Security key passphrase used by the encryption-capable controller. 
+ + This option is mandatory when *command* is ``ReKey`` and *mode* is ``LKM``. + + + mode (optional, str, LKM) + Encryption mode of the encryption capable controller. + + This option is applicable only when *command* is ``ReKey`` or ``EnableControllerEncryption``. + + ``SEKM`` requires secure enterprise key manager license on the iDRAC. + + ``LKM`` to choose mode as local key mode. + + + job_wait (optional, bool, False) + Provides the option if the module has to wait for the job to be completed. + + + job_wait_timeout (optional, int, 120) + The maximum wait time of job completion in seconds before the job tracking is stopped. + + This option is applicable when *job_wait* is ``True``. + + + baseuri (True, str, None) + IP address of the target out-of-band controller. For example- 192.168.0.1:443. + + + username (True, str, None) + Username of the target out-of-band controller. + + + password (True, str, None) + Password of the target out-of-band controller. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell iDRAC. + - This module always reports as changes found when ``ReKey``, ``BlinkTarget``, and ``UnBlinkTarget``. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Assign dedicated hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + volume_id: + - "Disk.Virtual.0:RAID.Slot.1-1" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_dedicated_hot_spare + + - name: Assign global hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_global_hot_spare + + - name: Unassign hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + command: UnassignSpare + tags: + - un-assign-hot-spare + + - name: Set controller encryption key + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "SetControllerKey" + controller_id: "RAID.Slot.1-1" + key: "PassPhrase@123" + key_id: "mykeyid123" + tags: + - set_controller_key + + - name: Rekey in LKM mode + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + key: "NewPassPhrase@123" + key_id: "newkeyid123" + old_key: "OldPassPhrase@123" + tags: + - rekey_lkm + + - name: Rekey in SEKM mode + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + mode: "SEKM" + tags: + - 
rekey_sekm + + - name: Remove controller key + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "RemoveControllerKey" + controller_id: "RAID.Slot.1-1" + tags: + - remove_controller_key + + - name: Reset controller configuration + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ResetConfig" + controller_id: "RAID.Slot.1-1" + tags: + - reset_config + + - name: Enable controller encryption + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "EnableControllerEncryption" + controller_id: "RAID.Slot.1-1" + mode: "LKM" + key: "your_Key@123" + key_id: "your_Keyid@123" + tags: + - enable-encrypt + + - name: Blink physical disk. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: BlinkTarget + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - blink-target + + - name: Blink virtual drive. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: BlinkTarget + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - blink-volume + + - name: Unblink physical disk. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: UnBlinkTarget + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - unblink-target + + - name: Unblink virtual drive. 
+ dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: UnBlinkTarget + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - unblink-drive + + - name: Convert physical disk to RAID + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-raid + + - name: Convert physical disk to non-RAID + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToNonRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-non-raid + + - name: Change physical disk state to online. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOnline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-online + + - name: Change physical disk state to offline. 
+ dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOffline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-offline + + - name: Lock virtual drive + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "LockVirtualDisk" + volume_id: "Disk.Virtual.0:RAID.SL.3-1" + tags: + - lock + + + +Return Values +------------- + +msg (always, str, Successfully submitted the job that performs the AssignSpare operation) + Overall status of the storage controller configuration operation. + + +task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXXX'), ('uri', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX')])) + ID and URI resource of the job created. + + +status (always, dict, AnsibleMapping([('ActualRunningStartTime', '2022-02-09T04:42:41'), ('ActualRunningStopTime', '2022-02-09T04:44:00'), ('CompletionTime', '2022-02-09T04:44:00'), ('Description', 'Job Instance'), ('EndTime', 'TIME_NA'), ('Id', 'JID_444033604418'), ('JobState', 'Completed'), ('JobType', 'RealTimeNoRebootConfiguration'), ('Message', 'Job completed successfully.'), ('MessageArgs', []), ('MessageId', 'PR19'), ('Name', 'Configure: RAID.Integrated.1-1'), ('PercentComplete', 100), ('StartTime', '2022-02-09T04:42:40'), ('TargetSettingsURI', None)])) + Status of the submitted job. 
+ + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to run the method because the requested HTTP method is not allowed.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'iDRAC.1.6.SYS402'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Enter a valid HTTP method and retry the operation. For information about valid methods, see the Redfish Users Guide available on the support site.'), ('Severity', 'Informational')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))])) + Details of a http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) +- Felix Stephen (@felixs88) +- Husniya Hameed (@husniya_hameed) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst new file mode 100644 index 00000000..86728701 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst @@ -0,0 +1,125 @@ +.. _idrac_reset_module: + + +idrac_reset -- Reset iDRAC +========================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module resets iDRAC. + +iDRAC is not accessible for some time after running this module. It is recommended to wait for some time, before trying to connect to iDRAC. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. 
+ + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Reset iDRAC + dellemc.openmanage.idrac_reset: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + idrac_port: 443 + ca_path: "/path/to/ca_cert.pem" + + + +Return Values +------------- + +msg (always, str, Successfully performed iDRAC reset.) + Status of the iDRAC reset operation. + + +reset_status (always, dict, AnsibleMapping([('idracreset', AnsibleMapping([('Data', AnsibleMapping([('StatusCode', 204)])), ('Message', 'none'), ('Status', 'Success'), ('StatusCode', 204), ('retval', True)]))])) + Details of iDRAC reset operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst new file mode 100644 index 00000000..f013c229 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst @@ -0,0 +1,386 @@ +.. _idrac_server_config_profile_module: + + +idrac_server_config_profile -- Export or Import iDRAC Server Configuration Profile (SCP) +======================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Export the Server Configuration Profile (SCP) from the iDRAC or import from a network share (CIFS, NFS, HTTP, HTTPS) or a local file. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, export) + If ``import``, the module performs SCP import operation. + + If ``export``, the module performs SCP export operation. + + If ``preview``, the module performs SCP preview operation. + + + job_wait (True, bool, None) + Whether to wait for job completion or not. + + + share_name (True, str, None) + Network share or local path. + + CIFS, NFS, HTTP, and HTTPS network share types are supported. + + + share_user (optional, str, None) + Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share. + + + share_password (optional, str, None) + Network share user password. This option is mandatory for CIFS Network Share. + + + scp_file (optional, str, None) + Name of the server configuration profile (SCP) file. + + This option is mandatory if *command* is ``import``. 
+ + The default format _YYmmdd_HHMMSS_scp is used if this option is not specified for ``import``. + + *export_format* is used if the valid extension file is not provided for ``import``. + + + scp_components (optional, str, ALL) + If ``ALL``, this module exports or imports all components configurations from SCP file. + + If ``IDRAC``, this module exports or imports iDRAC configuration from SCP file. + + If ``BIOS``, this module exports or imports BIOS configuration from SCP file. + + If ``NIC``, this module exports or imports NIC configuration from SCP file. + + If ``RAID``, this module exports or imports RAID configuration from SCP file. + + + shutdown_type (optional, str, Graceful) + This option is applicable for ``import`` command. + + If ``Graceful``, the job gracefully shuts down the operating system and turns off the server. + + If ``Forced``, it forcefully shuts down the server. + + If ``NoReboot``, the job that applies the SCP will pause until you manually reboot the server. + + + end_host_power_state (optional, str, On) + This option is applicable for ``import`` command. + + If ``On``, End host power state is on. + + If ``Off``, End host power state is off. + + + export_format (optional, str, XML) + Specify the output file format. This option is applicable for ``export`` command. + + + export_use (optional, str, Default) + Specify the type of server configuration profile (SCP) to be exported. This option is applicable for ``export`` command. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers, the servers must have iDRAC Enterprise license or later. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Export SCP with IDRAC components in JSON format to a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + scp_components: IDRAC + scp_file: example_file + export_format: JSON + export_use: Clone + job_wait: True + + - name: Import SCP with IDRAC components in JSON format from a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: import + scp_components: "IDRAC" + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + + - name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + scp_components: "BIOS" + export_format: XML + export_use: Default + job_wait: True + + - name: Import SCP with BIOS components in XML format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: 
"192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: import + scp_components: "BIOS" + scp_file: 192.168.0.1_20210618_162856.xml + shutdown_type: NoReboot + end_host_power_state: "Off" + job_wait: False + + - name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username@domain + share_password: share_password + share_mnt: /mnt/cifs + scp_file: example_file.xml + scp_components: "RAID" + export_format: XML + export_use: Default + job_wait: True + + - name: Import SCP with RAID components in XML format from a CIFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + share_mnt: /mnt/cifs + command: import + scp_components: "RAID" + scp_file: example_file.xml + shutdown_type: Forced + end_host_power_state: "On" + job_wait: True + + - name: Export SCP with ALL components in JSON format to a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + scp_components: ALL + export_format: JSON + job_wait: False + + - name: Import SCP with ALL components in JSON format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: 
"user_password" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: "http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: True + + - name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_components: ALL + export_format: XML + export_use: Replace + job_wait: True + + - name: Import SCP with ALL components in XML format from a HTTPS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_file: 192.168.0.1_20160618_164647.xml + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + + - name: Preview SCP with ALL components in XML format from a CIFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + + - name: Preview SCP with ALL components in JSON format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: preview 
+ scp_components: "IDRAC" + scp_file: example_file.xml + job_wait: True + + - name: Preview SCP with ALL components in XML format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.1/http-share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + + - name: Preview SCP with ALL components in XML format from a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: preview + scp_components: "IDRAC" + scp_file: example_file.json + job_wait: False + + + +Return Values +------------- + +msg (always, str, Successfully imported the Server Configuration Profile) + Status of the import or export SCP job. + + +scp_status (success, dict, AnsibleMapping([('Id', 'JID_XXXXXXXXX'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'XXX123'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + SCP operation job and progress details from the iDRAC. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
 See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst new file mode 100644 index 00000000..31d69bd3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst @@ -0,0 +1,160 @@ +.. _idrac_syslog_module: + + +idrac_syslog -- Enable or disable the syslog on iDRAC +===================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to enable or disable the iDRAC syslog. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + syslog (optional, str, Enabled) + Enables or disables an iDRAC syslog. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + share_name (True, str, None) + Network share or a local path. + + + share_user (optional, str, None) + Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain. This option is mandatory for CIFS share. + + + share_password (optional, str, None) + Network share user password. This option is mandatory for CIFS share. + + + share_mnt (optional, str, None) + Local mount path of the network share with read-write permission for ansible user. This option is mandatory for network shares. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Enable iDRAC syslog + dellemc.openmanage.idrac_syslog: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + share_password: "share_user_pwd" + share_user: "share_user_name" + share_mnt: "/mnt/share" + syslog: "Enabled" + + - name: Disable iDRAC syslog + dellemc.openmanage.idrac_syslog: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + share_password: "share_user_pwd" + share_user: "share_user_name" + share_mnt: "/mnt/share" + syslog: "Disabled" + + + +Return Values +------------- + +msg (always, str, Successfully fetch the syslogs.) + Overall status of the syslog export operation. 
+ + +syslog_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485'), ('@odata.type', '#DellJob.v1_0_2.DellJob'), ('CompletionTime', '2020-03-27T02:27:45'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_852940632485'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Job details of the syslog operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst new file mode 100644 index 00000000..9d0bade4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst @@ -0,0 +1,121 @@ +.. _idrac_system_info_module: + + +idrac_system_info -- Get the PowerEdge Server System Inventory +============================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Get the PowerEdge Server System Inventory. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Get System Inventory + dellemc.openmanage.idrac_system_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + + + +Return Values +------------- + +msg (always, str, Successfully fetched the system inventory details.) + Overall system inventory information status. + + +system_info (success, dict, AnsibleMapping([('BIOS', [AnsibleMapping([('BIOSReleaseDate', '11/26/2019'), ('FQDD', 'BIOS.Setup.1-1'), ('InstanceID', 'DCIM:INSTALLED#741__BIOS.Setup.1-1'), ('Key', 'DCIM:INSTALLED#741__BIOS.Setup.1-1'), ('SMBIOSPresent', 'True'), ('VersionString', '2.4.8')])])])) + Details of the PowerEdge Server System Inventory. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Rajeev Arakkal (@rajeevarakkal) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst new file mode 100644 index 00000000..d05e94fe --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst @@ -0,0 +1,174 @@ +.. _idrac_timezone_ntp_module: + + +idrac_timezone_ntp -- Configures time zone and NTP on iDRAC +=========================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure time zone and NTP on iDRAC. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- omsdk >= 1.2.488 +- python >= 3.8.6 + + + +Parameters +---------- + + setup_idrac_timezone (optional, str, None) + Allows to configure time zone on iDRAC. + + + enable_ntp (optional, str, None) + Allows to enable or disable NTP on iDRAC. + + + ntp_server_1 (optional, str, None) + The IP address of the NTP server 1. + + + ntp_server_2 (optional, str, None) + The IP address of the NTP server 2. + + + ntp_server_3 (optional, str, None) + The IP address of the NTP server 3. + + + share_name (optional, str, None) + (deprecated)Network share or a local path. + + This option is deprecated and will be removed in the later version. 
+ + + share_user (optional, str, None) + (deprecated)Network share user name. Use the format 'user@domain' or 'domain\user' if user is part of a domain. This option is mandatory for CIFS share. + + This option is deprecated and will be removed in the later version. + + + share_password (optional, str, None) + (deprecated)Network share user password. This option is mandatory for CIFS share. + + This option is deprecated and will be removed in the later version. + + + share_mnt (optional, str, None) + (deprecated)Local mount path of the network share with read-write permission for ansible user. This option is mandatory for network shares. + + This option is deprecated and will be removed in the later version. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module requires 'Administrator' privilege for *idrac_user*. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Configure time zone and NTP on iDRAC + dellemc.openmanage.idrac_timezone_ntp: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + setup_idrac_timezone: "UTC" + enable_ntp: Enabled + ntp_server_1: "190.168.0.1" + ntp_server_2: "190.168.0.2" + ntp_server_3: "190.168.0.3" + + + +Return Values +------------- + +msg (always, str, Successfully configured the iDRAC time settings.) + Overall status of the timezone and ntp configuration. + + +timezone_ntp_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_861801613971'), ('@odata.type', '#DellJob.v1_0_0.DellJob'), ('CompletionTime', '2020-04-06T19:06:01'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_861801613971'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)])) + Job details of the time zone setting operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + +- This module will be removed in version + . 
+ *[deprecated]* + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Anooja Vardhineni (@anooja-vardhineni) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst new file mode 100644 index 00000000..e404582b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst @@ -0,0 +1,233 @@ +.. _idrac_user_module: + + +idrac_user -- Configure settings for user accounts +================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to perform the following, + +Add a new user account. + +Edit a user account. + +Enable or Disable a user account. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + Select ``present`` to create or modify a user account. + + Select ``absent`` to remove a user account. + + Ensure Lifecycle Controller is available because the user operation uses the capabilities of Lifecycle Controller. + + + user_name (True, str, None) + Provide the *user_name* of the account to be created, deleted or modified. + + + user_password (optional, str, None) + Provide the password for the user account. The password can be changed when the user account is modified. + + To ensure security, the *user_password* must be at least eight characters long and must contain lowercase and upper-case characters, numbers, and special characters. + + + new_user_name (optional, str, None) + Provide the *user_name* for the account to be modified. + + + privilege (optional, str, None) + Following are the role-based privileges. 
+ + A user with ``Administrator`` privilege can log in to iDRAC, and then configure iDRAC, configure users, clear logs, control and configure system, access virtual console, access virtual media, test alerts, and execute debug commands. + + A user with ``Operator`` privilege can log in to iDRAC, and then configure iDRAC, control and configure system, access virtual console, access virtual media, and execute debug commands. + + A user with ``ReadOnly`` privilege can only log in to iDRAC. + + A user with ``None``, no privileges assigned. + + + ipmi_lan_privilege (optional, str, None) + The Intelligent Platform Management Interface LAN privilege level assigned to the user. + + + ipmi_serial_privilege (optional, str, None) + The Intelligent Platform Management Interface Serial Port privilege level assigned to the user. + + This option is only applicable for rack and tower servers. + + + enable (optional, bool, None) + Provide the option to enable or disable a user from logging in to iDRAC. + + + sol_enable (optional, bool, None) + Enables Serial Over Lan (SOL) for an iDRAC user. + + + protocol_enable (optional, bool, None) + Enables protocol for the iDRAC user. + + + authentication_protocol (optional, str, None) + This option allows to configure one of the following authentication protocol types to authenticate the iDRAC user. + + Secure Hash Algorithm ``SHA``. + + Message Digest 5 ``MD5``. + + An authentication protocol is not configured if ``None`` is selected. + + + privacy_protocol (optional, str, None) + This option allows to configure one of the following privacy encryption protocols for the iDRAC user. + + Data Encryption Standard ``DES``. + + Advanced Encryption Standard ``AES``. + + A privacy protocol is not configured if ``None`` is selected. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. 
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure a new iDRAC user + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: present + user_name: user_name + user_password: user_password + privilege: Administrator + ipmi_lan_privilege: Administrator + ipmi_serial_privilege: Administrator + enable: true + sol_enable: true + protocol_enable: true + authentication_protocol: SHA + privacy_protocol: AES + + - name: Modify existing iDRAC user username and password + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: present + user_name: user_name + new_user_name: new_user_name + user_password: user_password + + - name: Delete existing iDRAC user account + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: absent + user_name: user_name + + + +Return Values +------------- + +msg (always, str, Successfully created user account details.) + Status of the iDRAC user configuration. 
+ + +status (success, dict, AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Successfully Completed Request'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.5.Success'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')]), AnsibleMapping([('Message', 'The operation successfully completed.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.2.1.SYS413'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'No response action is required.'), ('Severity', 'Informational')])])])) + Configures the iDRAC users attributes. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst new file mode 100644 index 00000000..7210dd95 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst @@ -0,0 +1,255 @@ +.. _idrac_virtual_media_module: + + +idrac_virtual_media -- Configure the Remote File Share settings. +================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure Remote File Share settings. 
+ + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + virtual_media (True, list, None) + Details of the Remote File Share. + + + insert (True, bool, None) + ``True`` connects the remote image file. + + ``False`` ejects the remote image file if connected. + + + image (optional, path, None) + The path of the image file. The supported file types are .img and .iso. + + The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is redirected as a virtual CDROM. + + This option is required when *insert* is ``True``. + + The following are the examples of the share location: CIFS share: //192.168.0.1/file_path/image_name.iso, NFS share: 192.168.0.2:/file_path/image_name.img, HTTP share: http://192.168.0.3/file_path/image_name.iso, HTTPS share: https://192.168.0.4/file_path/image_name.img + + CIFS share is not supported by iDRAC7 and iDRAC8. + + HTTPS share with credentials is not supported by iDRAC7 and iDRAC8. + + + index (optional, int, None) + Index of the Remote File Share. For example, to specify the Remote File Share 1, the value of *index* should be 1. If *index* is not specified, the order of *virtual_media* list will be considered. + + + domain (optional, str, None) + Domain name of network share. This option is applicable for CIFS and HTTPS share. + + + username (optional, str, None) + Network share username. This option is applicable for CIFS and HTTPS share. + + + password (optional, str, None) + Network share password. This option is applicable for CIFS and HTTPS share. + + This module always reports as the changes found when *password* is provided. + + + media_type (optional, str, None) + Type of the image file. This is applicable when *insert* is ``True``. + + + + force (optional, bool, False) + ``True`` ejects the image file if already connected and inserts the file provided in *image*. 
This is applicable when *insert* is ``True``. + + + resource_id (optional, str, None) + Resource id of the iDRAC, if not specified manager collection id will be used. + + + idrac_ip (True, str, None) + iDRAC IP Address. + + + idrac_user (True, str, None) + iDRAC username. + + + idrac_password (True, str, None) + iDRAC user password. + + + idrac_port (optional, int, 443) + iDRAC port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Insert image file to Remote File Share 1 using CIFS share. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - insert: true + image: "//192.168.0.2/file_path/file.iso" + username: "username" + password: "password" + + - name: Insert image file to Remote File Share 2 using NFS share. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - index: 2 + insert: true + image: "192.168.0.4:/file_path/file.iso" + + - name: Insert image file to Remote File Share 1 and 2 using HTTP. 
+ dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "http://192.168.0.4/file_path/file.img" + - index: 2 + insert: true + image: "http://192.168.0.4/file_path/file.img" + + - name: Insert image file using HTTPS. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "https://192.168.0.5/file_path/file.img" + username: username + password: password + + - name: Eject multiple virtual media. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: false + - index: 2 + insert: false + + - name: Ejection of image file from Remote File Share 1. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + insert: false + + - name: Insertion and ejection of image file in single task. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: https://192.168.0.5/file/file.iso + username: username + password: password + - index: 2 + insert: false + + + +Return Values +------------- + +msg (success, str, Successfully performed the virtual media operation.) + Successfully performed the virtual media operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst new file mode 100644 index 00000000..fa9c6b8c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst @@ -0,0 +1,273 @@ +.. _ome_active_directory_module: + + +ome_active_directory -- Configure Active Directory groups to be used with Directory Services +============================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to add, modify, and delete OpenManage Enterprise connection with Active Directory Service. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + domain_server (optional, list, None) + Enter the domain name or FQDN or IP address of the domain controller. + + If *domain_controller_lookup* is ``DNS``, enter the domain name to query DNS for the domain controllers. + + If *domain_controller_lookup* is ``MANUAL``, enter the FQDN or the IP address of the domain controller. The maximum number of Active Directory servers that can be added is three. + + + domain_controller_lookup (optional, str, DNS) + Select the Domain Controller Lookup method. + + + domain_controller_port (optional, int, 3269) + Domain controller port. 
+ + By default, Global Catalog Address port number 3269 is populated. + + For the Domain Controller Access, enter 636 as the port number. + + ``NOTE``, Only LDAPS ports are supported. + + + group_domain (optional, str, None) + Provide the group domain in the format ``example.com`` or ``ou=org, dc=example, dc=com``. + + + id (optional, int, None) + Provide the ID of the existing Active Directory service connection. + + This is applicable for modification and deletion. + + This is mutually exclusive with *name*. + + + name (optional, str, None) + Provide a name for the Active Directory connection. + + This is applicable for creation and deletion. + + This is mutually exclusive with *name*. + + + network_timeout (optional, int, 120) + Enter the network timeout duration in seconds. + + The supported timeout duration range is 15 to 300 seconds. + + + search_timeout (optional, int, 120) + Enter the search timeout duration in seconds. + + The supported timeout duration range is 15 to 300 seconds. + + + state (optional, str, present) + ``present`` allows to create or modify an Active Directory service. + + ``absent`` allows to delete a Active Directory service. + + + test_connection (optional, bool, False) + Enables testing the connection to the domain controller. + + The connection to the domain controller is tested with the provided Active Directory service details. + + If test fails, module will error out. + + If ``yes``, *domain_username* and *domain_password* has to be provided. + + + domain_password (optional, str, None) + Provide the domain password. + + This is applicable when *test_connection* is ``yes``. + + + domain_username (optional, str, None) + Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\username) format. + + This is applicable when *test_connection* is ``yes``. + + + validate_certificate (optional, bool, False) + Enables validation of SSL certificate of the domain controller. 
+ + The module will always report change when this is ``yes``. + + + certificate_file (optional, path, None) + Provide the full path of the SSL certificate. + + The certificate should be a Root CA Certificate encoded in Base64 format. + + This is applicable when *validate_certificate* is ``yes``. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - The module will always report change when *validate_certificate* is ``yes``. + - Run this module from a system that has direct access to OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Add Active Directory service using DNS lookup along with the test connection + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad1 + domain_server: + - domainname.com + group_domain: domainname.com + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + + - name: Add Active Directory service using IP address of the domain controller with certificate validation + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.181 + group_domain: domainname.com + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" + + - name: Modify domain controller IP address, network_timeout and group_domain + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.189 + group_domain: newdomain.in + network_timeout: 150 + + - name: Delete Active Directory service + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + state: absent + + - name: Test connection to existing Active Directory service with certificate validation + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" + + + +Return Values +------------- + +msg (always, str, Successfully 
renamed the slot(s).) + Overall status of the Active Directory operation. + + +active_directory (on change, dict, {'Name': 'ad_test', 'Id': 21789, 'ServerType': 'MANUAL', 'ServerName': ['192.168.20.181'], 'DnsServer': [], 'GroupDomain': 'dellemcdomain.com', 'NetworkTimeOut': 120, 'Password': None, 'SearchTimeOut': 120, 'ServerPort': 3269, 'CertificateValidation': False}) + The Active Directory that was added, modified or deleted by this module. + + +error_info (on HTTP error, dict, {'error_info': {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to connect to the LDAP or AD server because the entered credentials are invalid.', 'MessageArgs': [], 'MessageId': 'CSEC5002', 'RelatedProperties': [], 'Resolution': 'Make sure the server input configuration are valid and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}}}) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst new file mode 100644 index 00000000..2b34f1cd --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst @@ -0,0 +1,171 @@ +.. _ome_application_alerts_smtp_module: + + +ome_application_alerts_smtp -- This module allows to configure SMTP or email configurations +=========================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure SMTP or email configurations on OpenManage Enterprise and OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + destination_address (True, str, None) + The IP address or FQDN of the SMTP destination server. + + + port_number (optional, int, None) + The port number of the SMTP destination server. + + + use_ssl (optional, bool, None) + Use SSL to connect with the SMTP server. + + + enable_authentication (True, bool, None) + Enable or disable authentication to access the SMTP server. + + The *credentials* are mandatory if *enable_authentication* is ``True``. + + The module will always report change when this is ``True``. + + + credentials (optional, dict, None) + The credentials for the SMTP server + + + username (True, str, None) + The username to access the SMTP server. + + + password (True, str, None) + The password to access the SMTP server. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - The module will always report change when *enable_authentication* is ``True``. + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or OpenManage Enterprise Modular. 
- This module supports ``check_mode``.
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sachin Apagundi(@sachin-apa) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst new file mode 100644 index 00000000..d741e167 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst @@ -0,0 +1,167 @@ +.. _ome_application_alerts_syslog_module: + + +ome_application_alerts_syslog -- Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular +================================================================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + syslog_servers (optional, list, None) + List of servers to forward syslog. + + + id (True, int, None) + The ID of the syslog server. + + + enabled (optional, bool, None) + Enable or disable syslog forwarding. + + + destination_address (optional, str, None) + The IP address, FQDN or hostname of the syslog server. + + This is required if *enabled* is ``True``. + + + port_number (optional, int, None) + The UDP port number of the syslog server. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. 
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure single server to forward syslog + dellemc.openmanage.ome_application_alerts_syslog: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + enabled: true + destination_address: 192.168.0.2 + port_number: 514 + + - name: Configure multiple server to forward syslog + dellemc.openmanage.ome_application_alerts_syslog: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + port_number: 523 + - id: 2 + enabled: true + destination_address: sysloghost1.lab.com + - id: 3 + enabled: false + - id: 4 + enabled: true + destination_address: 192.168.0.4 + port_number: 514 + + + +Return Values +------------- + +msg (always, str, Successfully updated the syslog forwarding settings.) + Overall status of the syslog forwarding operation. 
+ + +syslog_details (on success, list, [AnsibleMapping([('DestinationAddress', '192.168.10.43'), ('Enabled', False), ('Id', 1), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.46'), ('Enabled', True), ('Id', 2), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.44'), ('Enabled', True), ('Id', 3), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.42'), ('Enabled', True), ('Id', 4), ('PortNumber', 515)])]) + Syslog forwarding settings list applied. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CAPP1108'), ('RelatedProperties', []), ('Message', 'Unable to update the Syslog settings because the request contains an invalid number of configurations. The request must contain no more than 4 configurations but contains 5.'), ('MessageArgs', ['4', '5']), ('Severity', 'Warning'), ('Resolution', 'Enter only the required number of configurations as identified in the message and retry the operation.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst new file mode 100644 index 00000000..e4fbec1c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst @@ -0,0 +1,173 @@ +.. _ome_application_certificate_module: + + +ome_application_certificate -- This module allows to generate a CSR and upload the certificate +============================================================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the generation a new certificate signing request (CSR) and to upload the certificate on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, generate_csr) + ``generate_csr`` allows the generation of a CSR and ``upload`` uploads the certificate. + + + distinguished_name (optional, str, None) + Name of the certificate issuer. This option is applicable for ``generate_csr``. + + + department_name (optional, str, None) + Name of the department that issued the certificate. This option is applicable for ``generate_csr``. + + + business_name (optional, str, None) + Name of the business that issued the certificate. This option is applicable for ``generate_csr``. + + + locality (optional, str, None) + Local address of the issuer of the certificate. This option is applicable for ``generate_csr``. + + + country_state (optional, str, None) + State in which the issuer resides. This option is applicable for ``generate_csr``. + + + country (optional, str, None) + Country in which the issuer resides. This option is applicable for ``generate_csr``. + + + email (optional, str, None) + Email associated with the issuer. This option is applicable for ``generate_csr``. + + + upload_file (optional, str, None) + Local path of the certificate file to be uploaded. This option is applicable for ``upload``. Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. 
+ + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - If a certificate is uploaded, which is identical to an already existing certificate, it is accepted by the module. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Generate a certificate signing request + dellemc.openmanage.ome_application_certificate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + distinguished_name: "hostname.com" + department_name: "Remote Access Group" + business_name: "Dell Inc." + locality: "Round Rock" + country_state: "Texas" + country: "US" + email: "support@dell.com" + + - name: Upload the certificate + dellemc.openmanage.ome_application_certificate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "upload" + upload_file: "/path/certificate.cer" + + + +Return Values +------------- + +msg (always, str, Successfully generated certificate signing request.) + Overall status of the certificate signing request. 
+ + +csr_status (on success, dict, AnsibleMapping([('CertificateData', '-----BEGIN CERTIFICATE REQUEST-----GHFSUEKLELE af3u4h2rkdkfjasczjfefhkrr/frjrfrjfrxnvzklf/nbcvxmzvndlskmcvbmzkdk kafhaksksvklhfdjtrhhffgeth/tashdrfstkm@kdjFGD/sdlefrujjfvvsfeikdf yeufghdkatbavfdomehtdnske/tahndfavdtdfgeikjlagmdfbandfvfcrfgdtwxc qwgfrteyupojmnsbajdkdbfs/ujdfgthedsygtamnsuhakmanfuarweyuiwruefjr etwuwurefefgfgurkjkdmbvfmvfvfk==-----END CERTIFICATE REQUEST-----')])) + Details of the generated certificate. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CSEC9002'), ('RelatedProperties', []), ('Message', 'Unable to upload the certificate because the certificate file provided is invalid.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Make sure the CA certificate and private key are correct and retry the operation.')])])]))])) + Details of the HTTP error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst new file mode 100644 index 00000000..b30d6652 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst @@ -0,0 +1,314 @@ +.. _ome_application_console_preferences_module: + + +ome_application_console_preferences -- Configure console preferences on OpenManage Enterprise. +============================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows user to configure the console preferences on OpenManage Enterprise. 
+ + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + report_row_limit (optional, int, None) + The maximum number of rows that you can view on OpenManage Enterprise reports. + + + device_health (optional, dict, None) + The time after which the health of the devices must be automatically monitored and updated on the OpenManage Enterprise dashboard. + + + health_check_interval (optional, int, None) + The frequency at which the device health must be recorded and data stored. + + + health_check_interval_unit (optional, str, None) + The time unit of the frequency at which the device health must be recorded and data stored. + + ``Hourly`` to set the frequency in hours. + + ``Minutes`` to set the frequency in minutes. + + + health_and_power_state_on_connection_lost (optional, str, None) + The latest recorded device health. + + ``last_known`` to display the latest recorded device health when the power connection was lost. + + ``unknown`` to display the latest recorded device health when the device status moved to unknown. + + + + discovery_settings (optional, dict, None) + The device naming to be used by the OpenManage Enterprise to identify the discovered iDRACs and other devices. + + + general_device_naming (optional, str, DNS) + Applicable to all the discovered devices other than the iDRACs. + + ``DNS`` to use the DNS name. + + ``NETBIOS`` to use the NetBIOS name. + + + server_device_naming (optional, str, IDRAC_SYSTEM_HOSTNAME) + Applicable to iDRACs only. + + ``IDRAC_HOSTNAME`` to use the iDRAC hostname. + + ``IDRAC_SYSTEM_HOSTNAME`` to use the system hostname. + + + invalid_device_hostname (optional, str, None) + The invalid hostnames separated by a comma. + + + common_mac_addresses (optional, str, None) + The common MAC addresses separated by a comma. + + + + server_initiated_discovery (optional, dict, None) + Server initiated discovery settings. 
+ + + device_discovery_approval_policy (optional, str, None) + Discovery approval policies. + + ``Automatic`` allows servers with iDRAC Firmware version 4.00.00.00, which are on the same network as the console, to be discovered automatically by the console. + + ``Manual`` for the servers to be discovered by the user manually. + + + set_trap_destination (optional, bool, None) + Trap destination settings. + + + + mx7000_onboarding_preferences (optional, str, None) + Alert-forwarding behavior on chassis when they are onboarded. + + ``all`` to receive all alerts. + + ``chassis`` to receive chassis category alerts only. + + + builtin_appliance_share (optional, dict, None) + The external network share that the appliance must access to complete operations. + + + share_options (optional, str, None) + The share options. + + ``CIFS`` to select CIFS share type. + + ``HTTPS`` to select HTTPS share type. + + + cifs_options (optional, str, None) + The SMB protocol version. + + *cifs_options* is required when *share_options* is ``CIFS``. + + ``V1`` to enable SMBv1. + + ``V2`` to enable SMBv2. + + + + email_sender_settings (optional, str, None) + The email address of the user who is sending an email message. + + + trap_forwarding_format (optional, str, None) + The trap forwarding format. + + ``Original`` to retain the trap data as is. + + ``Normalized`` to normalize the trap data. + + + metrics_collection_settings (optional, int, None) + The frequency of the PowerManager extension data maintenance and purging. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. 
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update Console preferences with all the settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 123 + device_health: + health_check_interval: 1 + health_check_interval_unit: Hourly + health_and_power_state_on_connection_lost: last_known + discovery_settings: + general_device_naming: DNS + server_device_naming: IDRAC_HOSTNAME + invalid_device_hostname: "localhost" + common_mac_addresses: "::" + server_initiated_discovery: + device_discovery_approval_policy: Automatic + set_trap_destination: True + mx7000_onboarding_preferences: all + builtin_appliance_share: + share_options: CIFS + cifs_options: V1 + email_sender_settings: "admin@dell.com" + trap_forwarding_format: Normalized + metrics_collection_settings: 31 + + - name: Update Console preferences with report and device health settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 236 + device_health: + health_check_interval: 10 + health_check_interval_unit: Hourly + health_and_power_state_on_connection_lost: last_known + + - name: Update Console preferences with invalid device health settings. 
+ dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_health: + health_check_interval: 65 + health_check_interval_unit: Minutes + + - name: Update Console preferences with discovery and built in appliance share settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_settings: + general_device_naming: DNS + server_device_naming: IDRAC_SYSTEM_HOSTNAME + invalid_device_hostname: "localhost" + common_mac_addresses: "00:53:45:00:00:00" + builtin_appliance_share: + share_options: CIFS + cifs_options: V1 + + - name: Update Console preferences with server initiated discovery, mx7000 onboarding preferences, email sender, + trap forwarding format, and metrics collection settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + server_initiated_discovery: + device_discovery_approval_policy: Automatic + set_trap_destination: True + mx7000_onboarding_preferences: chassis + email_sender_settings: "admin@dell.com" + trap_forwarding_format: Original + metrics_collection_settings: 365 + + + +Return Values +------------- + +msg (always, str, Successfully update the console preferences.) + Overall status of the console preferences. 
+ + +console_preferences (on success, list, [AnsibleMapping([('Name', 'DEVICE_PREFERRED_NAME'), ('DefaultValue', 'SLOT_NAME'), ('Value', 'PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'INVALID_DEVICE_HOSTNAME'), ('DefaultValue', ''), ('Value', 'localhost,localhost.localdomain,not defined,pv132t,pv136t,default,dell,idrac-'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'COMMON_MAC_ADDRESSES'), ('DefaultValue', ''), ('Value', '00:53:45:00:00:00,33:50:6F:45:30:30,50:50:54:50:30:30,00:00:FF:FF:FF:FF,20:41:53:59:4E:FF,00:00:00:00:00:00,20:41:53:59:4e:ff,00:00:00:00:00:00'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'SHARE_TYPE'), ('DefaultValue', 'CIFS'), ('Value', 'CIFS'), ('DataType', 'java.lang.String'), ('GroupName', 'BUILT_IN_APPLIANCE_SHARE_SETTINGS')]), AnsibleMapping([('Name', 'TRAP_FORWARDING_SETTING'), ('DefaultValue', 'AsIs'), ('Value', 'Normalized'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'DATA_PURGE_INTERVAL'), ('DefaultValue', '365'), ('Value', '3650000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'CONSOLE_CONNECTION_SETTING'), ('DefaultValue', 'last_known'), ('Value', 'last_known'), ('DataType', 'java.lang.String'), ('GroupName', 'CONSOLE_CONNECTION_SETTING')]), AnsibleMapping([('Name', 'MIN_PROTOCOL_VERSION'), ('DefaultValue', 'V2'), ('Value', 'V1'), ('DataType', 'java.lang.String'), ('GroupName', 'CIFS_PROTOCOL_SETTINGS')]), AnsibleMapping([('Name', 'ALERT_ACKNOWLEDGEMENT_VIEW'), ('DefaultValue', '2000'), ('Value', '2000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'AUTO_CONSOLE_UPDATE_AFTER_DOWNLOAD'), ('DefaultValue', 'false'), ('Value', 'false'), ('DataType', 'java.lang.Boolean'), ('GroupName', 'CONSOLE_UPDATE_SETTING_GROUP')]), 
AnsibleMapping([('Name', 'NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION'), ('DefaultValue', 'false'), ('Value', 'false'), ('DataType', 'java.lang.Boolean'), ('GroupName', '')]), AnsibleMapping([('Name', 'REPORTS_MAX_RESULTS_LIMIT'), ('DefaultValue', '0'), ('Value', '2000000000000000000000000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'EMAIL_SENDER'), ('DefaultValue', 'omcadmin@dell.com'), ('Value', 'admin1@dell.com@dell.com@dell.com'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'MX7000_ONBOARDING_PREF'), ('DefaultValue', 'all'), ('Value', 'test_chassis'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'DISCOVERY_APPROVAL_POLICY'), ('DefaultValue', 'Automatic'), ('Value', 'Automatic_test'), ('DataType', 'java.lang.String'), ('GroupName', '')])]) + Details of the console preferences. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1006'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because the resource URI does not exist or is not implemented.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Enter a valid URI and retry the operation.')])])]))])) + Details of the HTTP error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sachin Apagundi(@sachin-apa) +- Husniya Hameed (@husniya-hameed) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst new file mode 100644 index 00000000..c3e3228b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst @@ -0,0 +1,394 @@ +.. 
_ome_application_network_address_module: + + +ome_application_network_address -- Updates the network configuration on OpenManage Enterprise +============================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the configuration of a DNS and an IPV4 or IPV6 network on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + enable_nic (optional, bool, True) + Enable or disable Network Interface Card (NIC) configuration. + + + interface_name (optional, str, None) + If there are multiple interfaces, network configuration changes can be applied to a single interface using the interface name of the NIC. + + If this option is not specified, Primary interface is chosen by default. + + + ipv4_configuration (optional, dict, None) + IPv4 network configuration. + + *Warning* Ensure that you have an alternate interface to access OpenManage Enterprise as these options can change the current IPv4 address for *hostname*. + + + enable (True, bool, None) + Enable or disable access to the network using IPv4. + + + enable_dhcp (optional, bool, None) + Enable or disable the automatic request to get an IPv4 address from the IPv4 Dynamic Host Configuration Protocol (DHCP) server + + If *enable_dhcp* option is true, OpenManage Enterprise retrieves the IP configuration—IPv4 address, subnet mask, and gateway from a DHCP server on the existing network. + + + static_ip_address (optional, str, None) + Static IPv4 address + + This option is applicable when *enable_dhcp* is false. + + + static_subnet_mask (optional, str, None) + Static IPv4 subnet mask address + + This option is applicable when *enable_dhcp* is false. + + + static_gateway (optional, str, None) + Static IPv4 gateway address + + This option is applicable when *enable_dhcp* is false. 
+ + + use_dhcp_for_dns_server_names (optional, bool, None) + This option allows to automatically request and obtain a DNS server IPv4 address from the DHCP server. + + This option is applicable when *enable_dhcp* is true. + + + static_preferred_dns_server (optional, str, None) + Static IPv4 DNS preferred server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + static_alternate_dns_server (optional, str, None) + Static IPv4 DNS alternate server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + + ipv6_configuration (optional, dict, None) + IPv6 network configuration. + + *Warning* Ensure that you have an alternate interface to access OpenManage Enterprise as these options can change the current IPv6 address for *hostname*. + + + enable (True, bool, None) + Enable or disable access to the network using the IPv6. + + + enable_auto_configuration (optional, bool, None) + Enable or disable the automatic request to get an IPv6 address from the IPv6 DHCP server or router advertisements(RA) + + If *enable_auto_configuration* is true, OME retrieves IP configuration-IPv6 address, prefix, and gateway, from a DHCPv6 server on the existing network + + + static_ip_address (optional, str, None) + Static IPv6 address + + This option is applicable when *enable_auto_configuration* is false. + + + static_prefix_length (optional, int, None) + Static IPv6 prefix length + + This option is applicable when *enable_auto_configuration* is false. + + + static_gateway (optional, str, None) + Static IPv6 gateway address + + This option is applicable when *enable_auto_configuration* is false. + + + use_dhcp_for_dns_server_names (optional, bool, None) + This option allows to automatically request and obtain a DNS server IPv6 address from the DHCP server. 
+ + This option is applicable when *enable_auto_configuration* is true + + + static_preferred_dns_server (optional, str, None) + Static IPv6 DNS preferred server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + static_alternate_dns_server (optional, str, None) + Static IPv6 DNS alternate server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + + management_vlan (optional, dict, None) + vLAN configuration. + + These settings are applicable for OpenManage Enterprise Modular. + + + enable_vlan (True, bool, None) + Enable or disable vLAN for management. + + The vLAN configuration cannot be updated if the *register_with_dns* field under *dns_configuration* is true. + + *WARNING* Ensure that the network cable is plugged to the correct port after the vLAN configuration changes have been made. If not, the configuration change may not be effective. + + + vlan_id (optional, int, None) + vLAN ID. + + This option is applicable when *enable_vlan* is true. + + + + dns_configuration (optional, dict, None) + Domain Name System(DNS) settings. + + + register_with_dns (optional, bool, None) + Register/Unregister *dns_name* on the DNS Server. + + This option cannot be updated if vLAN configuration changes. + + + use_dhcp_for_dns_domain_name (optional, bool, None) + Get the *dns_domain_name* using a DHCP server. + + + dns_name (optional, str, None) + DNS name for *hostname* + + This is applicable when *register_with_dns* is true. + + + dns_domain_name (optional, str, None) + Static DNS domain name + + This is applicable when *use_dhcp_for_dns_domain_name* is false. + + + + reboot_delay (optional, int, None) + The time in seconds, after which settings are applied. + + This option is not mandatory. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. 
+ + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - The configuration changes can only be applied to one interface at a time. + - The system management consoles might be unreachable for some time after the configuration changes are applied. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: IPv4 network configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: true + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + reboot_delay: 5 + + - name: IPv6 network configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + + - name: Management vLAN configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + management_vlan: + enable_vlan: true + vlan_id: 3344 + dns_configuration: + register_with_dns: false + reboot_delay: 1 + + - name: DNS settings + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: 
"dnslocaldomain" + + - name: Disable nic interface eth1 + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: false + interface_name: eth1 + + - name: Complete network settings for interface eth1 + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: true + interface_name: eth1 + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "dnslocaldomain" + reboot_delay: 5 + + + +Return Values +------------- + +msg (always, str, Successfully updated network address configuration) + Overall status of the network address configuration change. 
+ + +network_configuration (on success, dict, AnsibleMapping([('Delay', 0), ('DnsConfiguration', AnsibleMapping([('DnsDomainName', ''), ('DnsName', 'MX-SVCTAG'), ('RegisterWithDNS', False), ('UseDHCPForDNSDomainName', True)])), ('EnableNIC', True), ('InterfaceName', 'eth0'), ('PrimaryInterface', True), ('Ipv4Configuration', AnsibleMapping([('Enable', True), ('EnableDHCP', False), ('StaticAlternateDNSServer', ''), ('StaticGateway', '192.168.0.2'), ('StaticIPAddress', '192.168.0.3'), ('StaticPreferredDNSServer', '192.168.0.4'), ('StaticSubnetMask', '255.255.254.0'), ('UseDHCPForDNSServerNames', False)])), ('Ipv6Configuration', AnsibleMapping([('Enable', True), ('EnableAutoConfiguration', True), ('StaticAlternateDNSServer', ''), ('StaticGateway', ''), ('StaticIPAddress', ''), ('StaticPreferredDNSServer', ''), ('StaticPrefixLength', 0), ('UseDHCPForDNSServerNames', True)])), ('ManagementVLAN', AnsibleMapping([('EnableVLAN', False), ('Id', 1)]))])) + Updated application network address configuration. 
+ + +job_info (on success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'system'), ('Editable', True), ('EndTime', None), ('Id', 14902), ('JobDescription', 'Generic OME runtime task'), ('JobName', 'OMERealtime_Task'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 207), ('Internal', True), ('Name', 'OMERealtime_Task')])), ('LastRun', None), ('LastRunStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 14902), ('Key', 'Nmcli_Update'), ('Value', '{"interfaceName":"eth0","profileName":"eth0","enableNIC":true, "ipv4Configuration":{"enable":true,"enableDHCP":true,"staticIPAddress":"", "staticSubnetMask":"","staticGateway":"","useDHCPForDNSServerNames":true, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "ipv6Configuration":{"enable":false,"enableAutoConfiguration":true,"staticIPAddress":"", "staticPrefixLength":0,"staticGateway":"","useDHCPForDNSServerNames":false, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "managementVLAN":{"enableVLAN":false,"id":0},"dnsConfiguration":{"registerWithDNS":false, "dnsName":"","useDHCPForDNSDomainName":false,"dnsDomainName":"","fqdndomainName":"", "ipv4CurrentPreferredDNSServer":"","ipv4CurrentAlternateDNSServer":"", "ipv6CurrentPreferredDNSServer":"","ipv6CurrentAlternateDNSServer":""}, "currentSettings":{"ipv4Address":[],"ipv4Gateway":"","ipv4Dns":[],"ipv4Domain":"", "ipv6Address":[],"ipv6LinkLocalAddress":"","ipv6Gateway":"","ipv6Dns":[], "ipv6Domain":""},"delay":0,"primaryInterface":true,"modifiedConfigs":{}}')])]), ('Schedule', 'startnow'), ('StartTime', None), ('State', 'Enabled'), ('Targets', []), ('UpdatedBy', None), ('Visible', True)])) + Details of the job to update in case OME version is >= 3.3. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to update the address configuration because a dependent field is missing for Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration .'), ('MessageArgs', ['Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration']), ('MessageId', 'CAPP1304'), ('RelatedProperties', []), ('Resolution', 'Make sure that all dependent fields contain valid content and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst new file mode 100644 index 00000000..2c5d1bd0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst @@ -0,0 +1,183 @@ +.. _ome_application_network_proxy_module: + + +ome_application_network_proxy -- Updates the proxy configuration on OpenManage Enterprise +========================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure a network proxy on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + enable_proxy (True, bool, None) + Enables or disables the HTTP proxy configuration. + + If *enable_proxy* is false, then the HTTP proxy configuration is set to its default value. 
+ + + ip_address (optional, str, None) + Proxy server address. + + This option is mandatory when *enable_proxy* is true. + + + proxy_port (optional, int, None) + Proxy server's port number. + + This option is mandatory when *enable_proxy* is true. + + + enable_authentication (optional, bool, None) + Enable or disable proxy authentication. + + If *enable_authentication* is true, *proxy_username* and *proxy_password* must be provided. + + If *enable_authentication* is false, the proxy username and password are set to its default values. + + + proxy_username (optional, str, None) + Proxy server username. + + This option is mandatory when *enable_authentication* is true. + + + proxy_password (optional, str, None) + Proxy server password. + + This option is mandatory when *enable_authentication* is true. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Update proxy configuration and enable authentication + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: true + proxy_username: "proxy_username" + proxy_password: "proxy_password" + + - name: Reset proxy authentication + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: false + + - name: Reset proxy configuration + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: false + + + +Return Values +------------- + +msg (always, str, Successfully updated network proxy configuration.) + Overall status of the network proxy configuration change. + + +proxy_configuration (success, dict, AnsibleMapping([('EnableAuthentication', True), ('EnableProxy', True), ('IpAddress', '192.168.0.2'), ('Password', None), ('PortNumber', 444), ('Username', 'root')])) + Updated application network proxy configuration. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.'), ('MessageArgs', ['PortNumber']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst new file mode 100644 index 00000000..ab630209 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst @@ -0,0 +1,238 @@ +.. _ome_application_network_settings_module: + + +ome_application_network_settings -- This module allows you to configure the session inactivity timeout settings +=============================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows you to configure the session inactivity timeout settings on OpenManage Enterprise and OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + session_inactivity_timeout (optional, dict, None) + Session inactivity timeout settings. + + + enable_universal_timeout (optional, bool, None) + Enable or disable the universal inactivity timeout. + + + universal_timeout (optional, float, None) + Duration of inactivity in minutes after which all sessions end. + + This is applicable when *enable_universal_timeout* is ``true``. + + This is mutually exclusive with *api_timeout*, *gui_timeout*, *ssh_timeout* and *serial_timeout*. + + + api_timeout (optional, float, None) + Duration of inactivity in minutes after which the API session ends. + + This is mutually exclusive with *universal_timeout*. + + + api_sessions (optional, int, None) + The maximum number of API sessions to be allowed. + + + gui_timeout (optional, float, None) + Duration of inactivity in minutes after which the web interface of Graphical User Interface (GUI) session ends. 
+ + This is mutually exclusive with *universal_timeout*. + + + gui_sessions (optional, int, None) + The maximum number of GUI sessions to be allowed. + + + ssh_timeout (optional, float, None) + Duration of inactivity in minutes after which the SSH session ends. + + This is applicable only for OpenManage Enterprise Modular. + + This is mutually exclusive with *universal_timeout*. + + + ssh_sessions (optional, int, None) + The maximum number of SSH sessions to be allowed. + + This is applicable to OME-M only. + + + serial_timeout (optional, float, None) + Duration of inactivity in minutes after which the serial console session ends. + + This is applicable only for OpenManage Enterprise Modular. + + This is mutually exclusive with *universal_timeout*. + + + serial_sessions (optional, int, None) + The maximum number of serial console sessions to be allowed. + + This is applicable only for OpenManage Enterprise Modular. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. 
note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or OpenManage Enterprise Modular. + - To configure other network settings such as network address, web server, and so on, refer to the respective OpenManage Enterprise application network setting modules. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure universal inactivity timeout + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: true + universal_timeout: 30 + api_sessions: 90 + gui_sessions: 5 + ssh_sessions: 2 + serial_sessions: 1 + + - name: Configure API and GUI timeout and sessions + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 25 + gui_sessions: 5 + + - name: Configure timeout and sessions for all parameters + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 + + - name: Disable universal timeout and configure timeout and sessions for other parameters + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: false + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 + + + +Return Values +------------- + +msg (always, str, Successfully updated the session 
timeout settings.) + Overall status of the Session timeout settings. + + +session_inactivity_setting (success, dict, [AnsibleMapping([('SessionType', 'API'), ('MaxSessions', 32), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 100), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'GUI'), ('MaxSessions', 6), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 7200000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 6), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'SSH'), ('MaxSessions', 4), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 10800000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 4), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'Serial'), ('MaxSessions', 1), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 1), ('MaxSessionsConfigurable', False), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'UniversalTimeout'), ('MaxSessions', 0), ('SessionTimeout', -1), ('MinSessionTimeout', -1), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 0), ('MaxSessionsAllowed', 0), ('MaxSessionsConfigurable', False), ('SessionTimeoutConfigurable', True)])]) + Returned when session inactivity timeout settings are updated successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CUSR1233'), ('RelatedProperties', []), ('Message', 'The number of allowed concurrent sessions for API must be between 1 and 100 sessions.'), ('MessageArgs', ['API', '1', '100']), ('Severity', 'Critical'), ('Resolution', 'Enter values in the correct range and retry the operation.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sachin Apagundi(@sachin-apa) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst new file mode 100644 index 00000000..6c884a15 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst @@ -0,0 +1,173 @@ +.. _ome_application_network_time_module: + + +ome_application_network_time -- Updates the network time on OpenManage Enterprise +================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the configuration of network time on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + enable_ntp (True, bool, None) + Enables or disables Network Time Protocol(NTP). + + If *enable_ntp* is false, then the NTP addresses reset to their default values. + + + system_time (optional, str, None) + Time in the current system. + + This option is only applicable when *enable_ntp* is false. + + This option must be provided in following format 'yyyy-mm-dd hh:mm:ss'. + + + time_zone (optional, str, None) + The valid timezone ID to be used. + + This option is applicable for both system time and NTP time synchronization. + + + primary_ntp_address (optional, str, None) + The primary NTP address. 
+ + This option is applicable when *enable_ntp* is true. + + + secondary_ntp_address1 (optional, str, None) + The first secondary NTP address. + + This option is applicable when *enable_ntp* is true. + + + secondary_ntp_address2 (optional, str, None) + The second secondary NTP address. + + This option is applicable when *enable_ntp* is true. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Configure system time + dellemc.openmanage.ome_application_network_time: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_ntp: false + system_time: "2020-03-31 21:35:18" + time_zone: "TZ_ID_11" + + - name: Configure NTP server for time synchronization + dellemc.openmanage.ome_application_network_time: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_ntp: true + time_zone: "TZ_ID_66" + primary_ntp_address: "192.168.0.2" + secondary_ntp_address1: "192.168.0.2" + secondary_ntp_address2: "192.168.0.4" + + + +Return Values +------------- + +msg (always, str, Successfully configured network time.) + Overall status of the network time configuration change. + + +proxy_configuration (success, dict, AnsibleMapping([('EnableNTP', False), ('JobId', None), ('PrimaryNTPAddress', None), ('SecondaryNTPAddress1', None), ('SecondaryNTPAddress2', None), ('SystemTime', None), ('TimeSource', 'Local Clock'), ('TimeZone', 'TZ_ID_1'), ('TimeZoneIdLinux', None), ('TimeZoneIdWindows', None), ('UtcTime', None)])) + Updated application network time configuration. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for SystemTime is missing or an invalid value is entered.'), ('MessageArgs', ['SystemTime']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst new file mode 100644 index 00000000..9add772b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst @@ -0,0 +1,150 @@ +.. _ome_application_network_webserver_module: + + +ome_application_network_webserver -- Updates the Web server configuration on OpenManage Enterprise +================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure a network web server on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + webserver_port (optional, int, None) + Port number used by OpenManage Enterprise to establish a secure server connection. + + *WARNING* A change in port number results in a loss of connectivity in the current session for more than a minute. + + + webserver_timeout (optional, int, None) + The duration in minutes after which a web user interface session is automatically disconnected. + + If a change is made to the session timeout, it will only take effect after the next log in. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. 
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update web server port and session time out + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 9443 + webserver_timeout: 20 + + - name: Update session time out + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_timeout: 30 + + - name: Update web server port + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 8443 + + + +Return Values +------------- + +msg (always, str, Successfully updated network web server configuration.) + Overall status of the network web server configuration change. + + +webserver_configuration (success, dict, AnsibleMapping([('TimeOut', 20), ('PortNumber', 443), ('EnableWebServer', True)])) + Updated application network web server configuration. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.'), ('MessageArgs', ['PortNumber']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst new file mode 100644 index 00000000..f99ca189 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst @@ -0,0 +1,229 @@ +.. _ome_application_security_settings_module: + + +ome_application_security_settings -- Configure the login security properties +============================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows you to configure the login security properties on OpenManage Enterprise or OpenManage Enterprise Modular + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + restrict_allowed_ip_range (optional, dict, None) + Restrict to allow inbound connections only from the specified IP address range. + + This is mutually exclusive with *fips_mode_enable*. 
+ + ``NOTE`` When *restrict_allowed_ip_range* is configured on the appliance, any inbound connection to the appliance, such as alert reception, firmware update, and network identities are blocked from the devices that are outside the specified IP address range. However, any outbound connection from the appliance will work on all devices. + + + enable_ip_range (True, bool, None) + Allow connections based on the IP address range. + + + ip_range (optional, str, None) + The IP address range in Classless Inter-Domain Routing (CIDR) format. For example: 192.168.100.14/24 or 2001:db8::/24 + + + + login_lockout_policy (optional, dict, None) + Locks the application after multiple unsuccessful login attempts. + + This is mutually exclusive with *fips_mode_enable*. + + + by_user_name (optional, bool, None) + Enable or disable lockout policy settings based on the user name. This restricts the number of unsuccessful login attempts from a specific user for a specific time interval. + + + by_ip_address (optional, bool, None) + Enable or disable lockout policy settings based on the IP address. This restricts the number of unsuccessful login attempts from a specific IP address for a specific time interval. + + + lockout_fail_count (optional, int, None) + The number of unsuccessful login attempts that are allowed after which the appliance prevents log in from the specific username or IP Address. + + + lockout_fail_window (optional, int, None) + Lockout fail window is the time in seconds within which the lockout fail count event must occur to trigger the lockout penalty time. Enter the duration for which OpenManage Enterprise must display information about a failed attempt. + + + lockout_penalty_time (optional, int, None) + The duration of time, in seconds, that login attempts from the specific user or IP address must not be allowed. + + + + job_wait (optional, bool, True) + Provides an option to wait for job completion. 
+ + + job_wait_timeout (optional, int, 120) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + fips_mode_enable (optional, bool, None) + The FIPS mode is intended to meet the requirements of FIPS 140-2 level 1. For more information, refer to the FIPS user guide. + + This is applicable only for OpenManage Enterprise Modular. + + This is mutually exclusive with *restrict_allowed_ip_range* and *login_lockout_policy*. + + ``WARNING`` Enabling or Disabling this option resets your chassis to default settings. This may cause change in IP settings and loss of network connectivity. + + ``WARNING`` The FIPS mode cannot be enabled on a lead chassis in a multi-chassis management configuration. To toggle enable FIPS on a lead chassis, delete the chassis group, enable FIPS and recreate the group. + + ``WARNING`` For a Standalone or member chassis, enabling the FIPS mode deletes any fabrics created. This may cause loss of network connectivity and data paths to the compute sleds. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure restricted allowed IP range + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + + - name: Configure login lockout policy + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + + - name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + job_wait_timeout: 60 + + - name: Enable FIPS mode + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fips_mode_enable: yes + + + +Return Values +------------- + +msg (always, str, Successfully applied the security settings.) + Overall status of the login security configuration. + + +job_id (When security configuration properties are provided, int, 10123) + Job ID of the security configuration task. 
+ + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to process the request because the domain information cannot be retrieved.'), ('MessageArgs', []), ('MessageId', 'CGEN8007'), ('RelatedProperties', []), ('Resolution', 'Verify the status of the database and domain configuration, and then retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst new file mode 100644 index 00000000..60463fe0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst @@ -0,0 +1,229 @@ +.. _ome_chassis_slots_module: + + +ome_chassis_slots -- Rename sled slots on OpenManage Enterprise Modular +======================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to rename sled slots on OpenManage Enterprise Modular either using device id or device service tag or using chassis service tag and slot number. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_options (optional, list, None) + The ID or service tag of the sled in the slot and the new name for the slot. + + *device_options* is mutually exclusive with *slot_options*. + + + device_id (optional, int, None) + Device ID of the sled in the slot. + + This is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + Service tag of the sled in the slot. 
+ + This is mutually exclusive with *device_id*. + + + slot_name (True, str, None) + Provide name for the slot. + + + + slot_options (optional, list, None) + The service tag of the chassis, slot number of the slot to be renamed, and the new name for the slot. + + *slot_options* is mutually exclusive with *device_options*. + + + chassis_service_tag (True, str, None) + Service tag of the chassis. + + + slots (True, list, None) + The slot number and the new name for the slot. + + + slot_number (True, int, None) + The slot number of the slot to be renamed. + + + slot_name (True, str, None) + Provide name for the slot. + + + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module initiates the refresh inventory task. It may take a minute for new names to be reflected. If the task exceeds 300 seconds to refresh, the task times out. + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Rename the slots in multiple chassis using slot number and chassis service tag + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + slot_options: + - chassis_service_tag: ABC1234 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + - chassis_service_tag: ABC1235 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + + - name: Rename single slot name of the sled using sled ID + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: slot_device_name_1 + + - name: Rename single slot name of the sled using sled service tag + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_service_tag: ABC1234 + slot_name: service_tag_slot + + - name: Rename multiple slot names of the devices + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: sled_name_1 + - device_service_tag: ABC1234 + slot_name: sled_name_2 + - device_id: 10055 + slot_name: sled_name_3 + - device_service_tag: PQR1234 + slot_name: sled_name_4 + + + +Return Values +------------- + +msg (always, str, Successfully renamed the slot(s).) + Overall status of the slot rename operation. 
+ + +slot_info (if at least one slot renamed, list, [AnsibleMapping([('ChassisId', 10053), ('ChassisServiceTag', 'ABCD123'), ('DeviceName', ''), ('DeviceType', 1000), ('JobId', 15746), ('SlotId', '10072'), ('SlotName', 'slot_op2'), ('SlotNumber', '6'), ('SlotType', 2000)]), AnsibleMapping([('ChassisId', 10053), ('ChassisName', 'MX-ABCD123'), ('ChassisServiceTag', 'ABCD123'), ('DeviceType', '3000'), ('JobId', 15747), ('SlotId', '10070'), ('SlotName', 'slot_op2'), ('SlotNumber', '4'), ('SlotType', '2000')]), AnsibleMapping([('ChassisId', '10053'), ('ChassisName', 'MX-PQRS123'), ('ChassisServiceTag', 'PQRS123'), ('DeviceId', '10054'), ('DeviceServiceTag', 'XYZ5678'), ('DeviceType', '1000'), ('JobId', 15761), ('SlotId', '10067'), ('SlotName', 'a1'), ('SlotNumber', '1'), ('SlotType', '2000')])]) + Information of the slots that are renamed successfully. + + The ``DeviceServiceTag`` and ``DeviceId`` options are available only if *device_options* is used. + + ``NOTE`` Only the slots which were renamed are listed. + + +rename_failed_slots (if at least one slot renaming fails, list, [AnsibleMapping([('ChassisId', '12345'), ('ChassisName', 'MX-ABCD123'), ('ChassisServiceTag', 'ABCD123'), ('DeviceType', '4000'), ('JobId', 1234), ('JobStatus', 'Aborted'), ('SlotId', '10061'), ('SlotName', 'c2'), ('SlotNumber', '1'), ('SlotType', '4000')]), AnsibleMapping([('ChassisId', '10053'), ('ChassisName', 'MX-PQRS123'), ('ChassisServiceTag', 'PQRS123'), ('DeviceType', '1000'), ('JobId', 0), ('JobStatus', 'HTTP Error 400: Bad Request'), ('SlotId', '10069'), ('SlotName', 'b2'), ('SlotNumber', '3'), ('SlotType', '2000')])]) + Information of the valid slots that are not renamed. + + ``JobStatus`` is shown if rename job fails. + + ``NOTE`` Only slots which were not renamed are listed. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1014'), ('RelatedProperties', []), ('Message', 'Unable to complete the operation because an invalid value is entered for the property Invalid json type: STRING for Edm.Int64 property: Id .'), ('MessageArgs', ['Invalid json type: STRING for Edm.Int64 property: Id']), ('Severity', 'Critical'), ('Resolution', "Enter a valid value for the property and retry the operation. For more information about valid values, see the OpenManage Enterprise-Modular User's Guide available on the support site.")])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst new file mode 100644 index 00000000..d4d2c53a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst @@ -0,0 +1,297 @@ +.. _ome_configuration_compliance_baseline_module: + + +ome_configuration_compliance_baseline -- Create, modify, and delete a configuration compliance baseline and remediate non-compliant devices on OpenManage Enterprise +==================================================================================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, and delete a configuration compliance baseline on OpenManage Enterprise. This module also allows to remediate devices that are non-compliant with the baseline by changing the attributes of devices to match with the associated baseline attributes. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, create) + ``create`` creates a configuration baseline from an existing compliance template. ``create`` supports ``check_mode`` or idempotency checking for only *names*. + + ``modify`` modifies an existing baseline. Only *names*, *description*, *device_ids*, *device_service_tags*, and *device_group_names* can be modified. + + *WARNING* When a baseline is modified, the provided *device_ids*, *device_group_names*, and *device_service_tags* replace the devices previously present in the baseline. + + ``delete`` deletes the list of configuration compliance baselines based on the baseline name. Invalid baseline names are ignored. + + ``remediate`` remediates devices that are non-compliant with the baseline by changing the attributes of devices to match with the associated baseline attributes. + + ``remediate`` is performed on all the non-compliant devices if either *device_ids*, or *device_service_tags* is not provided. + + + names (True, list, None) + Name(s) of the configuration compliance baseline. + + This option is applicable when *command* is ``create``, ``modify``, or ``delete``. + + Provide the list of configuration compliance baseline names that are supported when *command* is ``delete``. + + + new_name (optional, str, None) + New name of the compliance baseline to be modified. + + This option is applicable when *command* is ``modify``. + + + template_name (optional, str, None) + Name of the compliance template for creating the compliance baseline(s). + + Name of the deployment template to be used for creating a compliance baseline. + + This option is applicable when *command* is ``create`` and is mutually exclusive with *template_id*. + + + template_id (optional, int, None) + ID of the deployment template to be used for creating a compliance baseline. + + This option is applicable when *command* is ``create`` and is mutually exclusive with *template_name*. 
+ + + device_ids (optional, list, None) + IDs of the target devices. + + This option is applicable when *command* is ``create``, ``modify``, or ``remediate``, and is mutually exclusive with *device_service_tag* and *device_group_names*. + + + device_service_tags (optional, list, None) + Service tag of the target device. + + This option is applicable when *command* is ``create``, ``modify``, or ``remediate`` and is mutually exclusive with *device_ids* and *device_group_names*. + + + device_group_names (optional, list, None) + Name of the target device group. + + This option is applicable when *command* is ``create``, or ``modify`` and is mutually exclusive with *device_ids* and *device_service_tag*. + + + description (optional, str, None) + Description of the compliance baseline. + + This option is applicable when *command* is ``create``, or ``modify``. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This option is applicable when *command* is ``create``, ``modify``, or ``remediate``. + + + job_wait_timeout (optional, int, 10800) + The maximum wait time of *job_wait* in seconds. The job will only be tracked for this duration. + + This option is applicable when *job_wait* is ``True``. + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module supports ``check_mode``. + - Ensure that the devices have the required licenses to perform the baseline compliance operations. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create a configuration compliance baseline using device IDs + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline1" + template_name: "template1" + description: "description of baseline" + device_ids: + - 1111 + - 2222 + + - name: Create a configuration compliance baseline using device service tags + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline1" + template_id: 1234 + description: "description of baseline" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + + - name: Create a configuration compliance baseline using group names + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline2" + template_id: 2 + job_wait_timeout: 1000 + description: "description of baseline" + device_group_names: + - "Group1" + - "Group2" + + - name: Delete the configuration compliance baselines + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: delete + names: + - baseline1 + - baseline2 + + - name: Modify a configuration compliance baseline using group names + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: modify + names: 
"baseline1" + new_name: "baseline_update" + template_name: "template2" + description: "new description of baseline" + job_wait_timeout: 1000 + device_group_names: + - Group1 + + - name: Remediate specific non-compliant devices to a configuration compliance baseline using device IDs + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_ids: + - 1111 + + - name: Remediate specific non-compliant devices to a configuration compliance baseline using device service tags + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + + - name: Remediate all the non-compliant devices to a configuration compliance baseline + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + + + +Return Values +------------- + +msg (always, str, Successfully created the configuration compliance baseline.) + Overall status of the configuration compliance baseline operation. 
+ + +incompatible_devices (when I(device_service_tags) or I(device_ids) contains incompatible devices for C(create) or C(modify), list, [1234, 5678]) + Details of the devices which cannot be used to perform baseline compliance operations + + +compliance_status (when I(command) is C(create) or C(modify), dict, AnsibleMapping([('Id', 13), ('Name', 'baseline1'), ('Description', None), ('TemplateId', 102), ('TemplateName', 'one'), ('TemplateType', 2), ('TaskId', 26584), ('PercentageComplete', '100'), ('TaskStatus', 2070), ('LastRun', '2021-02-27 13:15:13.751'), ('BaselineTargets', [AnsibleMapping([('Id', 1111), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('ConfigComplianceSummary', AnsibleMapping([('ComplianceStatus', 'OK'), ('NumberOfCritical', 0), ('NumberOfWarning', 0), ('NumberOfNormal', 0), ('NumberOfIncomplete', 0)]))])) + Status of compliance baseline operation. + + +job_id (when I(command) is C(remediate), int, 14123) + Task ID created when *command* is ``remediate``. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst new file mode 100644 index 00000000..feeadd16 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst @@ -0,0 +1,155 @@ +.. _ome_configuration_compliance_info_module: + + +ome_configuration_compliance_info -- Device compliance report for devices managed in OpenManage Enterprise +========================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the generation of a compliance report of a specific or all of devices in a configuration compliance baseline. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + baseline (True, str, None) + The name of the created baseline. + + A compliance report is generated even when the template is not associated with the baseline. + + + device_id (False, int, None) + The ID of the target device which is associated with the *baseline*. + + + device_service_tag (False, str, None) + The device service tag of the target device associated with the *baseline*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. 
+ + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline. + dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + + - name: Retrieve the compliance report for a specific device associated with the baseline using the device ID. + dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_id: 10001 + + - name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag. + dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_service_tag: 2HFGH3 + + + +Return Values +------------- + +msg (on error, str, Unable to complete the operation because the entered target baseline name 'baseline' is invalid.) + Over all compliance report status. 
+ + +compliance_info (success, dict, [AnsibleMapping([('ComplianceAttributeGroups', [AnsibleMapping([('Attributes', []), ('ComplianceReason', 'One or more attributes on the target device(s) does not match the compliance template.'), ('ComplianceStatus', 2), ('ComplianceSubAttributeGroups', [AnsibleMapping([('Attributes', [AnsibleMapping([('AttributeId', 75369), ('ComplianceReason', 'Attribute has different value from template'), ('ComplianceStatus', 3), ('CustomId', 0), ('Description', None), ('DisplayName', 'Workload Profile'), ('ExpectedValue', 'HpcProfile'), ('Value', 'NotAvailable')])]), ('ComplianceReason', 'One or more attributes on the target device(s) does not match the compliance template.'), ('ComplianceStatus', 2), ('ComplianceSubAttributeGroups', []), ('DisplayName', 'System Profile Settings'), ('GroupNameId', 1)])]), ('DisplayName', 'BIOS'), ('GroupNameId', 1)])]), ('ComplianceStatus', 'NONCOMPLIANT'), ('DeviceName', 'WIN-PLOV8MPIP40'), ('DeviceType', 1000), ('Id', 25011), ('InventoryTime', '2021-03-18 00:01:57.809771'), ('Model', 'PowerEdge R7525'), ('ServiceTag', 'JHMBX53')])]) + Returns the compliance report information. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen A (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst new file mode 100644 index 00000000..e3a32f82 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst @@ -0,0 +1,327 @@ +.. _ome_device_group_module: + + +ome_device_group -- Add or remove device(s) from a static device group on OpenManage Enterprise +=============================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to add or remove device(s) from a static device group on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 +- netaddr >= 0.7.19 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` allows to add the device(s) to a static device group. + + ``absent`` allows to remove the device(s) from a static device group. + + + name (optional, str, None) + Name of the static group. + + *name* is mutually exclusive with *group_id*. + + + group_id (optional, int, None) + ID of the static device. + + *group_id* is mutually exclusive with *name*. + + + device_ids (optional, list, None) + List of ID(s) of the device(s) to be added or removed from the device group. + + *device_ids* is mutually exclusive with *device_service_tags* and *ip_addresses*. + + + device_service_tags (optional, list, None) + List of service tag(s) of the device(s) to be added or removed from the device group. + + *device_service_tags* is mutually exclusive with *device_ids* and *ip_addresses*. + + + ip_addresses (optional, list, None) + List of IPs of the device(s) to be added or removed from the device group. 
+ + *ip_addresses* is mutually exclusive with *device_ids* and *device_service_tags*. + + Supported IP address range formats: + + - 192.35.0.1 + + - 10.36.0.0-192.36.0.255 + + - 192.37.0.0/24 + + - fe80::ffff:ffff:ffff:ffff + + - fe80::ffff:192.0.2.0/125 + + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + + ``NOTE`` Hostname is not supported. + + ``NOTE`` *ip_addresses* requires python's netaddr package to work on IP addresses. + + ``NOTE`` This module reports success even if one of the IP addresses provided in the *ip_addresses* list is available in OpenManage Enterprise. The module reports failure only if none of the IP addresses provided in the list are available in OpenManage Enterprise. + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Add devices to a static device group by using the group name and device IDs + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + - 11113 + + - name: Add devices to a static device group by using the group name and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_service_tags: + - GHRT2RL + - KJHDF3S + - LKIJNG6 + + - name: Add devices to a static device group by using the group ID and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + device_service_tags: + - GHRT2RL + - KJHDF3S + + - name: Add devices to a static device group by using the group name and IPv4 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + + - name: Add devices to a static device group by using the group ID and IPv6 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + + - name: Add devices to a static device group by using the group ID and supported IPv4 and IPv6 address formats. 
+ dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + + - name: Remove devices from a static device group by using the group name and device IDs + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + - 11113 + + - name: Remove devices from a static device group by using the group name and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_service_tags: + - GHRT2RL + - KJHDF3S + - LKIJNG6 + + - name: Remove devices from a static device group by using the group ID and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + device_service_tags: + - GHRT2RL + - KJHDF3S + + - name: Remove devices from a static device group by using the group name and IPv4 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + + - name: Remove devices from a static device group by using the group ID and IPv6 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - 
fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + + - name: Remove devices from a static device group by using the group ID and supported IPv4 and IPv6 address formats. + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + + + + +Return Values +------------- + +msg (always, str, ['Successfully added member(s) to the device group.']) + Overall status of the device group settings. + + +group_id (success, int, 21078) + ID of the group. + + +ip_addresses_added (success, list, 21078) + IP Addresses which are added to the device group. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Sajna Shetty(@Sajna-Shetty) +- Abhishek Sinha (@Abhishek-Dell) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst new file mode 100644 index 00000000..1896725d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst @@ -0,0 +1,207 @@ +.. 
_ome_device_info_module: + + +ome_device_info -- Retrieves the information of devices inventoried by OpenManage Enterprise +============================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module retrieves the list of devices in the inventory of OpenManage Enterprise along with the details of each device. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + fact_subset (optional, str, basic_inventory) + ``basic_inventory`` returns the list of the devices. + + ``detailed_inventory`` returns the inventory details of specified devices. + + ``subsystem_health`` returns the health status of specified devices. + + + system_query_options (optional, dict, None) + *system_query_options* applicable for the choices of the fact_subset. Either *device_id* or *device_service_tag* is mandatory for ``detailed_inventory`` and ``subsystem_health`` or both can be applicable. + + + device_id (optional, list, None) + A list of unique identifier is applicable for ``detailed_inventory`` and ``subsystem_health``. + + + device_service_tag (optional, list, None) + A list of service tags are applicable for ``detailed_inventory`` and ``subsystem_health``. + + + inventory_type (optional, str, None) + For ``detailed_inventory``, it returns details of the specified inventory type. + + + filter (optional, str, None) + For ``basic_inventory``, it filters the collection of devices. *filter* query format should be aligned with OData standards. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. 
+ + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Retrieve basic inventory of all devices + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "basic_inventory" + system_query_options: + filter: "Id eq 33333 or Id eq 11111" + + - name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222 + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + - 22222 + + - name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567 + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + 
system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 + + - name: Retrieve details of specified inventory type of specified devices identified by ID and service tags + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + device_service_tag: + - MXL1234 + - MXL4567 + inventory_type: "serverDeviceCards" + + - name: Retrieve subsystem health of specified devices identified by service tags + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "subsystem_health" + system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 + + + + +Return Values +------------- + +msg (on error, str, Failed to fetch the device information) + Over all device information status. + + +device_info (success, dict, AnsibleMapping([('value', [AnsibleMapping([('Actions', None), ('AssetTag', None), ('ChassisServiceTag', None), ('ConnectionState', True), ('DeviceManagement', [AnsibleMapping([('DnsName', 'dnsname.host.com'), ('InstrumentationName', 'MX-12345'), ('MacAddress', '11:10:11:10:11:10'), ('ManagementId', 12345), ('ManagementProfile', [AnsibleMapping([('HasCreds', 0), ('ManagementId', 12345), ('ManagementProfileId', 12345), ('ManagementURL', 'https://192.168.0.1:443'), ('Status', 1000), ('StatusDateTime', '2019-01-21 06:30:08.501')])]), ('ManagementType', 2), ('NetworkAddress', '192.168.0.1')])]), ('DeviceName', 'MX-0003I'), ('DeviceServiceTag', 'MXL1234'), ('DeviceSubscription', None), ('LastInventoryTime', '2019-01-21 06:30:08.501'), ('LastStatusTime', '2019-01-21 06:30:02.492'), ('ManagedState', 3000), ('Model', 'PowerEdge MX7000'), ('PowerState', 17), ('SlotConfiguration', AnsibleMapping()), ('Status', 4000), ('SystemId', 2031), ('Type', 2000)])])])) + Returns the information collected from 
the Device. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst new file mode 100644 index 00000000..0e9e2b7b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst @@ -0,0 +1,304 @@ +.. _ome_device_local_access_configuration_module: + + +ome_device_local_access_configuration -- Configure local access settings on OpenManage Enterprise Modular. +========================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the local access settings of the power button, quick sync, KVM, LCD, and chassis direct access on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, int, None) + The ID of the chassis for which the local access configuration to be updated. + + If the device ID is not specified, this module updates the local access settings for the *hostname*. + + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + The service tag of the chassis for which the local access settings needs to be updated. + + If the device service tag is not specified, this module updates the local access settings for the *hostname*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + enable_kvm_access (optional, bool, None) + Enables or disables the keyboard, video, and mouse (KVM) interfaces. 
+ + + enable_chassis_direct_access (optional, bool, None) + Enables or disables the access to management consoles such as iDRAC and the management module of the device on the chassis. + + + chassis_power_button (optional, dict, None) + The settings for the chassis power button. + + + enable_chassis_power_button (True, bool, None) + Enables or disables the chassis power button. + + If ``False``, the chassis cannot be turn on or turn off using the power button. + + + enable_lcd_override_pin (optional, bool, None) + Enables or disables the LCD override pin. + + This is required when *enable_chassis_power_button* is ``False``. + + + disabled_button_lcd_override_pin (optional, int, None) + The six digit LCD override pin to change the power state of the chassis. + + This is required when *enable_lcd_override_pin* is ``True``. + + The module will always report change when *disabled_button_lcd_override_pin* is ``True``. + + + + quick_sync (optional, dict, None) + The settings for quick sync. + + The *quick_sync* options are ignored if the quick sync hardware is not present. + + + quick_sync_access (optional, str, None) + Users with administrator privileges can set the following types of *quick_sync_access*. + + ``READ_WRITE`` enables writing configuration using quick sync. + + ``READ_ONLY`` enables read only access to Wi-Fi and Bluetooth Low Energy(BLE). + + ``DISABLED`` disables reading or writing configuration through quick sync. + + + enable_inactivity_timeout (optional, bool, None) + Enables or disables the inactivity timeout. + + + timeout_limit (optional, int, None) + Inactivity timeout in seconds or minutes. + + The range is 120 to 3600 in seconds, or 2 to 60 in minutes. + + This option is required when *enable_inactivity_timeout* is ``True``. + + + timeout_limit_unit (optional, str, None) + Inactivity timeout limit unit. + + ``SECONDS`` to set *timeout_limit* in seconds. + + ``MINUTES`` to set *timeout_limit* in minutes. 
+ + This option is required when *enable_inactivity_timeout* is ``True``. + + + enable_read_authentication (optional, bool, None) + Enables or disables the option to log in using your user credentials and to read the inventory in a secure data center. + + + enable_quick_sync_wifi (optional, bool, None) + Enables or disables the Wi-Fi communication path to the chassis. + + + + lcd (optional, dict, None) + The settings for LCD. + + The *lcd* options are ignored if the LCD hardware is not present in the chassis. + + + lcd_access (optional, str, None) + Option to configure the quick sync settings using LCD. + + ``VIEW_AND_MODIFY`` to set access level to view and modify. + + ``VIEW_ONLY`` to set access level to view. + + ``DISABLED`` to disable the access. + + + user_defined (optional, str, None) + The text to display on the LCD Home screen. The LCD Home screen is displayed when the system is reset to factory default settings. The user-defined text can have a maximum of 62 characters. + + + lcd_language (optional, str, None) + The language code in which the text on the LCD must be displayed. + + en to set English language. + + fr to set French language. + + de to set German language. + + es to set Spanish language. + + ja to set Japanese language. + + zh to set Chinese language. + + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports ``check_mode``. + - The module will always report change when *enable_chassis_power_button* is ``True``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure KVM, direct access and power button settings of the chassis using device ID. + dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + + - name: Configure Quick sync and LCD settings of the chassis using device service tag. + dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + quick_sync: + quick_sync_access: READ_ONLY + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 10 + timeout_limit_unit: MINUTES + lcd: + lcd_access: VIEW_ONLY + lcd_language: en + user_defined: "LCD Text" + + - name: Configure all local access settings of the host chassis. 
+ dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + quick_sync: + quick_sync_access: READ_WRITE + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 120 + timeout_limit_unit: SECONDS + lcd: + lcd_access: VIEW_MODIFY + lcd_language: en + user_defined: "LCD Text" + + + +Return Values +------------- + +msg (always, str, Successfully updated the local access settings.) + Overall status of the device local access settings. + + +location_details (success, dict, AnsibleMapping([('SettingType', 'LocalAccessConfiguration'), ('EnableChassisDirect', False), ('EnableChassisPowerButton', False), ('EnableKvmAccess', True), ('EnableLcdOverridePin', False), ('LcdAccess', 'VIEW_ONLY'), ('LcdCustomString', 'LCD Text'), ('LcdLanguage', 'en'), ('LcdOverridePin', ''), ('LcdPinLength', None), ('LcdPresence', 'Present'), ('LedPresence', None), ('QuickSync', AnsibleMapping([('EnableInactivityTimeout', True), ('EnableQuickSyncWifi', False), ('EnableReadAuthentication', False), ('QuickSyncAccess', 'READ_ONLY'), ('QuickSyncHardware', 'Present'), ('TimeoutLimit', 7), ('TimeoutLimitUnit', 'MINUTES')]))])) + returned when local access settings are updated successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. 
If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst new file mode 100644 index 00000000..3d61a4f4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst @@ -0,0 +1,194 @@ +.. _ome_device_location_module: + + +ome_device_location -- Configure device location settings on OpenManage Enterprise Modular +========================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the device location settings of the chassis on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, int, None) + The ID of the chassis for which the settings need to be updated. + + If the device ID is not specified, this module updates the location settings for the *hostname*. + + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + The service tag of the chassis for which the settings need to be updated. + + If the device service tag is not specified, this module updates the location settings for the *hostname*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + data_center (optional, str, None) + The data center name of the chassis. + + + room (optional, str, None) + The room of the chassis. + + + aisle (optional, str, None) + The aisle of the chassis. + + + rack (optional, str, None) + The rack name of the chassis. + + + rack_slot (optional, int, None) + The rack slot number of the chassis. 
+ + + location (optional, str, None) + The physical location of the chassis. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update device location settings of a chassis using the device ID. + dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + data_center: data center 1 + room: room 1 + aisle: aisle 1 + rack: rack 1 + rack_slot: 2 + location: location 1 + + - name: Update device location settings of a chassis using the device service tag. + dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + data_center: data center 2 + room: room 7 + aisle: aisle 4 + rack: rack 6 + rack_slot: 22 + location: location 5 + + - name: Update device location settings of the host chassis. 
+ dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + data_center: data center 3 + room: room 3 + aisle: aisle 1 + rack: rack 7 + rack_slot: 10 + location: location 9 + + + +Return Values +------------- + +msg (always, str, Successfully updated the location settings.) + Overall status of the device location settings. + + +location_details (success, dict, AnsibleMapping([('Aisle', 'aisle 1'), ('DataCenter', 'data center 1'), ('Location', 'location 1'), ('RackName', 'rack 1'), ('RackSlot', 2), ('Room', 'room 1'), ('SettingType', 'Location')])) + returned when location settings are updated successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst new file mode 100644 index 00000000..a60b0926 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst @@ -0,0 +1,429 @@ +.. _ome_device_mgmt_network_module: + + +ome_device_mgmt_network -- Configure network settings of devices on OpenManage Enterprise Modular +================================================================================================= + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure network settings on Chassis, Servers, and I/O Modules on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_service_tag (optional, str, None) + Service tag of the device. + + This option is mutually exclusive with *device_id*. + + + device_id (optional, int, None) + ID of the device. + + This option is mutually exclusive with *device_service_tag*. + + + enable_nic (optional, bool, True) + Enable or disable Network Interface Card (NIC) configuration of the device. + + This option is not applicable to I/O Module. + + + delay (optional, int, 0) + The time in seconds, after which settings are applied. + + This option is applicable only for Chassis. + + + ipv4_configuration (optional, dict, None) + IPv4 network configuration. + + ``WARNING`` Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these options can change the current IPv4 address for *hostname*. + + + enable_ipv4 (True, bool, None) + Enable or disable access to the network using IPv4. + + + enable_dhcp (optional, bool, None) + Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration Protocol (DHCP) server. + + ``NOTE`` If this option is ``True``, the values provided for *static_ip_address*, *static_subnet_mask*, and *static_gateway* are not applied for these fields. However, the module may report changes. + + + static_ip_address (optional, str, None) + Static IPv4 address + + This option is applicable when *enable_dhcp* is false. + + + static_subnet_mask (optional, str, None) + Static IPv4 subnet mask address + + This option is applicable when *enable_dhcp* is false. 
+ + + static_gateway (optional, str, None) + Static IPv4 gateway address + + This option is applicable when *enable_dhcp* is false. + + + use_dhcp_to_obtain_dns_server_address (optional, bool, None) + This option allows to automatically request and obtain IPv4 address for the DNS Server from the DHCP server. + + This option is applicable when *enable_dhcp* is true. + + ``NOTE`` If this option is ``True``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes. + + + static_preferred_dns_server (optional, str, None) + Static IPv4 DNS preferred server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + static_alternate_dns_server (optional, str, None) + Static IPv4 DNS alternate server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + + ipv6_configuration (optional, dict, None) + IPv6 network configuration. + + ``WARNING`` Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these options can change the current IPv6 address for *hostname*. + + + enable_ipv6 (True, bool, None) + Enable or disable access to the network using the IPv6. + + + enable_auto_configuration (optional, bool, None) + Enable or disable the automatic request to obtain an IPv6 address from the IPv6 DHCP server or router advertisements(RA) + + If *enable_auto_configuration* is ``true``, OpenManage Enterprise Modular retrieves IP configuration (IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network. + + ``NOTE`` If this option is ``True``, the values provided for *static_ip_address*, *static_prefix_length*, and *static_gateway* are not applied for these fields. However, the module may report changes. + + + static_ip_address (optional, str, None) + Static IPv6 address + + This option is applicable when *enable_auto_configuration* is false. 
+ + + static_prefix_length (optional, int, None) + Static IPv6 prefix length + + This option is applicable when *enable_auto_configuration* is false. + + + static_gateway (optional, str, None) + Static IPv6 gateway address + + This option is applicable when *enable_auto_configuration* is false. + + + use_dhcpv6_to_obtain_dns_server_address (optional, bool, None) + This option allows to automatically request and obtain a IPv6 address for the DNS server from the DHCP server. + + This option is applicable when *enable_auto_configuration* is true + + ``NOTE`` If this option is ``True``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes. + + + static_preferred_dns_server (optional, str, None) + Static IPv6 DNS preferred server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + static_alternate_dns_server (optional, str, None) + Static IPv6 DNS alternate server + + This option is applicable when *use_dhcp_for_dns_server_names* is false. + + + + management_vlan (optional, dict, None) + VLAN configuration. + + + enable_vlan (True, bool, None) + Enable or disable VLAN for management. + + The VLAN configuration cannot be updated if the *register_with_dns* field under *dns_configuration* is true. + + ``WARNING`` Ensure that the network cable is connected to the correct port after the VLAN configuration is changed. If not, the VLAN configuration changes may not be applied. + + + vlan_id (optional, int, None) + VLAN ID. + + The valid VLAN IDs are: 1 to 4000, and 4021 to 4094. + + This option is applicable when *enable_vlan* is true. + + + + dns_configuration (optional, dict, None) + Domain Name System(DNS) settings. + + + register_with_dns (optional, bool, None) + Register/Unregister *dns_name* on the DNS Server. + + ``WARNING`` This option cannot be updated if VLAN configuration changes. 
+ + + use_dhcp_for_dns_domain_name (optional, bool, None) + Get the *dns_domain_name* using a DHCP server. + + + dns_name (optional, str, None) + DNS name for *hostname* + + This is applicable when *register_with_dns* is true. + + + dns_domain_name (optional, str, None) + Static DNS domain name + + This is applicable when *use_dhcp_for_dns_domain_name* is false. + + + auto_negotiation (optional, bool, None) + Enables or disables the auto negation of the network speed. + + ``NOTE``: Setting *auto_negotiation* to false and choosing a network port speed may result in the chassis loosing link to the top of rack network switch, or to the neighboring chassis in case of MCM mode. It is recommended that the *auto_negotiation* is set to ``true`` for most use cases. + + This is applicable when *use_dhcp_for_dns_domain_name* is false. + + This is applicable only for Chassis. + + + network_speed (optional, str, None) + The speed of the network port. + + This is applicable when *auto_negotiation* is false. + + ``10_MB`` to select network speed of 10 MB. + + ``100_MB`` to select network speed of 100 MB. + + This is applicable only for Chassis. + + + + dns_server_settings (optional, dict, None) + DNS server settings. + + This is applicable only for I/O Module. + + + preferred_dns_server (optional, str, None) + Enter the IP address of the preferred DNS server. + + + alternate_dns_server1 (optional, str, None) + Enter the IP address of the first alternate DNS server. + + + alternate_dns_server2 (optional, str, None) + Enter the IP address of the second alternate DNS server. + + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. 
+ + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Network settings for chassis + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: CHAS123 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "dnslocaldomain" + auto_negotiation: no + network_speed: 100_MB + + - name: Network settings for server + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + 
ca_path: "/path/to/ca_cert.pem" + device_service_tag: SRVR123 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + + - name: Network settings for I/O module + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: IOM1234 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + dns_server_settings: + preferred_dns_server: 192.168.0.4 + alternate_dns_server1: 192.168.0.5 + + - name: Management VLAN configuration of chassis using device id + dellemc.openmanage.ome_device_mgmt_network: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id : 12345 + management_vlan: + enable_vlan: true + vlan_id: 2345 + dns_configuration: + register_with_dns: false + + + +Return Values +------------- + +msg (always, str, Successfully applied the network settings.) + Overall status of the network config operation. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1004'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because IPV4 Settings Capability is not Supported does not exist or is not applicable for the resource URI.'), ('MessageArgs', ['IPV4 Settings Capability is not Supported']), ('Severity', 'Critical'), ('Resolution', "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties.")])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst new file mode 100644 index 00000000..9d57373c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst @@ -0,0 +1,229 @@ +.. _ome_device_network_services_module: + + +ome_device_network_services -- Configure chassis network services settings on OpenManage Enterprise Modular +=========================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the network services on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, int, None) + The ID of the chassis for which the settings need to be updated. + + If the device ID is not specified, this module updates the network services settings for the *hostname*. 
+ + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + The service tag of the chassis for which the setting needs to be updated. + + If the device service tag is not specified, this module updates the network services settings for the *hostname*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + snmp_settings (optional, dict, None) + The settings for SNMP configuration. + + + enabled (True, bool, None) + Enables or disables the SNMP settings. + + + port_number (optional, int, None) + The SNMP port number. + + + community_name (optional, str, None) + The SNMP community string. + + Required when *enabled* is ``true``. + + + + ssh_settings (optional, dict, None) + The settings for SSH configuration. + + + enabled (True, bool, None) + Enables or disables the SSH settings. + + + port_number (optional, int, None) + The port number for SSH service. + + + max_sessions (optional, int, None) + Number of SSH sessions. + + + max_auth_retries (optional, int, None) + The number of retries when the SSH session fails. + + + idle_timeout (optional, float, None) + SSH idle timeout in minutes. + + + + remote_racadm_settings (optional, dict, None) + The settings for remote RACADM configuration. + + + enabled (True, bool, None) + Enables or disables the remote RACADM settings. + + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update network services settings of a chassis using the device ID + dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + snmp_settings: + enabled: true + port_number: 161 + community_name: public + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: false + + - name: Update network services settings of a chassis using the device service tag. + dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.2" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + snmp_settings: + enabled: false + ssh_settings: + enabled: true + port_number: 22 + max_sessions: 1 + max_auth_retries: 3 + idle_timeout: 1 + remote_racadm_settings: + enabled: false + + - name: Update network services settings of the host chassis. + dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.3" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + snmp_settings: + enabled: false + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: true + + + +Return Values +------------- + +msg (always, str, Successfully updated the network services settings.) + Overall status of the network services settings. 
+ + +network_services_details (success, dict, AnsibleMapping([('EnableRemoteRacadm', True), ('SettingType', 'NetworkServices'), ('SnmpConfiguration', AnsibleMapping([('PortNumber', 161), ('SnmpEnabled', True), ('SnmpV1V2Credential', AnsibleMapping([('CommunityName', 'public')]))])), ('SshConfiguration', AnsibleMapping([('IdleTimeout', 60), ('MaxAuthRetries', 3), ('MaxSessions', 1), ('PortNumber', 22), ('SshEnabled', False)]))])) + returned when network services settings are updated successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CAPP1042'), ('RelatedProperties', []), ('Message', 'Unable to update the network configuration because the SNMP PortNumber is already in use.'), ('MessageArgs', ['SNMP PortNumber']), ('Severity', 'Informational'), ('Resolution', 'Enter a different port number and retry the operation.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst new file mode 100644 index 00000000..46f75bb2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst @@ -0,0 +1,208 @@ +.. _ome_device_power_settings_module: + + +ome_device_power_settings -- Configure chassis power settings on OpenManage Enterprise Modular +============================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the chassis power settings on OpenManage Enterprise Modular. 
+ + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, int, None) + The ID of the chassis for which the settings need to be updated. + + If the device ID is not specified, this module updates the power settings for the *hostname*. + + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + The service tag of the chassis for which the setting needs to be updated. + + If the device service tag is not specified, this module updates the power settings for the *hostname*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + power_configuration (optional, dict, None) + The settings for Power configuration. + + + enable_power_cap (True, bool, None) + Enables or disables the Power Cap Settings. + + + power_cap (optional, int, None) + The maximum power consumption limit of the device. Specify the consumption limit in Watts. + + This is required if *enable_power_cap* is set to true. + + + + redundancy_configuration (optional, dict, None) + The settings for Redundancy configuration. + + + redundancy_policy (optional, str, NO_REDUNDANCY) + The choices to configure the redundancy policy. + + ``NO_REDUNDANCY`` no redundancy policy is used. + + ``GRID_REDUNDANCY`` to distributes power by dividing the PSUs into two grids. + + ``PSU_REDUNDANCY`` to distribute power between all the PSUs. + + + + hot_spare_configuration (optional, dict, None) + The settings for Hot Spare configuration. + + + enable_hot_spare (True, bool, None) + Enables or disables Hot Spare configuration to facilitate voltage regulation when power utilized by the Power Supply Unit (PSU) is low. + + + primary_grid (optional, str, GRID_1) + The choices for PSU grid. + + ``GRID_1`` Hot Spare on Grid 1. + + ``GRID_2`` Hot Spare on Grid 2. 
+ + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update power configuration settings of a chassis using the device ID. + dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + power_configuration: + enable_power_cap: true + power_cap: 3424 + + - name: Update redundancy configuration settings of a chassis using the device service tag. + dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + redundancy_configuration: + redundancy_policy: GRID_REDUNDANCY + + - name: Update hot spare configuration settings of a chassis using device ID. 
+ dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25012 + hot_spare_configuration: + enable_hot_spare: true + primary_grid: GRID_1 + + + +Return Values +------------- + +msg (always, str, Successfully updated the power settings.) + Overall status of the device power settings. + + +power_details (success, dict, AnsibleMapping([('EnableHotSpare', True), ('EnablePowerCapSettings', True), ('MaxPowerCap', '3424'), ('MinPowerCap', '3291'), ('PowerCap', '3425'), ('PrimaryGrid', 'GRID_1'), ('RedundancyPolicy', 'NO_REDUNDANCY'), ('SettingType', 'Power')])) + returned when power settings are updated successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst new file mode 100644 index 00000000..0f32a413 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst @@ -0,0 +1,293 @@ +.. _ome_device_quick_deploy_module: + + +ome_device_quick_deploy -- Configure Quick Deploy settings on OpenManage Enterprise Modular. +============================================================================================ + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure the Quick Deploy settings of the server or IOM on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, int, None) + The ID of the chassis for which the Quick Deploy settings to be deployed. + + If the device ID is not specified, this module updates the Quick Deploy settings for the *hostname*. + + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, str, None) + The service tag of the chassis for which the Quick Deploy settings to be deployed. + + If the device service tag is not specified, this module updates the Quick Deploy settings for the *hostname*. + + *device_service_tag* is mutually exclusive with *device_id*. + + + setting_type (True, str, None) + The type of the Quick Deploy settings to be applied. + + ``ServerQuickDeploy`` to apply the server Quick Deploy settings. + + ``IOMQuickDeploy`` to apply the IOM Quick Deploy settings. + + + job_wait (optional, bool, True) + Determines whether to wait for the job completion or not. + + + job_wait_timeout (optional, int, 120) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + quick_deploy_options (True, dict, None) + The Quick Deploy settings for server and IOM quick deploy. + + + password (optional, str, None) + The password to login to the server or IOM. + + The module will always report change when *password* option is added. + + + ipv4_enabled (optional, bool, None) + Enables or disables the IPv4 network. + + + ipv4_network_type (optional, str, None) + IPv4 network type. + + *ipv4_network_type* is required if *ipv4_enabled* is ``True``. + + ``Static`` to configure the static IP settings. 
+ + ``DHCP`` to configure the Dynamic IP settings. + + + ipv4_subnet_mask (optional, str, None) + IPv4 subnet mask. + + *ipv4_subnet_mask* is required if *ipv4_network_type* is ``Static``. + + + ipv4_gateway (optional, str, None) + IPv4 gateway. + + *ipv4_gateway* is required if *ipv4_network_type* is ``Static``. + + + ipv6_enabled (optional, bool, None) + Enables or disables the IPv6 network. + + + ipv6_network_type (optional, str, None) + IPv6 network type. + + *ipv6_network_type* is required if *ipv6_enabled* is ``True``. + + ``Static`` to configure the static IP settings. + + ``DHCP`` to configure the Dynamic IP settings. + + + ipv6_prefix_length (optional, int, None) + IPV6 prefix length. + + *ipv6_prefix_length* is required if *ipv6_network_type* is ``Static``. + + + ipv6_gateway (optional, str, None) + IPv6 gateway. + + *ipv6_gateway* is required if *ipv6_network_type* is ``Static``. + + + slots (optional, list, None) + The slot configuration for the server or IOM. + + + slot_id (True, int, None) + The ID of the slot. + + + slot_ipv4_address (optional, str, None) + The IPv4 address of the slot. + + + slot_ipv6_address (optional, str, None) + The IPv6 address of the slot. + + + vlan_id (optional, int, None) + The ID of the VLAN. + + + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports ``check_mode``. + - The module will always report change when *password* option is added. + - If the chassis is a member of a multi-chassis group and it is assigned as a backup lead chassis, the operations performed on the chassis using this module may conflict with the management operations performed on the chassis through the lead chassis. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Configure server Quick Deploy settings of the chassis using device ID. + dellemc.openmanage.ome_device_quick_deploy: + hostname: "192.168.0.1" + username: "username" + password: "password" + device_id: 25011 + setting_type: ServerQuickDeploy + ca_path: "/path/to/ca_cert.pem" + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 + + - name: Configure server Quick Deploy settings of the chassis using device service tag. 
+ dellemc.openmanage.ome_device_quick_deploy: + hostname: "192.168.0.1" + username: "username" + password: "password" + device_service_tag: GHRT2RL + setting_type: IOMQuickDeploy + ca_path: "/path/to/ca_cert.pem" + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 + + + +Return Values +------------- + +msg (always, str, Successfully deployed the quick deploy settings.) + Overall status of the device quick deploy settings. + + +job_id (when quick deploy job is submitted., int, 1234) + The job ID of the submitted quick deploy job. + + +quick_deploy_settings (success, dict, AnsibleMapping([('DeviceId', 25011), ('SettingType', 'ServerQuickDeploy'), ('ProtocolTypeV4', True), ('NetworkTypeV4', 'Static'), ('IpV4Gateway', '192.168.0.1'), ('IpV4SubnetMask', '255.255.255.0'), ('ProtocolTypeV6', True), ('NetworkTypeV6', 'Static'), ('PrefixLength', '2'), ('IpV6Gateway', '::'), ('slots', [AnsibleMapping([('DeviceId', 25011), ('DeviceCapabilities', [18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 41, 8, 7, 4, 3, 2, 1, 31, 30]), ('DeviceIPV4Address', '192.168.0.2'), ('DeviceIPV6Address', '::'), ('Dhcpipv4', 'Disabled'), ('Dhcpipv6', 'Disabled'), ('Ipv4Enabled', 'Enabled'), ('Ipv6Enabled', 'Enabled'), ('Model', 'PowerEdge MX840c'), ('SlotIPV4Address', '192.168.0.2'), ('SlotIPV6Address', '::'), ('SlotId', 1), ('SlotSelected', True), ('SlotSettingsApplied', True), ('SlotType', '2000'), ('Type', '1000'), ('VlanId', '1')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 2), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', 
'2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 3), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 4), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 5), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 6), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 7), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 8), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')])])])) + returned when quick deploy settings are deployed successfully. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst new file mode 100644 index 00000000..66f4f27f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst @@ -0,0 +1,219 @@ +.. _ome_devices_module: + + +ome_devices -- Perform device-specific operations on target devices +=================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +Perform device-specific operations such as refresh inventory, clear iDRAC job queue, and reset iDRAC from OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_service_tags (optional, list, None) + Service tag of the target devices. + + This is mutually exclusive with *device_ids*. + + + device_ids (optional, list, None) + IDs of the target devices. + + This is mutually exclusive with *device_service_tags*. + + + state (optional, str, present) + ``present`` Allows to perform the *device_action* on the target devices. + + ``absent`` Removes the device from OpenManage Enterprise. No job is triggered. *job_wait*, *job_schedule*, *job_name*, and *job_description* are not applicable to this operation. + + + device_action (optional, str, refresh_inventory) + ``refresh_inventory`` refreshes the inventory on the target devices. + + ``reset_idrac`` Triggers a reset on the target iDRACs. + + ``clear_idrac_job_queue`` Clears the job queue on the target iDRACs. + + A job is triggered for each action. + + + job_wait (optional, bool, True) + Provides an option to wait for the job completion. + + This option is applicable when *state* is ``present``. + + This is applicable when *job_schedule* is ``startnow``. 
+ + + job_wait_timeout (optional, int, 1200) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + job_schedule (optional, str, startnow) + Provide the cron string to schedule the job. + + + job_name (optional, str, None) + Optional name for the job. + + + job_description (optional, str, None) + Optional description for the job. + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - For ``idrac_reset``, the job triggers only the iDRAC reset operation and does not track the complete reset cycle. + - Run this module from a system that has direct access to Dell OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Refresh Inventory + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: refresh_inventory + device_service_tags: + - SVCTAG1 + + - name: Clear iDRAC job queue + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: clear_idrac_job_queue + device_service_tags: + - SVCTAG1 + + - name: Reset iDRAC using the service tag + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: reset_idrac + device_service_tags: + - SVCTAG1 + + - name: Remove devices using servicetags + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_service_tags: + - SVCTAG1 + - SVCTAF2 + + - name: Remove devices using IDs + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_ids: + - 10235 + + + +Return Values +------------- + +msg (always, str, Successfully removed the device(s).) + Overall status of the devices operation. 
+ + +job (success, dict, {'Id': 14874, 'JobName': 'Refresh inventory', 'JobDescription': "The Refresh inventory task initiated from OpenManage Ansible Modules for devices with the ids '13216'.", 'Schedule': 'startnow', 'State': 'Enabled', 'CreatedBy': 'admin', 'UpdatedBy': None, 'Visible': True, 'Editable': True, 'Builtin': False, 'UserGenerated': True, 'Targets': [{'JobId': 14874, 'Id': 13216, 'Data': '', 'TargetType': {'Id': 1000, 'Name': 'DEVICE'}}], 'Params': [{'JobId': 14874, 'Key': 'action', 'Value': 'CONFIG_INVENTORY'}, {'JobId': 14874, 'Key': 'isCollectDriverInventory', 'Value': 'true'}], 'LastRunStatus': {'@odata.type': '#JobService.JobStatus', 'Id': 2060, 'Name': 'Completed'}, 'JobType': {'@odata.type': '#JobService.JobType', 'Id': 8, 'Name': 'Inventory_Task', 'Internal': False}, 'JobStatus': {'@odata.type': '#JobService.JobStatus', 'Id': 2020, 'Name': 'Scheduled'}, 'ExecutionHistories@odata.navigationLink': '/api/JobService/Jobs(14874)/ExecutionHistories', 'LastExecutionDetail': {'@odata.id': '/api/JobService/Jobs(14874)/LastExecutionDetail'}}) + Job details of the devices operation. + + +error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1002', 'RelatedProperties': [], 'Message': 'Unable to complete the operation because the requested URI is invalid.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Enter a valid URI and retry the operation.'}]}}) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst new file mode 100644 index 00000000..7a7d231f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst @@ -0,0 +1,284 @@ +.. 
_ome_diagnostics_module: + + +ome_diagnostics -- Export technical support logs(TSR) to network share location +=============================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to export SupportAssist collection logs from OpenManage Enterprise and OpenManage Enterprise Modular and application logs from OpenManage Enterprise Modular to a CIFS or NFS share. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_ids (optional, list, None) + List of target device IDs. + + This is applicable for ``support_assist_collection`` and ``supportassist_collection`` logs. + + This option is mutually exclusive with *device_service_tags* and *device_group_name*. + + + device_service_tags (optional, list, None) + List of target identifier. + + This is applicable for ``support_assist_collection`` and ``supportassist_collection`` logs. + + This option is mutually exclusive with *device_ids* and *device_group_name*. + + + device_group_name (optional, str, None) + Name of the device group to export ``support_assist_collection`` or ``supportassist_collection`` logs of all devices within the group. + + This is applicable for ``support_assist_collection`` and ``supportassist_collection`` logs. + + This option is not applicable for OpenManage Enterprise Modular. + + This option is mutually exclusive with *device_ids* and *device_service_tags*. + + + log_type (optional, str, support_assist_collection) + ``application`` is applicable for OpenManage Enterprise Modular to export the application log bundle. + + ``support_assist_collection`` and ``supportassist_collection`` is applicable for one or more devices to export SupportAssist logs. + + ``support_assist_collection`` and ``supportassist_collection`` supports both OpenManage Enterprise and OpenManage Enterprise Modular. 
+ + ``support_assist_collection`` and ``supportassist_collection`` does not support export of ``OS_LOGS`` from OpenManage Enterprise. If tried to export, the tasks will complete with errors, and the module fails. + + + mask_sensitive_info (optional, bool, False) + Select this option to mask the personal identification information such as IPAddress, DNS, alert destination, email, gateway, inet6, MacAddress, netmask etc. + + This option is applicable for ``application`` of *log_type*. + + + log_selectors (optional, list, None) + By default, the SupportAssist logs contain only hardware logs. To collect additional logs such as OS logs, RAID logs or Debug logs, specify the log types to be collected in the choices list. + + If the log types are not specified, only the hardware logs are exported. + + ``OS_LOGS`` to collect OS Logs. + + ``RAID_LOGS`` to collect RAID controller logs. + + ``DEBUG_LOGS`` to collect Debug logs. + + This option is applicable only for ``support_assist_collection`` and ``supportassist_collection`` of *log_type*. + + + share_address (True, str, None) + Network share IP address. + + + share_name (True, str, None) + Network share path. + + Filename is auto generated and should not be provided as part of *share_name*. + + + share_type (True, str, None) + Network share type + + + share_user (optional, str, None) + Network share username. + + This option is applicable for ``CIFS`` of *share_type*. + + + share_password (optional, str, None) + Network share password + + This option is applicable for ``CIFS`` of *share_type*. + + + share_domain (optional, str, None) + Network share domain name. + + This option is applicable for ``CIFS`` if *share_type*. + + + job_wait (optional, bool, True) + Whether to wait for the Job completion or not. + + The maximum wait time is *job_wait_timeout*. + + + job_wait_timeout (optional, int, 60) + The maximum wait time of *job_wait* in minutes. + + This option is applicable *job_wait* is true. 
+ + + test_connection (optional, bool, False) + Test the availability of the network share location. + + *job_wait* and *job_wait_timeout* options are not applicable for *test_connection*. + + + lead_chassis_only (optional, bool, False) + Extract the logs from Lead chassis only. + + *lead_chassis_only* is only applicable when *log_type* is ``application`` on OpenManage Enterprise Modular. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to OpenManage Enterprise. + - This module performs the test connection and device validations. It does not create a job for copying the logs in check mode and always reports as changes found. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Export application log using CIFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_type: CIFS + share_address: "192.168.0.2" + share_user: share_username + share_password: share_password + share_name: cifs_share + log_type: application + mask_sensitive_info: false + test_connection: true + + - name: Export application log using NFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_type: NFS + share_name: nfs_share + log_type: application + mask_sensitive_info: true + test_connection: true + + - name: Export SupportAssist log using CIFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_user: share_username + share_password: share_password + share_name: cifs_share + share_type: CIFS + log_type: support_assist_collection + device_ids: [10011, 10022] + log_selectors: [OS_LOGS] + test_connection: true + + - name: Export SupportAssist log using NFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_type: NFS + share_name: nfs_share + log_type: support_assist_collection + device_group_name: group_name + test_connection: true + + + +Return Values +------------- + +msg (always, str, Export log job completed successfully.) + Overall status of the export log. 
+ + +jog_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'root'), ('Editable', True), ('EndTime', 'None'), ('Id', 12778), ('JobDescription', 'Export device log'), ('JobName', 'Export Log'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 18), ('Internal', False), ('Name', 'DebugLogs_Task')])), ('LastRun', '2021-07-06 10:52:50.519'), ('LastRunStatus', AnsibleMapping([('Id', 2060), ('Name', 'Completed')])), ('NextRun', 'None'), ('Schedule', 'startnow'), ('StartTime', 'None'), ('State', 'Enabled'), ('UpdatedBy', 'None'), ('UserGenerated', True), ('Visible', True), ('Params', [AnsibleMapping([('JobId', 12778), ('Key', 'maskSensitiveInfo'), ('Value', 'FALSE')]), AnsibleMapping([('JobId', 12778), ('Key', 'password'), ('Value', 'tY86w7q92u0QzvykuF0gQQ')]), AnsibleMapping([('JobId', 12778), ('Key', 'userName'), ('Value', 'administrator')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareName'), ('Value', 'iso')]), AnsibleMapping([('JobId', 12778), ('Key', 'OPERATION_NAME'), ('Value', 'EXTRACT_LOGS')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareType'), ('Value', 'CIFS')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareAddress'), ('Value', '100.96.32.142')])]), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 10053), ('JobId', 12778), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])])])) + Details of the export log operation status. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. 
If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Sachin Apagundi(@sachin-apa) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst new file mode 100644 index 00000000..79f68dd8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst @@ -0,0 +1,665 @@ +.. _ome_discovery_module: + + +ome_discovery -- Create, modify, or delete a discovery job on OpenManage Enterprise +=================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, or delete a discovery job. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates a discovery job or modifies an existing discovery job. + + *discovery_job_name* is mandatory for the creation of a new discovery job. + + If multiple discoveries of the same *discovery_job_name* exist, then the new discovery job will not be created. + + ``absent`` deletes an existing discovery job(s) with the specified *discovery_job_name*. + + + discovery_job_name (optional, str, None) + Name of the discovery configuration job. + + It is mutually exclusive with *discovery_id*. + + + discovery_id (optional, int, None) + ID of the discovery configuration group. + + This value is DiscoveryConfigGroupId in the return values under discovery_status. + + It is mutually exclusive with *discovery_job_name*. + + + new_name (optional, str, None) + New name of the discovery configuration job. + + + schedule (optional, str, RunNow) + Provides the option to schedule the discovery job. 
+ + If ``RunLater`` is selected, then *cron* must be specified. + + + cron (optional, str, None) + Provide a cron expression based on Quartz cron format. + + + trap_destination (optional, bool, False) + Enable OpenManage Enterprise to receive the incoming SNMP traps from the discovered devices. + + This is effective only for servers discovered by using their iDRAC interface. + + + community_string (optional, bool, False) + Enable the use of SNMP community strings to receive SNMP traps using Application Settings in OpenManage Enterprise. This option is available only for the discovered iDRAC servers and MX7000 chassis. + + + email_recipient (optional, str, None) + Enter the email address to which notifications are to be sent about the discovery job status. Configure the SMTP settings to allow sending notifications to an email address. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This option is applicable when *state* is ``present``. + + + job_wait_timeout (optional, int, 10800) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + ignore_partial_failure (optional, bool, False) + Provides the option to ignore partial failures. Partial failures occur when there is a combination of both discovered and undiscovered IPs. + + If ``False``, then the partial failure is not ignored, and the module will error out. + + If ``True``, then the partial failure is ignored. + + This option is only applicable if *job_wait* is ``True``. + + + discovery_config_targets (optional, list, None) + Provide the list of discovery targets. + + Each discovery target is a set of *network_address_detail*, *device_types*, and one or more protocol credentials. + + This is mandatory when *state* is ``present``. + + ``WARNING`` Modification of this field is not supported, this field is overwritten every time. 
Ensure to provide all the required details for this field. + + + network_address_detail (True, list, None) + Provide the list of IP addresses, host names, or the range of IP addresses of the devices to be discovered or included. + + Sample Valid IP Range Formats + + 192.35.0.0 + + 192.36.0.0-10.36.0.255 + + 192.37.0.0/24 + + 2345:f2b1:f083:135::5500/118 + + 2345:f2b1:f083:135::a500-2607:f2b1:f083:135::a600 + + hostname.domain.tld + + hostname + + 2345:f2b1:f083:139::22a + + Sample Invalid IP Range Formats + + 192.35.0.* + + 192.36.0.0-255 + + 192.35.0.0/255.255.255.0 + + ``NOTE`` The range size for the number of IP addresses is limited to 16,385 (0x4001). + + ``NOTE`` Both IPv6 and IPv6 CIDR formats are supported. + + + device_types (True, list, None) + Provide the type of devices to be discovered. + + The accepted types are SERVER, CHASSIS, NETWORK SWITCH, and STORAGE. + + A combination or all of the above can be provided. + + Supported protocols for each device type are: + + SERVER - *wsman*, *redfish*, *snmp*, *ipmi*, *ssh*, and *vmware*. + + CHASSIS - *wsman*, and *redfish*. + + NETWORK SWITCH - *snmp*. + + STORAGE - *storage*, and *snmp*. + + + wsman (optional, dict, None) + Web Services-Management (WS-Man). + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. + + + domain (optional, str, None) + Provide a domain for the protocol. + + + port (optional, int, 443) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. + + + cn_check (optional, bool, False) + Enable the Common Name (CN) check. + + + ca_check (optional, bool, False) + Enable the Certificate Authority (CA) check. 
+ + + certificate_data (optional, str, None) + Provide certificate data for the CA check. + + + + redfish (optional, dict, None) + REDFISH protocol. + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. + + + domain (optional, str, None) + Provide a domain for the protocol. + + + port (optional, int, 443) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. + + + cn_check (optional, bool, False) + Enable the Common Name (CN) check. + + + ca_check (optional, bool, False) + Enable the Certificate Authority (CA) check. + + + certificate_data (optional, str, None) + Provide certificate data for the CA check. + + + + snmp (optional, dict, None) + Simple Network Management Protocol (SNMP). + + + community (True, str, None) + Community string for the SNMP protocol. + + + port (optional, int, 161) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 3) + Enter the time in seconds after which a job must stop running. + + + + storage (optional, dict, None) + HTTPS Storage protocol. + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. + + + domain (optional, str, None) + Provide a domain for the protocol. + + + port (optional, int, 443) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. 
+ + + cn_check (optional, bool, False) + Enable the Common Name (CN) check. + + + ca_check (optional, bool, False) + Enable the Certificate Authority (CA) check. + + + certificate_data (optional, str, None) + Provide certificate data for the CA check. + + + + vmware (optional, dict, None) + VMWARE protocol. + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. + + + domain (optional, str, None) + Provide a domain for the protocol. + + + port (optional, int, 443) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. + + + cn_check (optional, bool, False) + Enable the Common Name (CN) check. + + + ca_check (optional, bool, False) + Enable the Certificate Authority (CA) check. + + + certificate_data (optional, str, None) + Provide certificate data for the CA check. + + + + ssh (optional, dict, None) + Secure Shell (SSH). + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. + + + port (optional, int, 22) + Enter the port number that the job must use to discover the devices. + + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. + + + check_known_hosts (optional, bool, False) + Verify the known host key. + + + is_sudo_user (optional, bool, False) + Use the SUDO option. + + + + ipmi (optional, dict, None) + Intelligent Platform Management Interface (IPMI) + + + username (True, str, None) + Provide a username for the protocol. + + + password (True, str, None) + Provide a password for the protocol. 
+ + + retries (optional, int, 3) + Enter the number of repeated attempts required to discover a device. + + + timeout (optional, int, 60) + Enter the time in seconds after which a job must stop running. + + + kgkey (optional, str, None) + KgKey for the IPMI protocol. + + + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module does not support ``check_mode``. + - If *state* is ``present``, then Idempotency is not supported. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Discover servers in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - SERVER + wsman: + username: user + password: password + + - name: Discover chassis in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - CHASSIS + wsman: + username: user + password: password + + - name: Discover switches in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_switch_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - NETWORK SWITCH + snmp: + community: snmp_creds + + - name: Discover storage in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_storage_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - STORAGE + storage: + username: user + password: password + snmp: + community: snmp_creds + + - name: Delete a discovery job + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + discovery_job_name: "Discovery-123" + + - name: Schedule the discovery of multiple devices ignoring partial failure and enable trap to receive alerts + dellemc.openmanage.ome_discovery: + hostname: 
"192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + discovery_job_name: "Discovery-123" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + - 192.96.0.0/24 + - 192.96.26.108 + device_types: + - SERVER + - CHASSIS + - STORAGE + - NETWORK SWITCH + wsman: + username: wsman_user + password: wsman_pwd + redfish: + username: redfish_user + password: redfish_pwd + snmp: + community: snmp_community + - network_address_detail: + - 192.96.25.1-192.96.25.255 + - ipmihost + - esxiserver + - sshserver + device_types: + - SERVER + ssh: + username: ssh_user + password: ssh_pwd + vmware: + username: vm_user + password: vmware_pwd + ipmi: + username: ipmi_user + password: ipmi_pwd + schedule: RunLater + cron: "0 0 9 ? * MON,WED,FRI *" + ignore_partial_failure: True + trap_destination: True + community_string: True + email_recipient: test_email@company.com + + - name: Discover servers with ca check enabled + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - SERVER + wsman: + username: user + password: password + ca_check: True + certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}" + + - name: Discover chassis with ca check enabled data + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - CHASSIS + redfish: + username: user + password: password + ca_check: True + certificate_data: "-----BEGIN CERTIFICATE-----\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + 
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + aqwertyuiopasdfghjklzxcvbnmasdasagasvv=\r\n + -----END CERTIFICATE-----" + + + +Return Values +------------- + +msg (always, str, Successfully deleted 1 discovery job(s).) + Overall status of the discovery operation. + + +discovery_status (when I(state) is C(present), dict, AnsibleMapping([('Completed', ['192.168.24.17', '192.168.24.20', '192.168.24.22']), ('Failed', ['192.168.24.15', '192.168.24.16', '192.168.24.18', '192.168.24.19', '192.168.24.21', 'host123']), ('DiscoveredDevicesByType', [AnsibleMapping([('Count', 3), ('DeviceType', 'SERVER')])]), ('DiscoveryConfigDiscoveredDeviceCount', 3), ('DiscoveryConfigEmailRecipient', 'myemail@dell.com'), ('DiscoveryConfigExpectedDeviceCount', 9), ('DiscoveryConfigGroupId', 125), ('JobDescription', 'D1'), ('JobEnabled', True), ('JobEndTime', '2021-01-01 06:27:29.99'), ('JobId', 12666), ('JobName', 'D1'), ('JobNextRun', None), ('JobProgress', '100'), ('JobSchedule', 'startnow'), ('JobStartTime', '2021-01-01 06:24:10.071'), ('JobStatusId', 2090), ('LastUpdateTime', '2021-01-01 06:27:30.001'), ('UpdatedBy', 'admin')])) + Details of the discovery job created or modified. + + If *job_wait* is true, Completed and Failed IPs are also listed. + + +discovery_ids (when discoveries with duplicate name exist for I(state) is C(present), list, [1234, 5678]) + IDs of the discoveries with duplicate names. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. 
If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) +- Sajna Shetty (@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst new file mode 100644 index 00000000..29a8b20c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst @@ -0,0 +1,191 @@ +.. _ome_domain_user_groups_module: + + +ome_domain_user_groups -- Create, modify, or delete an Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular +============================================================================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, or delete an Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` imports or modifies the Active Directory user group. + + ``absent`` deletes an existing Active Directory user group. + + + group_name (True, str, None) + The desired Active Directory user group name to be imported or removed. + + Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator. + + *group_name* value is case insensitive. + + + role (optional, str, None) + The desired roles and privilege for the imported Active Directory user group. + + OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER, FABRIC MANAGER, VIEWER. + + OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER. 
+ + *role* value is case insensitive. + + + directory_name (optional, str, None) + The directory name set while adding the Active Directory. + + *directory_name* is mutually exclusive with *directory_id*. + + + directory_id (optional, int, None) + The ID of the Active Directory. + + *directory_id* is mutually exclusive with *directory_name*. + + + domain_username (optional, str, None) + Active directory domain username. + + Example: username@domain or domain\username. + + + domain_password (optional, str, None) + Active directory domain password. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module supports ``check_mode`` and idempotency. + - Run this module from a system that has direct access to OpenManage Enterprise or OpenManage Enterprise Modular. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Create Active Directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + directory_name: directory_name + role: administrator + domain_username: username@domain + domain_password: domain_password + + - name: Update Active Directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + role: viewer + + - name: Delete active directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + group_name: administrators + + + +Return Values +------------- + +msg (always, str, Successfully imported the active directory user group.) + Overall status of the Active Directory user group operation. + + +domain_user_status (When I(state) is C(present)., dict, AnsibleMapping([('Description', None), ('DirectoryServiceId', 16097), ('Enabled', True), ('Id', '16617'), ('IsBuiltin', False), ('IsVisible', True), ('Locked', False), ('Name', 'Account Operators'), ('ObjectGuid', 'a491859c-031e-42a3-ae5e-0ab148ecf1d6'), ('ObjectSid', None), ('Oem', None), ('Password', None), ('PlainTextPassword', None), ('RoleId', '16'), ('UserName', 'Account Operators'), ('UserTypeId', 2)])) + Details of the domain user operation, when *state* is ``present``. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst new file mode 100644 index 00000000..4dcc4ae3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst @@ -0,0 +1,322 @@ +.. _ome_firmware_module: + + +ome_firmware -- Update firmware on PowerEdge devices and its components through OpenManage Enterprise +===================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module updates the firmware of PowerEdge devices and all its components through OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_service_tag (optional, list, None) + List of service tags of the targeted devices. + + Either *device_id* or *device_service_tag* can be used individually or together. + + This option is mutually exclusive with *device_group_names* and *devices*. + + + device_id (optional, list, None) + List of ids of the targeted device. + + Either *device_id* or *device_service_tag* can be used individually or together. + + This option is mutually exclusive with *device_group_names* and *devices*. 
+ + + device_group_names (optional, list, None) + Enter the name of the device group that contains the devices on which firmware needs to be updated. + + This option is mutually exclusive with *device_id* and *device_service_tag*. + + + dup_file (optional, path, None) + The path of the Dell Update Package (DUP) file that contains the firmware or drivers required to update the target system device or individual device components. + + This is mutually exclusive with *baseline_name*, *components*, and *devices*. + + + baseline_name (optional, str, None) + Enter the baseline name to update the firmware of all devices or list of devices that are not compliant. + + This option is mutually exclusive with *dup_file* and *device_group_names*. + + + components (optional, list, None) + List of components to be updated. + + If not provided, all components applicable are considered. + + This option is case sensitive. + + This is applicable to *device_service_tag*, *device_id*, and *baseline_name*. + + + devices (optional, list, None) + This option allows to select components on each device for firmware update. + + This option is mutually exclusive with *dup_file*, *device_group_names*, *device_id*, and *device_service_tag*. + + + id (optional, int, None) + The id of the target device to be updated. + + This option is mutually exclusive with *service_tag*. + + + service_tag (optional, str, None) + The service tag of the target device to be updated. + + This option is mutually exclusive with *id*. + + + components (optional, list, None) + The target components to be updated. If not specified, all applicable device components are considered. + + + + schedule (optional, str, RebootNow) + Select the schedule for the firmware update. + + If ``StageForNextReboot`` is chosen, the firmware will be staged and updated during the next reboot of the target device. + + If ``RebootNow`` is chosen, the firmware updates are applied immediately. 
+ + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Update firmware from DUP file using device ids + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 11111 + - 22222 + dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE" + + - name: Update firmware from a DUP file using a device service tags + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - KLBR111 + - KLBR222 + dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE" + + - name: Update firmware from a DUP file using a device group names + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - servers + dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE" + + - name: Update firmware using baseline name + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + + - name: Stage firmware for the next reboot using baseline name + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + schedule: StageForNextReboot + + - name: "Update firmware using baseline name and components." 
+ dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + components: + - BIOS + + - name: Update firmware of device components from a DUP file using a device ids in a baseline + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_id: + - 11111 + - 22222 + components: + - iDRAC with Lifecycle Controller + + - name: Update firmware of device components from a baseline using a device service tags under a baseline + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_service_tag: + - KLBR111 + - KLBR222 + components: + - IOM-SAS + + - name: Update firmware using baseline name with a device id and required components + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - id: 12345 + components: + - Lifecycle Controller + - id: 12346 + components: + - Enterprise UEFI Diagnostics + - BIOS + + - name: "Update firmware using baseline name with a device service tag and required components." + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - PERC H740P Adapter + - BIOS + - service_tag: GHIJK34 + components: + - OS Drivers Pack + + - name: "Update firmware using baseline name with a device service tag or device id and required components." 
+ dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - BOSS-S1 Adapter + - PowerEdge Server BIOS + - id: 12345 + components: + - iDRAC with Lifecycle Controller + + + +Return Values +------------- + +msg (always, str, Successfully submitted the firmware update job.) + Overall firmware update status. + + +update_status (success, dict, AnsibleMapping([('LastRun', 'None'), ('CreatedBy', 'user'), ('Schedule', 'startnow'), ('LastRunStatus', AnsibleMapping([('Id', 1111), ('Name', 'NotRun')])), ('Builtin', False), ('Editable', True), ('NextRun', 'None'), ('JobStatus', AnsibleMapping([('Id', 1111), ('Name', 'New')])), ('JobName', 'Firmware Update Task'), ('Visible', True), ('State', 'Enabled'), ('JobDescription', 'dup test'), ('Params', [AnsibleMapping([('Value', 'true'), ('Key', 'signVerify'), ('JobId', 11111)]), AnsibleMapping([('Value', 'false'), ('Key', 'stagingValue'), ('JobId', 11112)]), AnsibleMapping([('Value', 'false'), ('Key', 'complianceUpdate'), ('JobId', 11113)]), AnsibleMapping([('Value', 'INSTALL_FIRMWARE'), ('Key', 'operationName'), ('JobId', 11114)])]), ('Targets', [AnsibleMapping([('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')])), ('Data', 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1234567654321'), ('Id', 11115), ('JobId', 11116)])]), ('StartTime', 'None'), ('UpdatedBy', 'None'), ('EndTime', 'None'), ('Id', 11117), ('JobType', AnsibleMapping([('Internal', False), ('Id', 5), ('Name', 'Update_Task')]))])) + The firmware update job and progress details from the OME. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst new file mode 100644 index 00000000..673804ea --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst @@ -0,0 +1,260 @@ +.. _ome_firmware_baseline_module: + + +ome_firmware_baseline -- Create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular +================================================================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates or modifies a baseline. + + ``absent`` deletes an existing baseline. + + + baseline_name (optional, str, None) + Name of the baseline. + + This option is mutually exclusive with *baseline_id*. + + + baseline_id (optional, int, None) + ID of the existing baseline. + + This option is mutually exclusive with *baseline_name*. + + + new_baseline_name (optional, str, None) + New name of the baseline. 
+ + + baseline_description (optional, str, None) + Description for the baseline being created. + + + catalog_name (optional, str, None) + Name of the catalog to be associated with the baseline. + + + downgrade_enabled (optional, bool, None) + Indicates whether firmware downgrade is allowed for the devices in the baseline. + + This value will be set to ``True`` by default, if not provided during baseline creation. + + + is_64_bit (optional, bool, None) + Indicates if the repository contains 64-bit DUPs. + + This value will be set to ``True`` by default, if not provided during baseline creation. + + + device_ids (optional, list, None) + List of device IDs. + + This option is mutually exclusive with *device_service_tags* and *device_group_names*. + + + device_service_tags (optional, list, None) + List of device service tags. + + This option is mutually exclusive with *device_ids* and *device_group_names*. + + + device_group_names (optional, list, None) + List of group names. + + This option is mutually exclusive with *device_ids* and *device_service_tags*. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This option is applicable when *state* is ``present``. + + + job_wait_timeout (optional, int, 600) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. 
+ + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - *device_group_names* option is not applicable for OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create baseline for device IDs + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_ids: + - 1010 + - 2020 + + - name: Create baseline for servicetags + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + + - name: Create baseline for device groups without job tracking + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_group_names: + - "Group1" + - "Group2" + job_wait: no + + - name: Modify an existing baseline + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + 
ca_path: "/path/to/ca_cert.pem" + baseline_name: "existing_baseline_name" + new_baseline_name: "new_baseline_name" + baseline_description: "new baseline_description" + catalog_name: "catalog_other" + device_group_names: + - "Group3" + - "Group4" + - "Group5" + downgrade_enabled: no + is_64_bit: yes + + - name: Delete a baseline + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + baseline_name: "baseline_name" + + + +Return Values +------------- + +msg (always, str, Successfully created the firmware baseline.) + Overall status of the firmware baseline operation. + + +baseline_status (success, dict, AnsibleMapping([('CatalogId', 123), ('Description', 'BASELINE DESCRIPTION'), ('DeviceComplianceReports', []), ('DowngradeEnabled', True), ('Id', 23), ('Is64Bit', True), ('Name', 'my_baseline'), ('RepositoryId', 123), ('RepositoryName', 'catalog123'), ('RepositoryType', 'HTTP'), ('Targets', [AnsibleMapping([('Id', 10083), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))]), AnsibleMapping([('Id', 10076), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 11235), ('TaskStatusId', 2060)])) + Details of the baseline status. + + +job_id (When baseline job is in running state, int, 10123) + Job ID of the baseline task. + + +baseline_id (When I(state) is C(absent), int, 10123) + ID of the deleted baseline. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to retrieve baseline list either because the device ID(s) entered are invalid'), ('Resolution', 'Make sure the entered device ID(s) are valid and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of http error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst new file mode 100644 index 00000000..80b4c507 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst @@ -0,0 +1,189 @@ +.. _ome_firmware_baseline_compliance_info_module: + + +ome_firmware_baseline_compliance_info -- Retrieves baseline compliance details on OpenManage Enterprise +======================================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to retrieve firmware compliance for a list of devices, or against a specified baseline on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + baseline_name (optional, str, None) + Name of the baseline, for which the device compliance report is generated. + + This option is mandatory for generating baseline based device compliance report. + + *baseline_name* is mutually exclusive with *device_ids*, *device_service_tags* and *device_group_names*. + + + device_ids (optional, list, None) + A list of unique identifier for device based compliance report. + + Either *device_ids*, *device_service_tags* or *device_group_names* is required to generate device based compliance report. + + *device_ids* is mutually exclusive with *device_service_tags*, *device_group_names* and *baseline_name*. + + Devices without reports are ignored. + + + device_service_tags (optional, list, None) + A list of service tags for device based compliance report. 
+ + Either *device_ids*, *device_service_tags* or *device_group_names* is required to generate device based compliance report. + + *device_service_tags* is mutually exclusive with *device_ids*, *device_group_names* and *baseline_name*. + + Devices without reports are ignored. + + + device_group_names (optional, list, None) + A list of group names for device based compliance report. + + Either *device_ids*, *device_service_tags* or *device_group_names* is required to generate device based compliance report. + + *device_group_names* is mutually exclusive with *device_ids*, *device_service_tags* and *baseline_name*. + + Devices without reports are ignored. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Retrieves device based compliance report for specified device IDs + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_ids: + - 11111 + - 22222 + + - name: Retrieves device based compliance report for specified service Tags + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tags: + - MXL1234 + - MXL4567 + + - name: Retrieves device based compliance report for specified group names + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - "group1" + - "group2" + + - name: Retrieves device compliance report for a specified baseline + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + + + +Return Values +------------- + +msg (on error, str, Failed to fetch the compliance baseline information.) + Overall baseline compliance report status. 
+ + +baseline_compliance_info (success, dict, [AnsibleMapping([('CatalogId', 53), ('ComplianceSummary', AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('NumberOfCritical', 2), ('NumberOfDowngrade', 0), ('NumberOfNormal', 0), ('NumberOfWarning', 0)])), ('Description', ''), ('DeviceComplianceReports', [AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('ComponentComplianceReports', [AnsibleMapping([('ComplianceDependencies', []), ('ComplianceStatus', 'DOWNGRADE'), ('Criticality', 'Ok'), ('CurrentVersion', 'OSC_1.1'), ('Id', 1258), ('ImpactAssessment', ''), ('Name', 'OS COLLECTOR 2.1'), ('Path', 'FOLDER04118304M/2/Diagnostics_Application_JCCH7_WN64_4.0_A00_01.EXE'), ('PrerequisiteInfo', ''), ('RebootRequired', False), ('SourceName', 'DCIM:INSTALLED#802__OSCollector.Embedded.1'), ('TargetIdentifier', '101734'), ('UniqueIdentifier', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'), ('UpdateAction', 'DOWNGRADE'), ('Uri', 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX'), ('Version', '4.0')]), AnsibleMapping([('ComplianceDependencies', []), ('ComplianceStatus', 'CRITICAL'), ('Criticality', 'Recommended'), ('CurrentVersion', 'DN02'), ('Id', 1259), ('ImpactAssessment', ''), ('Name', 'TOSHIBA AL14SE 1.8 TB 2.5 12Gb 10K 512n SAS HDD Drive'), ('Path', 'FOLDER04086111M/1/SAS-Drive_Firmware_VDGFM_WN64_DN03_A00.EXE'), ('PrerequisiteInfo', ''), ('RebootRequired', True), ('SourceName', 'DCIM:INSTALLED#304_C_Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'), ('TargetIdentifier', '103730'), ('UniqueIdentifier', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'), ('UpdateAction', 'UPGRADE'), ('Uri', 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX'), ('Version', 'DN03')])]), ('DeviceId', 11603), ('DeviceModel', 'PowerEdge R630'), ('DeviceName', None), ('DeviceTypeId', 1000), ('DeviceTypeName', 'CPGCGS'), ('FirmwareStatus', 'Non-Compliant'), ('Id', 194), ('RebootRequired', True), ('ServiceTag', 'MXL1234')])]), ('DowngradeEnabled', 
True), ('Id', 53), ('Is64Bit', False), ('LastRun', '2019-09-27 05:08:16.301'), ('Name', 'baseline1'), ('RepositoryId', 43), ('RepositoryName', 'catalog2'), ('RepositoryType', 'CIFS'), ('Targets', [AnsibleMapping([('Id', 11603), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 11710), ('TaskStatusId', 0)])]) + Details of the baseline compliance report. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to retrieve baseline list either because the device ID(s) entered are invalid'), ('Resolution', 'Make sure the entered device ID(s) are valid and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst new file mode 100644 index 00000000..8b03396f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst @@ -0,0 +1,128 @@ +.. _ome_firmware_baseline_info_module: + + +ome_firmware_baseline_info -- Retrieves baseline details from OpenManage Enterprise +=================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module retrieves the list and details of all the baselines on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + baseline_name (optional, str, None) + Name of the baseline.If *baseline_name* is not provided, all the available firmware baselines are returned. 
+ + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Retrieve details of all the available firmware baselines + dellemc.openmanage.ome_firmware_baseline_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve details of a specific firmware baseline identified by its baseline name + dellemc.openmanage.ome_firmware_baseline_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + + + +Return Values +------------- + +msg (on error, str, Successfully fetched firmware baseline information.) + Overall baseline information. 
+ + +baseline_info (success, dict, AnsibleMapping([('@odata.id', '/api/UpdateService/Baselines(239)'), ('@odata.type', '#UpdateService.Baselines'), ('CatalogId', 22), ('ComplianceSummary', AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('NumberOfCritical', 1), ('NumberOfDowngrade', 0), ('NumberOfNormal', 0), ('NumberOfWarning', 0)])), ('Description', 'baseline_description'), ('DeviceComplianceReports@odata.navigationLink', '/api/UpdateService/Baselines(239)/DeviceComplianceReports'), ('DowngradeEnabled', True), ('Id', 239), ('Is64Bit', True), ('LastRun', '2020-05-22 16:42:40.307'), ('Name', 'baseline_name'), ('RepositoryId', 12), ('RepositoryName', 'HTTP DELL'), ('RepositoryType', 'DELL_ONLINE'), ('Targets', [AnsibleMapping([('Id', 10342), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 41415), ('TaskStatusId', 2060)])) + Details of the baselines. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst new file mode 100644 index 00000000..99983a76 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst @@ -0,0 +1,331 @@ +.. _ome_firmware_catalog_module: + + +ome_firmware_catalog -- Create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular +============================================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates or modifies a catalog. + + ``absent`` deletes an existing catalog. + + + catalog_name (optional, list, None) + Name of the firmware catalog to be created. + + This option is mutually exclusive with *catalog_id*. + + Provide the list of firmware catalog names that are supported when *state* is ``absent``. + + + new_catalog_name (optional, str, None) + New name of the firmware catalog. + + + catalog_id (optional, list, None) + ID of the catalog. + + This option is mutually exclusive with *catalog_name*. + + Provide the list of firmware catalog IDs that are supported when *state* is ``absent``. + + + catalog_description (optional, str, None) + Description for the catalog. + + + source (optional, str, None) + The IP address of the system where the firmware catalog is stored on the local network. + + By default, this option is set to downloads.dell.com when *repository_type* is ``DELL_ONLINE``. + + + source_path (optional, str, None) + Specify the complete path of the catalog file location without the file name. + + This option is ignored when *repository_type* is ``DELL_ONLINE``. + + + file_name (optional, str, None) + Catalog file name associated with the *source_path*. + + This option is ignored when *repository_type* is ``DELL_ONLINE``. + + + repository_type (optional, str, None) + Type of repository. The supported types are NFS, CIFS, HTTP, HTTPS, and DELL_ONLINE. + + + repository_username (optional, str, None) + User name of the repository where the catalog is stored. + + This option is mandatory when *repository_type* is CIFS. + + This option is ignored when *repository_type* is ``DELL_ONLINE``. + + + repository_password (optional, str, None) + Password to access the repository. + + This option is mandatory when *repository_type* is CIFS. + + This option is ignored when *repository_type* is ``DELL_ONLINE``. 
+ + ``NOTE`` The module always reports the changed status, when this is provided. + + + repository_domain (optional, str, None) + Domain name of the repository. + + This option is ignored when *repository_type* is ``DELL_ONLINE``. + + + check_certificate (optional, bool, False) + If ``True``, the certificate warnings are ignored when *repository_type* is HTTPS. If ``False``, the certificate warnings are not ignored. + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + This option is applicable when *state* is ``present``. + + + job_wait_timeout (optional, int, 600) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - If *repository_password* is provided, then the module always reports the changed status. + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - This module supports ``check_mode``. 
+ + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create a catalog from HTTPS repository + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "HTTPS" + source: "downloads.dell.com" + source_path: "catalog" + file_name: "catalog.gz" + check_certificate: True + + - name: Create a catalog from HTTP repository + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "HTTP" + source: "downloads.dell.com" + source_path: "catalog" + file_name: "catalog.gz" + + - name: Create a catalog using CIFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "CIFS" + source: "192.167.0.1" + source_path: "cifs/R940" + file_name: "catalog.gz" + repository_username: "repository_username" + repository_password: "repository_password" + repository_domain: "repository_domain" + + - name: Create a catalog using NFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "NFS" + source: "192.166.0.2" + source_path: "/nfs/R940" + file_name: "catalog.xml" + + - name: Create a catalog using repository from Dell.com + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + 
catalog_description: "catalog_description" + repository_type: "DELL_ONLINE" + check_certificate: True + + - name: Modify a catalog using a repository from CIFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "new catalog_description" + repository_type: "CIFS" + source: "192.167.0.2" + source_path: "cifs/R941" + file_name: "catalog1.gz" + repository_username: "repository_username" + repository_password: "repository_password" + repository_domain: "repository_domain" + + - name: Modify a catalog using a repository from Dell.com + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_id: 10 + new_catalog_name: "new_catalog_name" + repository_type: "DELL_ONLINE" + catalog_description: "catalog_description" + + - name: Delete catalog using catalog name + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_name: ["catalog_name1", "catalog_name2"] + + - name: Delete catalog using catalog id + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_id: [11, 34] + + + +Return Values +------------- + +msg (always, str, Successfully triggered the job to create a catalog with Task ID : 10094) + Overall status of the firmware catalog operation. 
+ + +catalog_status (When I(state) is C(present), dict, AnsibleMapping([('AssociatedBaselines', []), ('BaseLocation', None), ('BundlesCount', 0), ('Filename', 'catalog.gz'), ('Id', 0), ('LastUpdated', None), ('ManifestIdentifier', None), ('ManifestVersion', None), ('NextUpdate', None), ('PredecessorIdentifier', None), ('ReleaseDate', None), ('ReleaseIdentifier', None), ('Repository', AnsibleMapping([('CheckCertificate', True), ('Description', 'HTTPS Desc'), ('DomainName', None), ('Id', None), ('Name', 'catalog4'), ('Password', None), ('RepositoryType', 'HTTPS'), ('Source', 'company.com'), ('Username', None)])), ('Schedule', None), ('SourcePath', 'catalog'), ('Status', None), ('TaskId', 10094)])) + Details of the catalog operation. + + +job_id (When catalog job is in a running state, int, 10123) + Job ID of the catalog task. + + +catalog_id (When I(state) is C(absent), int, 10123) + IDs of the deleted catalog. + + +associated_baselines (When I(state) is C(absent), list, [AnsibleMapping([('BaselineId', 24), ('BaselineName', 'new')]), AnsibleMapping([('BaselineId', 25), ('BaselineName', 'c7')]), AnsibleMapping([('BaselineId', 27), ('BaselineName', 'c4')])]) + IDs of the baselines associated with catalog. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to create or update the catalog because a repository with the same name already exists.'), ('Resolution', 'Enter a different name and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the http error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst new file mode 100644 index 00000000..d5abeab1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst @@ -0,0 +1,220 @@ +.. _ome_groups_module: + + +ome_groups -- Manages static device groups on OpenManage Enterprise +=================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, and delete static device groups on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` allows to create or modify a device group. + + ``absent`` allows to delete a device group. + + + name (optional, list, None) + Name of the device group to be created, modified, or deleted. + + If *state* is absent, multiple names can be provided. + + This option is case insensitive. + + This option is mutually exclusive with *group_id*. + + + group_id (optional, list, None) + ID of the device group to be created, modified, or deleted. + + If *state* is absent, multiple IDs can be provided. + + This option is mutually exclusive with *name*. + + + new_name (optional, str, None) + New name for the existing device group. + + This is applicable only when *state* is ``present``. + + + description (optional, str, None) + Description for the device group. + + This is applicable only when *state* is ``present``. + + + parent_group_name (optional, str, Static Groups) + Name of the parent device group under which the device group to be created or modified. + + This is applicable only when *state* is ``present``. 
+ + ``NOTE`` If device group with such a name does not exist, device group with *parent_group_name* is created. + + This option is case insensitive. + + This option is mutually exclusive with *parent_group_id*. + + + parent_group_id (optional, int, None) + ID of the parent device group under which the device group to be created or modified. + + This is applicable only when *state* is ``present``. + + This option is mutually exclusive with *parent_group_name*. + + + hostname (True, str, None) + OpenManage Enterprise IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise username. + + + password (True, str, None) + OpenManage Enterprise password. + + + port (optional, int, 443) + OpenManage Enterprise HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module manages only static device groups on Dell EMC OpenManage Enterprise. + - If a device group with the name *parent_group_name* does not exist, a new device group with the same name is created. + - Make sure the entered parent group is not the descendant of the provided group. + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Create a new device group + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "group 1" + description: "Group 1 description" + parent_group_name: "group parent 1" + + - name: Modify a device group using the group ID + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 1234 + description: "Group description updated" + parent_group_name: "group parent 2" + + - name: Delete a device group using the device group name + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + name: "group 1" + + - name: Delete multiple device groups using the group IDs + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + group_id: + - 1234 + - 5678 + + + +Return Values +------------- + +msg (always, str, Successfully deleted the device group(s).) + Overall status of the device group operation. + + +group_status (success, dict, AnsibleMapping([('Description', 'my group description'), ('Id', 12123), ('MembershipTypeId', 12), ('Name', 'group 1'), ('ParentId', 12345), ('TypeId', 3000), ('IdOwner', 30), ('CreatedBy', 'admin'), ('CreationTime', '2021-01-01 10:10:10.100'), ('DefinitionDescription', 'UserDefined'), ('DefinitionId', 400), ('GlobalStatus', 5000), ('HasAttributes', False), ('UpdatedBy', ''), ('UpdatedTime', '2021-01-01 11:11:10.100'), ('Visible', True)])) + Details of the device group operation status. + + +group_ids (when I(state) is C(absent), list, [1234, 5678]) + List of the deleted device group IDs. + + +invalid_groups (when I(state) is C(absent), list, [1234, 5678]) + List of the invalid device group IDs or names. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGRP9013'), ('RelatedProperties', []), ('Message', 'Unable to update group 12345 with the provided parent 54321 because a group/parent relationship already exists.'), ('MessageArgs', ['12345', '54321']), ('Severity', 'Warning'), ('Resolution', 'Make sure the entered parent ID does not create a bidirectional relationship and retry the operation.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst new file mode 100644 index 00000000..733c837c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst @@ -0,0 +1,316 @@ +.. _ome_identity_pool_module: + + +ome_identity_pool -- Manages identity pool settings on OpenManage Enterprise +============================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, or delete a single identity pool on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` modifies an existing identity pool. If the provided *pool_name* does not exist, it creates an identity pool. ``absent`` deletes an existing identity pool. + + + pool_name (True, str, None) + This option is mandatory for *state* when creating, modifying and deleting an identity pool. 
+ + + new_pool_name (optional, str, None) + After creating an identity pool, *pool_name* can be changed to *new_pool_name*. + + This option is ignored when creating an identity pool. + + + pool_description (optional, str, None) + Description of the identity pool. + + + ethernet_settings (optional, dict, None) + Applicable for creating and modifying an identity pool using Ethernet settings. + + *starting_mac_address* and *identity_count* are required to create an identity pool. + + + starting_mac_address (optional, str, None) + Starting MAC address of the ethernet setting. + + + identity_count (optional, int, None) + Number of MAC addresses. + + + + fcoe_settings (optional, dict, None) + Applicable for creating and modifying an identity pool using FCoE settings. + + *starting_mac_address* and *identity_count* are required to create an identity pool. + + + starting_mac_address (optional, str, None) + Starting MAC Address of the FCoE setting. + + + identity_count (optional, int, None) + Number of MAC addresses. + + + + iscsi_settings (optional, dict, None) + Applicable for creating and modifying an identity pool using ISCSI settings. + + *starting_mac_address*, *identity_count*, *iqn_prefix*, *ip_range* and *subnet_mask* are required to create an identity pool. + + + starting_mac_address (optional, str, None) + Starting MAC address of the iSCSI setting. This is a required option for the iSCSI setting. + + + identity_count (optional, int, None) + Number of MAC addresses. + + + initiator_config (optional, dict, None) + Applicable for creating and modifying an identity pool using iSCSI Initiator settings. + + + iqn_prefix (optional, str, None) + IQN prefix addresses. + + + + initiator_ip_pool_settings (optional, dict, None) + Applicable for creating and modifying an identity pool using ISCSI Initiator IP pool settings. + + + ip_range (optional, str, None) + Range of non-multicast IP addresses. + + + subnet_mask (optional, str, None) + Subnet mask for *ip_range*. 
+ + + gateway (optional, str, None) + IP address of gateway. + + + primary_dns_server (optional, str, None) + IP address of the primary DNS server. + + + secondary_dns_server (optional, str, None) + IP address of the secondary DNS server. + + + + + fc_settings (optional, dict, None) + Applicable for creating and modifying an identity pool using fibre channel (FC) settings. + + This option allows OpenManage Enterprise to generate a Worldwide port name (WWPN) and Worldwide node name (WWNN) address. + + The value 0x2001 is prefixed to the starting address for the generation of a WWPN, and 0x2000 for a WWNN. + + *starting_address* and *identity_count* are required to create an identity pool. + + + starting_address (optional, str, None) + Starting MAC address of the FC setting. *starting_address* is a required option to create FC settings. + + + identity_count (optional, int, None) + Number of MAC addresses. *identity_count* is a required option to create FC settings. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. 
note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create an identity pool using ethernet, FCoE, iSCSI and FC settings + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + pool_name: "pool1" + pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings" + ethernet_settings: + starting_mac_address: "50:50:50:50:50:00" + identity_count: 60 + fcoe_settings: + starting_mac_address: "70:70:70:70:70:00" + identity_count: 75 + iscsi_settings: + starting_mac_address: "60:60:60:60:60:00" + identity_count: 30 + initiator_config: + iqn_prefix: "iqn.myprefix." + initiator_ip_pool_settings: + ip_range: "10.33.0.1-10.33.0.255" + subnet_mask: "255.255.255.0" + gateway: "192.168.4.1" + primary_dns_server : "10.8.8.8" + secondary_dns_server : "8.8.8.8" + fc_settings: + starting_address: "30:30:30:30:30:00" + identity_count: 45 + + - name: Create an identity pool using only ethernet settings + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + pool_description: "create identity pool with ethernet" + ethernet_settings: + starting_mac_address: "aa-bb-cc-dd-ee-aa" + identity_count: 80 + + - name: Modify an identity pool + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + new_pool_name: "pool3" + pool_description: "modifying identity pool with ethernet and fcoe settings" + ethernet_settings: + starting_mac_address: "90-90-90-90-90-90" + identity_count: 61 + fcoe_settings: + starting_mac_address: "aabb.ccdd.5050" + identity_count: 77 + + - name: Modify an identity pool 
using iSCSI and FC settings + dellemc.openmanage.ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool_new" + new_pool_name: "pool_new2" + pool_description: "modifying identity pool with iscsi and fc settings" + iscsi_settings: + identity_count: 99 + initiator_config: + iqn_prefix: "iqn1.myprefix2." + initiator_ip_pool_settings: + gateway: "192.168.4.5" + fc_settings: + starting_address: "10:10:10:10:10:10" + identity_count: 98 + + - name: Delete an identity pool + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + pool_name: "pool2" + + + +Return Values +------------- + +msg (always, str, Successfully created an identity pool.) + Overall status of the identity pool operation. + + +pool_status (success, dict, AnsibleMapping([('Id', 29), ('IsSuccessful', True), ('Issues', [])])) + Details of the user operation, when *state* is ``present``. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to process the request because an error occurred: Ethernet-MAC Range overlap found (in this Identity Pool or in a different one) .'), ('MessageArgs', ['Ethernet-MAC Range overlap found (in this Identity Pool or in a different one)"']), ('MessageId', 'CGEN6001'), ('RelatedProperties', []), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) +- Deepak Joshi(@Dell-Deepak-Joshi)) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst new file mode 100644 index 00000000..cd417016 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst @@ -0,0 +1,157 @@ +.. _ome_job_info_module: + + +ome_job_info -- Get job details for a given job ID or an entire job queue on OpenMange Enterprise +================================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module retrieves job details for a given job ID or an entire job queue on OpenMange Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + job_id (optional, int, None) + Unique ID of the job. + + + system_query_options (optional, dict, None) + Options for pagination of the output. + + + top (optional, int, None) + Number of records to return. Default value is 100. + + + skip (optional, int, None) + Number of records to skip. Default value is 0. + + + filter (optional, str, None) + Filter records by the values supported. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. 
+ + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Get all jobs details + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Get job details for id + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + job_id: 12345 + + - name: Get filtered job details + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + top: 2 + skip: 1 + filter: "JobType/Id eq 8" + + + + +Return Values +------------- + +msg (always, str, Successfully fetched the job info) + Overall status of the job facts operation. 
+ + +job_info (success, dict, AnsibleMapping([('value', [AnsibleMapping([('Builtin', False), ('CreatedBy', 'system'), ('Editable', True), ('EndTime', None), ('Id', 12345), ('JobDescription', 'Refresh Inventory for Device'), ('JobName', 'Refresh Inventory for Device'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 8), ('Internal', False), ('Name', 'Inventory_Task')])), ('LastRun', '2000-01-29 10:51:34.776'), ('LastRunStatus', AnsibleMapping([('Id', 2060), ('Name', 'Completed')])), ('NextRun', None), ('Params', []), ('Schedule', ''), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', "''"), ('Id', 123123), ('JobId', 12345), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('Visible', True)])])])) + Details of the OpenManage Enterprise jobs. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst new file mode 100644 index 00000000..798f41bc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst @@ -0,0 +1,143 @@ +.. _ome_network_port_breakout_module: + + +ome_network_port_breakout -- This module allows to automate the port portioning or port breakout to logical sub ports +===================================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to automate breaking out of IOMs in fabric mode into logical sub ports. + +The port breakout operation is only supported in OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + target_port (True, str, None) + The ID of the port in the switch to breakout. Enter the port ID in the format: service tag:port. For example, 2HB7NX2:ethernet1/1/13. + + + breakout_type (True, str, None) + The preferred breakout type. For example, 4X10GE. + + To revoke the default breakout configuration, enter 'HardwareDefault'. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Port breakout configuration + dellemc.openmanage.ome_network_port_breakout: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "1X40GE" + + - name: Revoke the default breakout configuration + dellemc.openmanage.ome_network_port_breakout: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "HardwareDefault" + + + +Return Values +------------- + +msg (always, str, Port breakout configuration job submitted successfully.) + Overall status of the port configuration. + + +breakout_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'root'), ('Editable', True), ('EndTime', None), ('Id', 11111), ('JobDescription', ''), ('JobName', 'Breakout Port'), ('JobStatus', AnsibleMapping([('Id', 1112), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 3), ('Internal', False), ('Name', 'DeviceAction_Task')])), ('LastRun', None), ('LastRunStatus', AnsibleMapping([('Id', 1113), ('Name', 'NotRun')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 11111), ('Key', 'operationName'), ('Value', 'CONFIGURE_PORT_BREAK_OUT')]), AnsibleMapping([('JobId', 11111), ('Key', 'interfaceId'), ('Value', '2HB7NX2:phy-port1/1/11')]), AnsibleMapping([('JobId', 11111), ('Key', 'breakoutType'), ('Value', '1X40GE')])]), ('Schedule', 'startnow'), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 11112), ('JobId', 34206), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('UserGenerated', True), ('Visible', True)])) + Details of the OpenManage Enterprise jobs. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst new file mode 100644 index 00000000..e5dc4bdf --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst @@ -0,0 +1,206 @@ +.. _ome_network_vlan_module: + + +ome_network_vlan -- Create, modify & delete a VLAN +================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to, + +Create a VLAN on OpenManage Enterprise. + +Modify or delete an existing VLAN on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates a new VLAN or modifies an existing VLAN. + + ``absent`` deletes an existing VLAN. + + *WARNING* Deleting a VLAN can impact the network infrastructure. + + + name (True, str, None) + Provide the *name* of the VLAN to be created, deleted or modified. + + + new_name (optional, str, None) + Provide the *name* of the VLAN to be modified. + + + description (optional, str, None) + Short description of the VLAN to be created or modified. + + + vlan_minimum (optional, int, None) + The minimum VLAN value of the range. + + + vlan_maximum (optional, int, None) + The maximum VLAN value of the range. 
A single value VLAN is created if the vlan_maximum and vlan_minimum values are the same.
code-block:: yaml+jinja + + + --- + - name: Create a VLAN range + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 35 + vlan_maximum: 40 + tags: create_vlan_range + + - name: Create a VLAN with a single value + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan2" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 127 + vlan_maximum: 127 + tags: create_vlan_single + + - name: Modify a VLAN + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + new_name: "vlan_gold1" + description: "new description" + type: "General Purpose (Gold)" + vlan_minimum: 45 + vlan_maximum: 50 + tags: modify_vlan + + - name: Delete a VLAN + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "vlan1" + tags: delete_vlan + + + +Return Values +------------- + +msg (always, str, Successfully created the VLAN.) + Overall status of the VLAN operation. 
+ + +vlan_status (when I(state=present), dict, AnsibleMapping([('@odata.context', '/api/$metadata#NetworkConfigurationService.Network'), ('@odata.type', '#NetworkConfigurationService.Network'), ('@odata.id', '/api/NetworkConfigurationService/Networks(1234)'), ('Id', 1234), ('Name', 'vlan1'), ('Description', 'VLAN description'), ('VlanMaximum', 130), ('VlanMinimum', 140), ('Type', 1), ('CreatedBy', 'admin'), ('CreationTime', '2020-01-01 05:54:36.113'), ('UpdatedBy', None), ('UpdatedTime', '2020-01-01 05:54:36.113'), ('InternalRefNWUUId', '6d6effcc-eca4-44bd-be07-1234ab5cd67e')])) + Details of the VLAN that is either created or modified. + + +error_info (on HTTP error, dict, AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CTEM1043'), ('RelatedProperties', []), ('Message', 'Unable to create or update the network because the entered VLAN minimum 0 is not within a valid range ( 1 - 4000 or 4021 - 4094 ).'), ('MessageArgs', ['0', '1', '4000', '4021', '4094']), ('Severity', 'Warning'), ('Resolution', 'Enter a valid VLAN minimum as identified in the message and retry the operation.')])])])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst new file mode 100644 index 00000000..266e67d9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst @@ -0,0 +1,144 @@ +.. _ome_network_vlan_info_module: + + +ome_network_vlan_info -- Retrieves the information about networks VLAN(s) present in OpenManage Enterprise +========================================================================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to retrieve the following. - A list of all the network VLANs with their detailed information. - Information about a specific network VLAN using VLAN *id* or VLAN *name*. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + id (optional, int, None) + A unique identifier of the network VLAN available in the device. + + *id* and *name* are mutually exclusive. + + + name (optional, str, None) + A unique name of the network VLAN available in the device. + + *name* and *id* are mutually exclusive. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Retrieve information about all network VLANs(s) available in the device + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve information about a network VLAN using the VLAN ID + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + id: 12345 + + - name: Retrieve information about a network VLAN using the VLAN name + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Network VLAN - 1" + + + +Return Values +------------- + +msg (success, str, AnsibleMapping([('msg', 'Successfully retrieved the network VLAN information.'), ('network_vlan_info', [AnsibleMapping([('CreatedBy', 'admin'), ('CreationTime', '2020-09-02 18:48:42.129'), ('Description', 'Description of Logical Network - 1'), ('Id', 20057), ('InternalRefNWUUId', '42b9903d-93f8-4184-adcf-0772e4492f71'), ('Name', 'Network VLAN - 1'), ('Type', AnsibleMapping([('Description', 'This is the network for general purpose traffic. QOS Priority : Bronze.'), ('Id', 1), ('Name', 'General Purpose (Bronze)'), ('NetworkTrafficType', 'Ethernet'), ('QosType', AnsibleMapping([('Id', 4), ('Name', 'Bronze')])), ('VendorCode', 'GeneralPurpose')])), ('UpdatedBy', None), ('UpdatedTime', '2020-09-02 18:48:42.129'), ('VlanMaximum', 111), ('VlanMinimum', 111)]), AnsibleMapping([('CreatedBy', 'admin'), ('CreationTime', '2020-09-02 18:49:11.507'), ('Description', 'Description of Logical Network - 2'), ('Id', 20058), ('InternalRefNWUUId', 'e46ccb3f-ef57-4617-ac76-46c56594005c'), ('Name', 'Network VLAN - 2'), ('Type', AnsibleMapping([('Description', 'This is the network for general purpose traffic. 
QOS Priority : Silver.'), ('Id', 2), ('Name', 'General Purpose (Silver)'), ('NetworkTrafficType', 'Ethernet'), ('QosType', AnsibleMapping([('Id', 3), ('Name', 'Silver')])), ('VendorCode', 'GeneralPurpose')])), ('UpdatedBy', None), ('UpdatedTime', '2020-09-02 18:49:11.507'), ('VlanMaximum', 112), ('VlanMinimum', 112)])])])) + Detailed information of the network VLAN(s). + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Deepak Joshi(@deepakjoshishri) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst new file mode 100644 index 00000000..461f1ed5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst @@ -0,0 +1,167 @@ +.. _ome_powerstate_module: + + +ome_powerstate -- Performs the power management operations on OpenManage Enterprise +=================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module performs the supported power management operations on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + power_state (True, str, None) + Desired end power state. + + + device_service_tag (optional, str, None) + Targeted device service tag. 
+ + *device_service_tag* is mutually exclusive with *device_id*. + + + device_id (optional, int, None) + Targeted device id. + + *device_id* is mutually exclusive with *device_service_tag*. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Power state operation based on device id + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 11111 + power_state: "off" + + - name: Power state operation based on device service tag + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "KLBR111" + power_state: "on" + + - name: Power state operation based on list of device ids + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: "{{ item.device_id }}" + power_state: "{{ item.state }}" + with_items: + - { "device_id": 11111, "state": "on" } + - { "device_id": 22222, "state": "off" } + + - name: Power state operation based on list of device service tags + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "{{ item.service_tag }}" + power_state: "{{ item.state }}" + with_items: + - { "service_tag": "KLBR111", "state": "on" } + - { "service_tag": "KLBR222", "state": "off" } + + + +Return Values +------------- + +msg (always, str, Power State operation job submitted successfully.) + Overall power state operation job status. 
+ + +job_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'user'), ('Editable', True), ('EndTime', None), ('Id', 11111), ('JobDescription', 'DeviceAction_Task'), ('JobName', 'DeviceAction_Task_PowerState'), ('JobStatus', AnsibleMapping([('Id', 1111), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 1), ('Internal', False), ('Name', 'DeviceAction_Task')])), ('LastRun', '2019-04-01 06:39:02.69'), ('LastRunStatus', AnsibleMapping([('Id', 1112), ('Name', 'Running')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 11111), ('Key', 'powerState'), ('Value', '2')]), AnsibleMapping([('JobId', 11111), ('Key', 'operationName'), ('Value', 'POWER_CONTROL')])]), ('Schedule', ''), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 11112), ('JobId', 11111), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('Visible', True)])) + Power state operation job and progress details from the OME. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst new file mode 100644 index 00000000..e0b5f0ee --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst @@ -0,0 +1,470 @@ +.. _ome_profile_module: + + +ome_profile -- Create, modify, delete, assign, unassign and migrate a profile on OpenManage Enterprise +====================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, delete, assign, unassign, and migrate a profile on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, create) + ``create`` creates new profiles. + + ``modify`` modifies an existing profile. Only *name*, *description*, *boot_to_network_iso*, and *attributes* can be modified. + + ``delete`` deletes an existing profile. + + ``assign`` Deploys an existing profile on a target device and returns a task ID. + + ``unassign`` unassigns a profile from a specified target and returns a task ID. + + ``migrate`` migrates an existing profile and returns a task ID. + + + name_prefix (optional, str, Profile) + The name provided when creating a profile is used a prefix followed by the number assigned to it by OpenManage Enterprise. + + This is applicable only for a create operation. + + This option is mutually exclusive with *name*. + + + name (optional, str, None) + Name of the profile. + + This is applicable for modify, delete, assign, unassign, and migrate operations. + + This option is mutually exclusive with *name_prefix* and *number_of_profiles*. + + + new_name (optional, str, None) + New name of the profile. + + Applicable when *command* is ``modify``. + + + number_of_profiles (optional, int, 1) + Provide the number of profiles to be created. + + This is applicable when *name_prefix* is used with ``create``. + + This option is mutually exclusive with *name*. + + Openmanage Enterprise can create a maximum of 100 profiles. + + + template_name (optional, str, None) + Name of the template for creating the profile(s). + + This is applicable when *command* is ``create``. + + This option is mutually exclusive with *template_id*. + + + template_id (optional, int, None) + ID of the template. + + This is applicable when *command* is ``create``. + + This option is mutually exclusive with *template_name*. + + + device_id (optional, int, None) + ID of the target device. + + This is applicable when *command* is ``assign`` and ``migrate``. + + This option is mutually exclusive with *device_service_tag*. 
+ + + device_service_tag (optional, str, None) + Identifier of the target device. + + This is typically 7 to 8 characters in length. + + Applicable when *command* is ``assign``, and ``migrate``. + + This option is mutually exclusive with *device_id*. + + If the device does not exist when *command* is ``assign`` then the profile is auto-deployed. + + + description (optional, str, None) + Description of the profile. + + + boot_to_network_iso (optional, dict, None) + Details of the Share iso. + + Applicable when *command* is ``create``, ``assign``, and ``modify``. + + + boot_to_network (True, bool, None) + Enable or disable a network share. + + + share_type (optional, str, None) + Type of network share. + + + share_ip (optional, str, None) + IP address of the network share. + + + share_user (optional, str, None) + User name when *share_type* is ``CIFS``. + + + share_password (optional, str, None) + User password when *share_type* is ``CIFS``. + + + workgroup (optional, str, None) + User workgroup when *share_type* is ``CIFS``. + + + iso_path (optional, str, None) + Specify the full ISO path including the share name. + + + iso_timeout (optional, int, 4) + Set the number of hours that the network ISO file will remain mapped to the target device(s). + + + + filters (optional, dict, None) + Filters the profiles based on selected criteria. + + This is applicable when *command* is ``delete`` or ``unassign``. + + This supports suboption *ProfileIds* which takes a list of profile IDs. + + This also supports OData filter expressions with the suboption *Filters*. + + See OpenManage Enterprise REST API guide for the filtering options available. + + *WARNING* When this option is used in case of ``unassign``, task ID is not returned for any of the profiles affected. + + + force (optional, bool, False) + Provides the option to force the migration of a profile even if the source device cannot be contacted. + + This option is applicable when *command* is ``migrate``. 
+ + + attributes (optional, dict, None) + Attributes for ``modify`` and ``assign``. + + + Attributes (optional, list, None) + List of attributes to be modified, when *command* is ``modify``. + + List of attributes to be overridden when *command* is ``assign``. + + Use the *Id* If the attribute Id is available. If not, use the comma separated I (DisplayName). For more details about using the *DisplayName*, see the example provided. + + + Options (optional, dict, None) + Provides the different shut down options. + + This is applicable when *command* is ``assign``. + + + Schedule (optional, dict, None) + Schedule for profile deployment. + + This is applicable when *command* is ``assign``. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + - ``assign`` operation on a already assigned profile will not redeploy. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Create two profiles from a template + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 2 + + - name: Create profile with NFS share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + + - name: Create profile with CIFS share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + boot_to_network_iso: + boot_to_network: True + share_type: CIFS + share_ip: "192.168.0.2" + share_user: "username" + share_password: "password" + workgroup: "workgroup" + iso_path: "\\path\\to\\my_iso.iso" + iso_timeout: 8 + + - name: Modify profile name with NFS share and attributes + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: modify + name: "Profile 00001" + new_name: "modified profile" + description: "new description" + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.3" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: false + - Id: 4507 + Value: "server attr 2" + IsIgnored: false + # Enter the comma separated string as appearing in the Detailed view on GUI + # System -> Server Topology -> ServerTopology 1 Aisle Name + - DisplayName: 'System, 
Server Topology, ServerTopology 1 Aisle Name' + Value: Aisle 5 + IsIgnored: false + + - name: Delete a profile using profile name + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + name: "Profile 00001" + + - name: Delete profiles using filters + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + SelectAll: True + Filters: =contains(ProfileName,'Profile 00002') + + - name: Delete profiles using profile list filter + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + ProfileIds: + - 17123 + - 16124 + + - name: Assign a profile to target along with network share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: assign + name: "Profile 00001" + device_id: 12456 + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: true + Options: + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + EndHostPowerState: 1 + StrictCheckingVlan: True + Schedule: + RunNow: True + RunLater: False + + - name: Unassign a profile using profile name + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + name: "Profile 00003" + + - name: Unassign profiles using filters + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + SelectAll: True + Filters: 
=contains(ProfileName,'Profile 00003') + + - name: Unassign profiles using profile list filter + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + ProfileIds: + - 17123 + - 16123 + + - name: Migrate a profile + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "migrate" + name: "Profile 00001" + device_id: 12456 + + + +Return Values +------------- + +msg (always, str, Successfully created 2 profile(s).) + Overall status of the profile operation. + + +profile_ids (when I(command) is C(create), list, [1234, 5678]) + IDs of the profiles created. + + +job_id (when I(command) is C(assign), C(migrate) or C(unassign), int, 14123) + Task ID created when *command* is ``assign``, ``migrate`` or ``unassign``. + + ``assign`` and ``unassign`` operations do not trigger a task if a profile is auto-deployed. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst new file mode 100644 index 00000000..3531cb24 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst @@ -0,0 +1,145 @@ +.. _ome_server_interface_profile_info_module: + + +ome_server_interface_profile_info -- Retrieves the information of server interface profile on OpenManage Enterprise Modular. +============================================================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to retrieve the information of server interface profiles on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, list, None) + The ID of the device. + + *device_id* is mutually exclusive with *device_service_tag*. + + + device_service_tag (optional, list, None) + The service tag of the device. + + *device_service_tag* is mutually exclusive with *device_id*. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Retrieves the server interface profiles of all the device using device ID. + dellemc.openmanage.ome_server_interface_profile_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 10001 + - 10002 + + - name: Retrieves the server interface profiles of all the device using device service tag. + dellemc.openmanage.ome_server_interface_profile_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - 6GHH6H2 + - 6KHH6H3 + + + +Return Values +------------- + +msg (on success, str, Successfully retrieved the server interface profile information.) + Overall status of the server interface profile information. 
+ + +server_profiles (success, list, [AnsibleMapping([('BondingTechnology', 'LACP'), ('Id', '6KZK6K2'), ('ServerInterfaceProfile', [AnsibleMapping([('FabricId', '1ea6bf64-3cf0-4e06-a136-5046d874d1e7'), ('Id', 'NIC.Mezzanine.1A-1-1'), ('NativeVLAN', 0), ('Networks', [AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '2018-11-27 10:22:14.140'), ('Description', 'VLAN 1'), ('Id', 10001), ('InternalRefNWUUId', 'add035b9-a971-400d-a3fa-bb365df1d476'), ('Name"', 'VLAN 1'), ('Type', 2), ('UpdatedBy', None), ('UpdatedTime', '2018-11-27 10:22:14.140'), ('VlanMaximum', 1), ('VlanMinimum', 1)])]), ('NicBonded', True), ('OnboardedPort', '59HW8X2:ethernet1/1/1')]), AnsibleMapping([('FabricId', '3ea6be04-5cf0-4e05-a136-5046d874d1e6'), ('Id', 'NIC.Mezzanine.1A-2-1'), ('NativeVLAN', 0), ('Networks', [AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '2018-09-25 14:46:12.374'), ('Description', None), ('Id', 10155), ('InternalRefNWUUId', 'f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d'), ('Name', 'jagvlan'), ('Type', 1), ('UpdatedBy', None), ('UpdatedTime', '2018-09-25 14:46:12.374'), ('VlanMaximum', 143), ('VlanMinimum', 143)])]), ('NicBonded', False), ('OnboardedPort', '6H7J6Z2:ethernet1/1/1')])])])]) + Returns the information of collected server interface profile information. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst new file mode 100644 index 00000000..c4f9f0f4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst @@ -0,0 +1,241 @@ +.. _ome_server_interface_profiles_module: + + +ome_server_interface_profiles -- Configure server interface profiles +==================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to configure server interface profiles on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + device_id (optional, list, None) + Device id of the Server under chassis fabric. + + *device_id* and *device_service_tag* is mutually exclusive. + + + device_service_tag (optional, list, None) + Service tag of the Server under chassis fabric. + + *device_service_tag* and *device_id* is mutually exclusive. + + + nic_teaming (optional, str, None) + NIC teaming options. + + ``NoTeaming`` the NICs are not bonded and provide no load balancing or redundancy. + + ``LACP`` use LACP for NIC teaming. + + ``Other`` use other technology for NIC teaming. + + + nic_configuration (optional, list, None) + NIC configuration for the Servers to be applied. + + + nic_identifier (True, str, None) + ID of the NIC or port number. + + ``Note`` This will not be validated. + + + team (optional, bool, None) + Group two or more ports. The ports must be connected to the same pair of Ethernet switches. + + *team* is applicable only if *nic_teaming* is ``LACP``. 
+ + + untagged_network (optional, int, None) + The maximum or minimum VLAN id of the network to be untagged. + + The *untagged_network* can be retrieved using the :ref:`dellemc.openmanage.ome_network_vlan_info ` + + If *untagged_network* needs to be unset this needs to be sent as ``0`` + + ``Note`` The network cannot be added as a untagged network if it is already assigned to a tagged network. + + + tagged_networks (optional, dict, None) + List of tagged networks + + Network cannot be added as a tagged network if it is already assigned to untagged network + + + state (optional, str, present) + Indicates if a list of networks needs to be added or deleted. + + ``present`` to add the network to the tagged list + + ``absent`` to delete the Network from the tagged list + + + names (True, list, None) + List of network name to be marked as tagged networks + + The *names* can be retrieved using the :ref:`dellemc.openmanage.ome_network_vlan_info ` + + + + + job_wait (optional, bool, True) + Provides the option to wait for job completion. + + + job_wait_timeout (optional, int, 120) + The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration. + + This option is applicable when *job_wait* is ``True``. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - This module supports ``check_mode``. + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Modify Server Interface Profile for the server using the service tag + dellemc.openmanage.ome_server_interface_profiles: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - SVCTAG1 + - SVCTAG2 + nic_teaming: LACP + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan1 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 + + - name: Modify Server Interface Profile for the server using the device id + dellemc.openmanage.ome_server_interface_profiles: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 34523 + - 48999 + nic_teaming: NoTeaming + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan2 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 + + + +Return Values +------------- + +msg (always, str, Successfully triggered apply server profiles job.) + Status of the overall server interface operation. + + +job_id (on applying the Interface profiles, int, 14123) + Job ID of the task to apply the server interface profiles. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. 
See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst new file mode 100644 index 00000000..1e6ddda5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst @@ -0,0 +1,199 @@ +.. _ome_smart_fabric_module: + + +ome_smart_fabric -- Create, modify or delete a fabric on OpenManage Enterprise Modular +====================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create a fabric, and modify or delete an existing fabric on OpenManage Enterprise Modular. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates a new fabric or modifies an existing fabric. + + ``absent`` deletes an existing fabric. + + Notes: The create, modify, or delete fabric operation takes around 15-20 minutes to complete. It is recommended not to start another operation until the current operation is completed. + + + name (True, str, None) + Provide the *name* of the fabric to be created, deleted or modified. + + + new_name (optional, str, None) + Provide the *name* of the fabric to be modified. + + + description (optional, str, None) + Provide a short description of the fabric to be created or modified. 
+ + + fabric_design (optional, str, None) + Specify the fabric topology. See https://www.dell.com/support/manuals/en-in/poweredge-mx7000/omem_1_20_10_ug/smartfabric-network-topologies for details of the supported network topologies. + + *fabric_design* is mandatory for fabric creation. + + + primary_switch_service_tag (optional, str, None) + Service tag of the first switch. + + *primary_switch_service_tag* is mandatory for fabric creation. + + *primary_switch_service_tag* must belong to the model selected in *fabric_design*. + + + secondary_switch_service_tag (optional, str, None) + Service tag of the second switch. + + *secondary_switch_service_tag* is mandatory for fabric creation. + + *secondary_switch_service_tag* must belong to the model selected in *fabric_design*. + + + override_LLDP_configuration (optional, str, None) + Enable this configuration to allow Fabric Management Address to be included in LLDP messages. + + Notes: OpenManage Enterprise Modular 1.0 does not support this option. Some software networking solutions require a single management address to be transmitted by all Ethernet switches to represent the entire fabric. Enable this feature only when connecting to such a solution. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + description: "fabric desc" + fabric_design: "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + primary_switch_service_tag: "SVTG123" + secondary_switch_service_tag: "PXYT456" + override_LLDP_configuration: "Enabled" + + - name: Modify a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + new_name: "fabric_gold1" + description: "new description" + + - name: Delete a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "fabric1" + + + +Return Values +------------- + +msg (always, str, Fabric creation operation is initiated.) + Overall status of the fabric operation. + + +fabric_id (success, str, 1312cceb-c3dd-4348-95c1-d8541a17d776) + Returns the ID when an fabric is created, modified or deleted. + + +additional_info (when I(state=present) and additional information present in response., dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('RelatedProperties', []), ('Message', 'Fabric update is successful. 
The OverrideLLDPConfiguration attribute is not provided in the payload, so it preserves the previous value.'), ('MessageArgs', []), ('Severity', 'Informational'), ('Resolution', 'Please update the Fabric with the OverrideLLDPConfiguration as Disabled or Enabled if necessary.')])])]))])) + Additional details of the fabric operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('RelatedProperties', []), ('Message', 'Unable to perform operation, because the fabric manager was not reachable.'), ('MessageArgs', []), ('Severity', 'Warning'), ('Resolution', 'Make sure of the following and retry the operation: 1) There is at least one advanced I/O Module in power-on mode. For example, MX9116n Ethernet Switch and MX5108n Ethernet Switch. However, if an advanced I/O Module is available in the power-on mode, make sure that the network profile is not set when the fabric manager is in the switch-over mode. 2) If the issue persists, wait for few minutes and retry the operation.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst new file mode 100644 index 00000000..83ddfcdc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst @@ -0,0 +1,291 @@ +.. _ome_smart_fabric_uplink_module: + + +ome_smart_fabric_uplink -- Create, modify or delete a uplink for a fabric on OpenManage Enterprise Modular +========================================================================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify or delete an uplink for a fabric. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` - Creates a new uplink with the provided *name*. - Modifies an existing uplink with the provided *name*. + + ``absent`` – Deletes the uplink with the provided *name*. + + *WARNING* Delete operation can impact the network infrastructure. + + + fabric_name (True, str, None) + Provide the *fabric_name* of the fabric for which the uplink is to be configured. + + + name (True, str, None) + Provide the *name* of the uplink to be created, modified or deleted. + + + new_name (optional, str, None) + Provide the new *new_name* for the uplink. + + + description (optional, str, None) + Provide a short description for the uplink to be created or modified. + + + uplink_type (optional, str, None) + Specify the uplink type. + + *NOTE* The uplink type cannot be changed for an existing uplink. + + + ufd_enable (optional, str, None) + Add or Remove the uplink to the Uplink Failure Detection (UFD) group. The UFD group identifies the loss of connectivity to the upstream switch and notifies the servers that are connected to the switch. During an uplink failure, the switch disables the corresponding downstream server ports. The downstream servers can then select alternate connectivity routes, if available. + + *WARNING* The firmware version of the I/O Module running the Fabric Manager must support this configuration feature. If not, uplink creation will be successful with an appropriate error message in response. + + + primary_switch_service_tag (optional, str, None) + Service tag of the primary switch. + + + primary_switch_ports (optional, list, None) + The IOM slots to be connected to the primary switch. 
+ + *primary_switch_service_tag* is mandatory for this option. + + + secondary_switch_service_tag (optional, str, None) + Service tag of the secondary switch. + + + secondary_switch_ports (optional, list, None) + The IOM slots to be connected to the secondary switch. + + *secondary_switch_service_tag* is mandatory for this option. + + + tagged_networks (optional, list, None) + VLANs to be associated with the uplink *name*. + + + untagged_network (optional, str, None) + Specify the name of the VLAN to be added as untagged to the uplink. + + + hostname (True, str, None) + OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Create an Uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "CREATED from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Enabled" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + tagged_networks: + - vlan1 + - vlan3 + untagged_network: vlan2 + tags: create_uplink + + - name: Modify an existing uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + description: "Modified from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Disabled" + primary_switch_service_tag: "DEF1234" + primary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + secondary_switch_service_tag: "TUV1234" + secondary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + tagged_networks: + - vlan11 + - vlan33 + untagged_network: vlan22 + tags: modify_uplink + + - name: Delete an Uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + fabric_name: "fabric1" + name: "uplink1" + tags: delete_uplink + + - name: Modify an Uplink name + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + tags: modify_uplink_name + + - name: Modify Uplink ports + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: 
"username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "uplink ports modified" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/6 + - ethernet1/1/7 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/9 + - ethernet1/1/10 + tags: modify_ports + + - name: Modify Uplink networks + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "create1" + description: "uplink networks modified" + tagged_networks: + - vlan4 + tags: modify_networks + + + +Return Values +------------- + +msg (always, str, Successfully modified the uplink.) + Overall status of the uplink operation. + + +uplink_id (when I(state=present), str, ddc3d260-fd71-46a1-97f9-708e12345678) + Returns the ID when an uplink is created or modified. + + +additional_info (when I(state=present) and additional information present in response., dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to configure the Uplink Failure Detection mode on the uplink because the firmware version of the I/O Module running the Fabric Manager does not support the configuration feature.'), ('MessageArgs', []), ('MessageId', 'CDEV7151'), ('RelatedProperties', []), ('Resolution', "Update the firmware version of the I/O Module running the Fabric Manager and retry the operation. For information about the recommended I/O Module firmware versions, see the OpenManage Enterprise-Modular User's Guide available on the support site."), ('Severity', 'Informational')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Additional details of the fabric operation. 
+ + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1006'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because the resource URI does not exist or is not implemented.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties.")])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst new file mode 100644 index 00000000..5b58dffc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst @@ -0,0 +1,547 @@ +.. _ome_template_module: + + +ome_template -- Create, modify, deploy, delete, export, import and clone a template on OpenManage Enterprise +============================================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module creates, modifies, deploys, deletes, exports, imports and clones a template on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + command (optional, str, create) + ``create`` creates a new template. + + ``modify`` modifies an existing template. + + ``deploy`` creates a template-deployment job. + + ``delete`` deletes an existing template. + + ``export`` exports an existing template. + + ``import`` creates a template from a specified configuration text in SCP XML format. 
``clone`` creates a clone of an existing template.
For more details about using the *DisplayName*, see the example provided. + + Name: Name of the template. This is mandatory when *command* is ``create``, ``import``, ``clone``, and optional when *command* is ``modify``. + + Description: Description for the template. This is applicable when *command* is ``create`` or ``modify``. + + Fqdds: This allows to create a template using components from a specified reference server. One or more, of the following values must be specified in a comma-separated string: iDRAC, System, BIOS, NIC, LifeCycleController, RAID, and EventFilters. If none of the values are specified, the default value 'All' is selected. This is applicable when I (command) is ``create``. + + Options: Options to control device shutdown or end power state post template deployment. This is applicable for ``deploy`` operation. + + Schedule: Provides options to schedule the deployment task immediately, or at a specified time. This is applicable when *command* is ``deploy``. + + NetworkBootIsoModel: Payload to specify the ISO deployment details. This is applicable when *command* is ``deploy``. + + Content: The XML content of template. This is applicable when *command* is ``import``. + + Type: Template type ID, indicating the type of device for which configuration is supported, such as chassis and servers. This is applicable when *command* is ``import``. + + TypeId: Template type ID, indicating the type of device for which configuration is supported, such as chassis and servers. This is applicable when *command* is ``create``. + + Refer OpenManage Enterprise API Reference Guide for more details. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. 
+ + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Create a template from a reference device + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25123 + attributes: + Name: "New Template" + Description: "New Template description" + + - name: Modify template name, description, and attribute value + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + # Attributes to be modified in the template. 
+ # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + - Id: 1234 + Value: "Test Attribute" + IsIgnored: false + + - name: Modify template name, description, and attribute using detailed view + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + Attributes: + # Enter the comma separated string as appearing in the Detailed view on GUI + # NIC -> NIC.Integrated.1-1-1 -> NIC Configuration -> Wake On LAN1 + - DisplayName: 'NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN' + Value: Enabled + IsIgnored: false + # System -> LCD Configuration -> LCD 1 User Defined String for LCD + - DisplayName: 'System, LCD Configuration, LCD 1 User Defined String for LCD' + Value: LCD str by OMAM + IsIgnored: false + + - name: Deploy template on multiple devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + - 'SVTG456' + + - name: Deploy template on groups + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_group_names: + - server_group_1 + - server_group_2 + + - name: Deploy template on multiple devices along with the attributes values to be modified on the target devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + attributes: + # 
Device specific attributes to be modified during deployment. + # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + # specific device where attribute to be modified at deployment run-time. + # The DeviceId should be mentioned above in the 'device_id' section. + # Service tags not allowed. + - DeviceId: 12765 + Attributes: + - Id : 15645 + Value : "0.0.0.0" + IsIgnored : false + - DeviceId: 10173 + Attributes: + - Id : 18968, + Value : "hostname-1" + IsIgnored : false + + - name: Deploy template and Operating System (OS) on multiple devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. + # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: "Deploy template on multiple devices and changes the device-level attributes. 
After the template is deployed, + install OS using its image" + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + - 'SVTG456' + attributes: + Attributes: + - DeviceId: 12765 + Attributes: + - Id : 15645 + Value : "0.0.0.0" + IsIgnored : false + - DeviceId: 10173 + Attributes: + - Id : 18968, + Value : "hostname-1" + IsIgnored : false + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: Delete template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + template_id: 12 + + - name: Export a template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_id: 12 + + # Start of example to export template to a local xml file + - name: Export template to a local xml file + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_name: "my_template" + register: result + - name: Save template into a file + ansible.builtin.copy: + content: "{{ result.Content}}" + dest: "/path/to/exported_template.xml" + # End of example to export template to a local xml file + + - name: Clone a template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: 
"password" + ca_path: "/path/to/ca_cert.pem" + command: "clone" + template_id: 12 + attributes: + Name: "New Cloned Template Name" + + - name: Import template from XML content + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + # Template Type from TemplateService/TemplateTypes + Type: 2 + # xml string content + Content: "\n\nTrue\nClear\n + \n\nReady + \nNo\n\n + \nReady\n + No\n\n\n" + + - name: Import template from local XML file + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + Type: 2 + Content: "{{ lookup('ansible.builtin.file', '/path/to/xmlfile') }}" + + - name: "Deploy template and Operating System (OS) on multiple devices." + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. 
+ # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "CIFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: Create a compliance template from reference device + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "create" + device_service_tag: + - "SVTG123" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Description: "Configuration Compliance Template" + Fqdds: "BIOS" + + - name: Import a compliance template from XML file + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Content: "{{ lookup('ansible.builtin.file', './test.xml') }}" + Type: 2 + + + +Return Values +------------- + +msg (always, str, Successfully created a template with ID 23) + Overall status of the template operation. + + +return_id (success, when I(command) is C(create), C(modify), C(import), C(clone) and C(deploy), int, 12) + ID of the template for ``create``, ``modify``, ``import`` and ``clone`` or task created in case of ``deploy``. + + +TemplateId (success, when I(command) is C(export), int, 13) + ID of the template for ``export``. + + +Content (success, when I(command) is C(export), str, + +True +Clear + + +Ready +No + + +Ready +No + +) + XML content of the exported template. This content can be written to a xml file. 
+ + +devices_assigned (I(command) is C(deploy), dict, AnsibleMapping([('10362', 28), ('10312', 23)])) + Mapping of devices with the templates already deployed on them. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V (@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst new file mode 100644 index 00000000..3210a8da --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst @@ -0,0 +1,134 @@ +.. _ome_template_identity_pool_module: + + +ome_template_identity_pool -- Attach or detach an identity pool to a requested template on OpenManage Enterprise +================================================================================================================ + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to- - Attach an identity pool to a requested template on OpenManage Enterprise. - Detach an identity pool from a requested template on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + template_name (True, str, None) + Name of the template to which an identity pool is attached or detached. 
+ + + identity_pool_name (optional, str, None) + Name of the identity pool. - To attach an identity pool to a template, provide the name of the identity pool. - This option is not applicable when detaching an identity pool from a template. + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Attach an identity pool to a template + dellemc.openmanage.ome_template_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name + identity_pool_name: identity_pool_name + + - name: Detach an identity pool from a template + dellemc.openmanage.ome_template_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name + + + +Return Values +------------- + +msg (always, str, Successfully attached identity pool to template.) + Overall identity pool status of the attach or detach operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst new file mode 100644 index 00000000..72e0b6d9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst @@ -0,0 +1,146 @@ +.. _ome_template_info_module: + + +ome_template_info -- Retrieves template details from OpenManage Enterprise +========================================================================== + +.. 
contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module retrieves the list and details of all the templates on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + template_id (optional, int, None) + Unique Id of the template. + + + system_query_options (optional, dict, None) + Options for pagination of the output. + + + filter (optional, str, None) + Filter records by the supported values. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Retrieve basic details of all templates + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve details of a specific template identified by its template ID + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 1 + + - name: Get filtered template info based on name + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "Name eq 'new template'" + + + +Return Values +------------- + +msg (on error, str, Failed to fetch the template facts) + Overall template facts status. + + +template_info (success, dict, AnsibleMapping([('192.168.0.1', AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '1970-01-31 00:00:56.372144'), ('Description', 'Tune workload for Performance Optimized Virtualization'), ('HasIdentityAttributes', False), ('Id', 1), ('IdentityPoolId', 0), ('IsBuiltIn', True), ('IsPersistencePolicyValid', False), ('IsStatelessAvailable', False), ('LastUpdatedBy', None), ('LastUpdatedTime', '1970-01-31 00:00:56.372144'), ('Name', 'iDRAC Enable Performance Profile for Virtualization'), ('SourceDeviceId', 0), ('Status', 0), ('TaskId', 0), ('TypeId', 2), ('ViewTypeId', 4)]))])) + Details of the templates. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst new file mode 100644 index 00000000..6d2752fc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst @@ -0,0 +1,238 @@ +.. 
_ome_template_network_vlan_module: + + +ome_template_network_vlan -- Set tagged and untagged vlans to native network card supported by a template on OpenManage Enterprise +================================================================================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to set tagged and untagged vlans to native network card supported by a template on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + template_name (optional, str, None) + Name of the template. + + It is mutually exclusive with *template_id*. + + + template_id (optional, int, None) + Id of the template. + + It is mutually exclusive with *template_name*. + + + nic_identifier (True, str, None) + Display name of NIC port in the template for VLAN configuration. + + + propagate_vlan (optional, bool, True) + To deploy the modified VLAN settings immediately without rebooting the server. + + This option will be applied only when there are changes to the VLAN configuration. + + + untagged_networks (optional, list, None) + List of untagged networks and their corresponding NIC ports. + + + port (True, int, None) + NIC port number of the untagged VLAN. + + + untagged_network_id (optional, int, None) + ID of the untagged VLAN + + Enter 0 to clear the untagged VLAN from the port. + + This option is mutually exclusive with *untagged_network_name* + + To get the VLAN network ID use the API https://*hostname*/api/NetworkConfigurationService/Networks + + + untagged_network_name (optional, str, None) + name of the vlan for untagging + + provide 0 for clearing the untagging for this *port* + + This parameter is mutually exclusive with *untagged_network_id* + + + + tagged_networks (optional, list, None) + List of tagged VLANs and their corresponding NIC ports. 
+ + + port (True, int, None) + NIC port number of the tagged VLAN + + + tagged_network_ids (optional, list, None) + List of IDs of the tagged VLANs + + Enter [] to remove the tagged VLAN from a port. + + List of *tagged_network_ids* is combined with list of *tagged_network_names* when adding tagged VLANs to a port. + + To get the VLAN network ID use the API https://*hostname*/api/NetworkConfigurationService/Networks + + + tagged_network_names (optional, list, None) + List of names of tagged VLANs + + Enter [] to remove the tagged VLAN from a port. + + List of *tagged_network_names* is combined with list of *tagged_network_ids* when adding tagged VLANs to a port. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Add tagged or untagged VLANs to a template using VLAN ID and name + dellemc.openmanage.ome_template_network_vlan: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + - port: 1 + untagged_network_id: 127656 + - port: 2 + untagged_network_name: vlan2 + tagged_networks: + - port: 1 + tagged_network_ids: + - 12767 + - 12768 + - port: 4 + tagged_network_ids: + - 12767 + - 12768 + tagged_network_names: + - vlan3 + - port: 2 + tagged_network_names: + - vlan4 + - vlan1 + + - name: Clear the tagged and untagged VLANs from a template + dellemc.openmanage.ome_template_network_vlan: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + # For removing the untagged VLANs for the port 1 and 2 + - port: 1 + untagged_network_id: 0 + - port: 2 + untagged_network_name: 0 + tagged_networks: + # For removing the tagged VLANs for port 1, 4 and 2 + - port: 1 + tagged_network_ids: [] + - port: 4 + tagged_network_ids: [] + tagged_network_names: [] + - port: 2 + tagged_network_names: [] + + + +Return Values +------------- + +msg (always, str, Successfully applied the network settings to template.) + Overall status of the template vlan operation. + + +error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because TemplateId does not exist or is not applicable for the resource URI.'), ('MessageArgs', ['TemplateId']), ('MessageId', 'CGEN1004'), ('RelatedProperties', []), ('Resolution', "Check the request resource URI. 
Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of the HTTP Error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst new file mode 100644 index 00000000..4e46c91f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst @@ -0,0 +1,199 @@ +.. _ome_user_module: + + +ome_user -- Create, modify or delete a user on OpenManage Enterprise +==================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module creates, modifies or deletes a user on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + state (optional, str, present) + ``present`` creates a user in case the *UserName* provided inside *attributes* does not exist. + + ``present`` modifies a user in case the *UserName* provided inside *attributes* exists. + + ``absent`` deletes an existing user. + + + user_id (optional, int, None) + Unique ID of the user to be deleted. + + Either *user_id* or *name* is mandatory for ``absent`` operation. + + + name (optional, str, None) + Unique Name of the user to be deleted. + + Either *user_id* or *name* is mandatory for ``absent`` operation. + + + attributes (optional, dict, AnsibleMapping()) + Payload data for the user operations. It can take the following attributes for ``present``. + + UserTypeId, DirectoryServiceId, Description, Name, Password, UserName, RoleId, Locked, Enabled. 
OME will throw an error if a required parameter is not provided for the operation.
code-block:: yaml+jinja + + + --- + - name: Create user with required parameters + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + attributes: + UserName: "user1" + Password: "UserPassword" + RoleId: "10" + Enabled: True + + - name: Create user with all parameters + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + attributes: + UserName: "user2" + Description: "user2 description" + Password: "UserPassword" + RoleId: "10" + Enabled: True + DirectoryServiceId: 0 + UserTypeId: 1 + Locked: False + Name: "user2" + + - name: Modify existing user + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + attributes: + UserName: "user3" + RoleId: "10" + Enabled: True + Description: "Modify user Description" + + - name: Delete existing user using id + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + user_id: 1234 + + - name: Delete existing user using name + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "name" + + + +Return Values +------------- + +msg (always, str, Successfully created a User) + Overall status of the user operation. + + +user_status (When I(state) is C(present)., dict, AnsibleMapping([('Description', 'Test user creation'), ('DirectoryServiceId', 0), ('Enabled', True), ('Id', '61546'), ('IsBuiltin', False), ('Locked', False), ('Name', 'test'), ('Password', None), ('PlainTextPassword', None), ('RoleId', '10'), ('UserName', 'test'), ('UserTypeId', 1)])) + Details of the user operation, when *state* is ``present``. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst new file mode 100644 index 00000000..80e8250d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst @@ -0,0 +1,146 @@ +.. _ome_user_info_module: + + +ome_user_info -- Retrieves details of all accounts or a specific account on OpenManage Enterprise +================================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module retrieves the list and basic details of all accounts or details of a specific account on OpenManage Enterprise. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + account_id (optional, int, None) + Unique Id of the account. + + + system_query_options (optional, dict, None) + Options for filtering the output. + + + filter (optional, str, None) + Filter records for the supported values. + + + + hostname (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + + + username (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular username. + + + password (True, str, None) + OpenManage Enterprise or OpenManage Enterprise Modular password. + + + port (optional, int, 443) + OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. 
+ + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Retrieve basic details of all accounts + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve details of a specific account identified by its account ID + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + account_id: 1 + + - name: Get filtered user info based on user name + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "UserName eq 'test'" + + + +Return Values +------------- + +msg (on error, str, Unable to retrieve the account details.) + Over all status of fetching user facts. + + +user_info (success, dict, AnsibleMapping([('192.168.0.1', AnsibleMapping([('Id', '1814'), ('UserTypeId', 1), ('DirectoryServiceId', 0), ('Description', 'user name description'), ('Name', 'user_name'), ('Password', None), ('UserName', 'user_name'), ('RoleId', '10'), ('Locked', False), ('IsBuiltin', True), ('Enabled', True)]))])) + Details of the user. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Jagadeesh N V(@jagadeeshnv) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst new file mode 100644 index 00000000..d5fe2c96 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst @@ -0,0 +1,172 @@ +.. _redfish_event_subscription_module: + + +redfish_event_subscription -- Manage Redfish Subscriptions +========================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to add or delete Redfish Event subscriptions. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + destination (True, str, None) + The HTTPS URI of the destination to send events. + + HTTPS is required. + + + event_type (optional, str, Alert) + Specifies the event type to be subscribed. + + ``Alert`` used to subscribe for alert. + + ``MetricReport`` used to subscribe for the metrics report. + + + event_format_type (optional, str, Event) + Specifies the format type of the event to be subscribed. + + ``Event`` used to subscribe for Event format type. + + ``MetricReport`` used to subscribe for the metrics report format type. + + + state (optional, str, present) + ``present`` adds new event subscription. + + ``absent`` deletes event subscription with the specified *destination*. + + + baseuri (True, str, None) + IP address of the target out-of-band controller. For example- :. + + + username (True, str, None) + Username of the target out-of-band controller. + + + password (True, str, None) + Password of the target out-of-band controller. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. 
+ + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - *event_type* needs to be ``MetricReport`` and *event_format_type* needs to be ``MetricReport`` for metrics subscription. + - *event_type* needs to be ``Alert`` and *event_format_type* needs to be ``Event`` for event subscription. + - Modifying a subscription is not supported. + - Context is always set to RedfishEvent. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Add Redfish metric subscription + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://192.168.1.100:8188" + event_type: MetricReport + event_format_type: MetricReport + state: present + + - name: Add Redfish alert subscription + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + event_type: Alert + event_format_type: Event + state: present + + - name: Delete Redfish subscription with a specified destination + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + state: absent + + + +Return Values +------------- + +msg (always, str, Successfully added the subscription.) + Overall status of the task. 
+ + +status (on adding subscription successfully, dict, AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'The resource has been created successfully'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.7.Created'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')]), AnsibleMapping([('Message', 'A new resource is successfully created.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.2.2.SYS414'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'No response action is required.'), ('Severity', 'Informational')])]), ('Actions', AnsibleMapping([('#EventDestination.ResumeSubscription', AnsibleMapping([('target', '/redfish/v1/EventService/Subscriptions/5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a/Actions/EventDestination.ResumeSubscription')]))])), ('Context', 'RedfishEvent'), ('DeliveryRetryPolicy', 'RetryForever'), ('Description', 'Event Subscription Details'), ('Destination', 'https://192.168.1.100:8188'), ('EventFormatType', 'Event'), ('EventTypes', ['Alert']), ('EventTypes@odata.count', 1), ('HttpHeaders', []), ('HttpHeaders@odata.count', 0), ('Id', '5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a'), ('MetricReportDefinitions', []), ('MetricReportDefinitions@odata.count', 0), ('Name', 'EventSubscription 5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a'), ('OriginResources', []), ('OriginResources@odata.count', 0), ('Protocol', 'Redfish'), ('Status', AnsibleMapping([('Health', 'OK'), ('HealthRollup', 'OK'), ('State', 'Enabled')])), ('SubscriptionType', 'RedfishEvent')])) + Returns subscription object created + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the JSON data format entered is invalid.'), ('Resolution', 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry 
the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.'), ('Severity', 'Critical')]), AnsibleMapping([('Message', 'The request body submitted was malformed JSON and could not be parsed by the receiving service.'), ('Resolution', 'Ensure that the request body is valid JSON and resubmit the request.'), ('Severity', 'Critical')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Trevor Squillario (@TrevorSquillario) +- Sachin Apagundi (@sachin-apa) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst new file mode 100644 index 00000000..d1225dd7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst @@ -0,0 +1,139 @@ +.. _redfish_firmware_module: + + +redfish_firmware -- To perform a component firmware update using the image file available on the local or remote system +======================================================================================================================= + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows the firmware update of only one component at a time. If the module is run for more than one component, an error message is returned. + +Depending on the component, the firmware update is applied after an automatic or manual reboot. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 +- urllib3 + + + +Parameters +---------- + + image_uri (True, str, None) + Firmware Image location URI or local path. + + For example- http:///components.exe or /home/firmware_repo/component.exe. 
+ + + transfer_protocol (optional, str, HTTP) + Protocol used to transfer the firmware image file. Applicable for URI based update. + + + baseuri (True, str, None) + IP address of the target out-of-band controller. For example- :. + + + username (True, str, None) + Username of the target out-of-band controller. + + + password (True, str, None) + Password of the target out-of-band controller. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Redfish APIs. + - This module does not support ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Update the firmware from a single executable file available in a HTTP protocol + dellemc.openmanage.redfish_firmware: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + image_uri: "http://192.168.0.2/firmware_repo/component.exe" + transfer_protocol: "HTTP" + + - name: Update the firmware from a single executable file available in a local path + dellemc.openmanage.redfish_firmware: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + image_uri: "/home/firmware_repo/component.exe" + + + +Return Values +------------- + +msg (always, str, Successfully submitted the firmware update task.) + Overall status of the firmware update task. 
+ + +task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXX'), ('uri', '/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX')])) + Returns ID and URI of the created task. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the JSON data format entered is invalid.'), ('Resolution', 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.'), ('Severity', 'Critical')]), AnsibleMapping([('Message', 'The request body submitted was malformed JSON and could not be parsed by the receiving service.'), ('Resolution', 'Ensure that the request body is valid JSON and resubmit the request.'), ('Severity', 'Critical')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))])) + Details of http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Felix Stephen (@felixs88) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst new file mode 100644 index 00000000..fb05fe3e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst @@ -0,0 +1,154 @@ +.. _redfish_powerstate_module: + + +redfish_powerstate -- Manage device power state +=============================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to manage the different power states of the specified device. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. 
+ +- python >= 3.8.6 + + + +Parameters +---------- + + resource_id (False, str, None) + The unique identifier of the device being managed. For example- https://<*baseuri*>/redfish/v1/Systems/<*resource_id*>. + + This option is mandatory for *base_uri* with multiple devices. + + To get the device details, use the API https://<*baseuri*>/redfish/v1/Systems. + + + reset_type (True, str, None) + This option resets the device. + + If ``ForceOff``, Turns off the device immediately. + + If ``ForceOn``, Turns on the device immediately. + + If ``ForceRestart``, Turns off the device immediately, and then restarts the device. + + If ``GracefulRestart``, Performs graceful shutdown of the device, and then restarts the device. + + If ``GracefulShutdown``, Performs a graceful shutdown of the device, and then turns off the device. + + If ``Nmi``, Sends a diagnostic interrupt to the device. This is usually a non-maskable interrupt (NMI) on x86 device. + + If ``On``, Turns on the device. + + If ``PowerCycle``, Performs power cycle on the device. + + If ``PushPowerButton``, Simulates the pressing of a physical power button on the device. + + When a power control operation is performed, which is not supported on the device, an error message is displayed with the list of operations that can be performed. + + + baseuri (True, str, None) + IP address of the target out-of-band controller. For example- <ipaddress>:<port>. + + + username (True, str, None) + Username of the target out-of-band controller. + + + password (True, str, None) + Password of the target out-of-band controller. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. 
+ + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Redfish APIs. + - This module supports ``check_mode``. + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + --- + - name: Manage power state of the first device + dellemc.openmanage.redfish_powerstate: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + reset_type: "On" + + - name: Manage power state of a specified device + dellemc.openmanage.redfish_powerstate: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + reset_type: "ForceOff" + resource_id: "System.Embedded.1" + + + +Return Values +------------- + +msg (always, str, Successfully performed the reset type operation 'On'.) + Overall status of the reset operation. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the resource /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset entered in not found.'), ('MessageArgs', ['/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset']), ('MessageArgs@odata.count', 1), ('MessageId', 'IDRAC.2.1.SYS403'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Enter the correct resource and retry the operation. For information about valid resource, see the Redfish Users Guide available on the support site.'), ('Severity', 'Critical')])]), ('code', 'Base.1.5.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))])) + Details of the HTTP error. 
+ + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst new file mode 100644 index 00000000..d0dfe4b1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst @@ -0,0 +1,277 @@ +.. _redfish_storage_volume_module: + + +redfish_storage_volume -- Manages the storage volume configuration +================================================================== + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- + +This module allows to create, modify, initialize, or delete a single storage volume. + + + +Requirements +------------ +The below requirements are needed on the host that executes this module. + +- python >= 3.8.6 + + + +Parameters +---------- + + controller_id (optional, str, None) + Fully Qualified Device Descriptor (FQDD) of the storage controller. + + For example- RAID.Slot.1-1. + + This option is mandatory when *state* is ``present`` while creating a volume. + + + volume_id (optional, str, None) + FQDD of existing volume. + + For example- Disk.Virtual.4:RAID.Slot.1-1. + + This option is mandatory in the following scenarios, + + *state* is ``present``, when updating a volume. + + *state* is ``absent``, when deleting a volume. + + *command* is ``initialize``, when initializing a volume. + + + state (optional, str, None) + ``present`` creates a storage volume for the specified *controller_id*, or modifies the storage volume for the specified *volume_id*. "Note: Modification of an existing volume properties depends on drive and controller capabilities". + + ``absent`` deletes the volume for the specified *volume_id*. + + + command (optional, str, None) + ``initialize`` initializes an existing storage volume for a specified *volume_id*. 
+ + + volume_type (optional, str, None) + One of the following volume types must be selected to create a volume. + + ``Mirrored`` The volume is a mirrored device. + + ``NonRedundant`` The volume is a non-redundant storage device. + + ``SpannedMirrors`` The volume is a spanned set of mirrored devices. + + ``SpannedStripesWithParity`` The volume is a spanned set of devices which uses parity to retain redundant information. + + ``StripedWithParity`` The volume is a device which uses parity to retain redundant information. + + + name (optional, str, None) + Name of the volume to be created. + + Only applicable when *state* is ``present``. + + + drives (optional, list, None) + FQDD of the Physical disks. + + For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1. + + Only applicable when *state* is ``present`` when creating a new volume. + + + block_size_bytes (optional, int, None) + Block size in bytes. Only applicable when *state* is ``present``. + + + capacity_bytes (optional, str, None) + Volume size in bytes. + + Only applicable when *state* is ``present``. + + + optimum_io_size_bytes (optional, int, None) + Stripe size value must be in multiples of 64 * 1024. + + Only applicable when *state* is ``present``. + + + encryption_types (optional, str, None) + The following encryption types can be selected. + + ``ControllerAssisted`` The volume is encrypted by the storage controller entity. + + ``NativeDriveEncryption`` The volume utilizes the native drive encryption capabilities of the drive hardware. + + ``SoftwareAssisted`` The volume is encrypted by the software running on the system or the operating system. + + Only applicable when *state* is ``present``. + + + encrypted (optional, bool, None) + Indicates whether volume is currently utilizing encryption or not. + + Only applicable when *state* is ``present``. + + + oem (optional, dict, None) + Includes OEM extended payloads. + + Only applicable when *state* is ``present``. 
+ + + initialize_type (optional, str, Fast) + Initialization type of existing volume. + + Only applicable when *command* is ``initialize``. + + + baseuri (True, str, None) + IP address of the target out-of-band controller. For example- :. + + + username (True, str, None) + Username of the target out-of-band controller. + + + password (True, str, None) + Password of the target out-of-band controller. + + + validate_certs (optional, bool, True) + If ``False``, the SSL certificates will not be validated. + + Configure ``False`` only on personally controlled sites where self-signed certificates are used. + + Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default. + + + ca_path (optional, path, None) + The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + + + timeout (optional, int, 30) + The socket level timeout in seconds. + + + + + +Notes +----- + +.. note:: + - Run this module from a system that has direct access to Redfish APIs. + - This module supports ``check_mode``. + - This module always reports changes when *name* and *volume_id* are not specified. Either *name* or *volume_id* is required to support ``check_mode``. + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + --- + - name: Create a volume with supported options + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_type: "Mirrored" + name: "VD0" + controller_id: "RAID.Slot.1-1" + drives: + - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1 + - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1 + block_size_bytes: 512 + capacity_bytes: 299439751168 + optimum_io_size_bytes: 65536 + encryption_types: NativeDriveEncryption + encrypted: true + + - name: Create a volume with minimum options + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + controller_id: "RAID.Slot.1-1" + volume_type: "NonRedundant" + drives: + - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1 + + - name: Modify a volume's encryption type settings + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + encryption_types: "ControllerAssisted" + encrypted: true + + - name: Delete an existing volume + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + + - name: Initialize an existing volume + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "initialize" + volume_id: "Disk.Virtual.6:RAID.Slot.1-1" + initialize_type: "Slow" + + + +Return Values +------------- + +msg (always, str, Successfully submitted create volume task.) + Overall status of the storage configuration operation. 
+ + +task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXXX'), ('uri', '/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX')])) + Returns ID and URI of the created task. + + +error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to perform configuration operations because a configuration job for the device already exists.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.1.6.STOR023'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Wait for the current job for the device to complete or cancel the current job before attempting more configuration operations on the device.'), ('Severity', 'Informational')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))])) + Details of a http error. + + + + + +Status +------ + + + + + +Authors +~~~~~~~ + +- Sajna Shetty(@Sajna-Shetty) + diff --git a/ansible_collections/dellemc/openmanage/meta/execution-environment.yml b/ansible_collections/dellemc/openmanage/meta/execution-environment.yml new file mode 100644 index 00000000..5aa14625 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/meta/execution-environment.yml @@ -0,0 +1,5 @@ +--- +version: 1 +dependencies: + galaxy: requirements.yml + python: requirements.txt diff --git a/ansible_collections/dellemc/openmanage/meta/runtime.yml b/ansible_collections/dellemc/openmanage/meta/runtime.yml new file mode 100644 index 00000000..d550a6d3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/meta/runtime.yml @@ -0,0 +1,36 @@ +--- +requires_ansible: '>=2.10.0' +plugin_routing: + modules: + dellemc_get_firmware_inventory: + deprecation: + removal_date: "2023-01-15" + warning_text: dellemc_get_firmware_inventory will be removed in a future release of this collection. Use dellemc.openmanage.idrac_firmware_info instead. 
+ dellemc_get_system_inventory: + deprecation: + removal_date: "2023-01-15" + warning_text: dellemc_get_system_inventory will be removed in a future release of this collection. Use dellemc.openmanage.idrac_system_info instead. + dellemc_configure_idrac_eventing: + deprecation: + removal_date: "2024-07-31" + warning_text: dellemc_configure_idrac_eventing will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. + dellemc_configure_idrac_services: + deprecation: + removal_date: "2024-07-31" + warning_text: dellemc_configure_idrac_services will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. + dellemc_idrac_lc_attributes: + deprecation: + removal_date: "2024-07-31" + warning_text: dellemc_idrac_lc_attributes will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. + dellemc_system_lockdown_mode: + deprecation: + removal_date: "2024-07-31" + warning_text: dellemc_system_lockdown_mode will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. + idrac_network: + deprecation: + removal_date: "2024-07-31" + warning_text: idrac_network will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. + idrac_timezone_ntp: + deprecation: + removal_date: "2024-07-31" + warning_text: idrac_timezone_ntp will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml new file mode 100644 index 00000000..d8164065 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml @@ -0,0 +1,110 @@ +--- +- hosts: idrac + connection: local + name: iDRAC storage volume configuration. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create single volume. + dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "create" + controller_id: "RAID.Slot.1-1" + volumes: + - drives: + location: [5] + tags: + - create_single_volume + + - name: Create multiple volume. + dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + raid_reset_config: "True" + state: "create" + controller_id: "RAID.Slot.1-1" + volume_type: "RAID 1" + span_depth: 1 + span_length: 2 + number_dedicated_hot_spare: 1 + disk_cache_policy: "Enabled" + write_cache_policy: "WriteBackForce" + read_cache_policy: "ReadAhead" + stripe_size: 65536 + capacity: 100 + raid_init_operation: "Fast" + volumes: + - name: "volume_1" + drives: + id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1", + "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"] + - name: "volume_2" + volume_type: "RAID 5" + span_length: 3 + span_depth: 1 + drives: + location: [7, 3, 5] + disk_cache_policy: "Disabled" + write_cache_policy: "WriteBack" + read_cache_policy: "NoReadAhead" + stripe_size: 131072 + capacity: "200" + raid_init_operation: "None" + tags: + - create_multiple_volume + + - name: Delete single volume. 
+ dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "delete" + volumes: + - name: "volume_1" + tags: + - delete_single_volume + + + - name: Delete multiple volume. + dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "delete" + volumes: + - name: "volume_1" + - name: "volume_2" + tags: + - delete_multiple_volume + + - name: View specific volume details. + dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "view" + controller_id: "RAID.Slot.1-1" + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - view_specific_volume + + - name: View all volume details. + dellemc_idrac_storage_volume: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "view" + tags: + - view_all_volume \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml new file mode 100644 index 00000000..c712288e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml @@ -0,0 +1,62 @@ +--- +- hosts: idrac + connection: local + name: Configure the iDRAC eventing attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Setup iDRAC SMTP + dellemc_configure_idrac_eventing: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + smtp_ip_address: "0.0.0.0" + authentication: 
"Enabled" + username: "test" + password: "test" + + tags: + - idrac_smtp + + - name: Setup iDRAC SNMP Trap + dellemc_configure_idrac_eventing: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + snmp_trap_state: "Enabled" + destination_number: "2" + snmp_v3_username: "None" + destination: "1.1.1.1" + + tags: + - idrac_snmptrap + + - name: Setup iDRAC Email Alerts + dellemc_configure_idrac_eventing: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + email_alert_state: "Disabled" + address: "test@test.com" + alert_number: "1" + custom_message: "test" + + tags: + - idrac_email_alerts + + - name: Setup iDRAC Alerts + dellemc_configure_idrac_eventing: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + enable_alerts: "Disabled" + + tags: + - idrac_alerts \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml new file mode 100644 index 00000000..e0d4bbe8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml @@ -0,0 +1,46 @@ +--- +- hosts: idrac + connection: local + name: Configure the iDRAC services attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Setup iDRAC Webserver + dellemc_configure_idrac_services: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + ssl_encryption: "T_168_Bit_or_higher" + tls_protocol: "TLS_1_0_and_Higher" + + tags: + - idrac_webserver + + - name: Setup iDRAC SNMP + dellemc_configure_idrac_services: + 
idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + snmp_enable: "Enabled" + snmp_protocol: "All" + + tags: + - idrac_snmp + + - name: Setup iDRAC SNMP settings + dellemc_configure_idrac_services: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + ipmi_lan: + community_name: public + alert_port: 161 + trap_format: SNMPv3 + tags: + - idrac-snmp-settings diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml new file mode 100644 index 00000000..ac4736c5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml @@ -0,0 +1,16 @@ +--- +- hosts: idrac + connection: local + name: Get Installed Firmware Inventory + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get Installed Firmware Inventory + dellemc_get_firmware_inventory: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml new file mode 100644 index 00000000..085b14bf --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml @@ -0,0 +1,16 @@ +--- +- hosts: idrac + connection: local + name: Get system inventory + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get system inventory + dellemc_get_system_inventory: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password 
}}" + ca_path: "/path/to/ca_cert.pem" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml new file mode 100644 index 00000000..51a06ad1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml @@ -0,0 +1,17 @@ +--- +- hosts: idrac + connection: local + name: Configure iDRAC CSIOR Setting + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure iDRAC CSIOR Setting + dellemc_idrac_lc_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + csior: "Enabled" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml new file mode 100644 index 00000000..61260e3e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml @@ -0,0 +1,17 @@ +--- +- hosts: idrac + connection: local + name: Configure System lockdown mode + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure System lockdown mode + dellemc_system_lockdown_mode: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + lockdown_mode: "Disabled" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml new file mode 100644 index 00000000..9ee11728 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml @@ -0,0 +1,75 @@ +--- +- hosts: idrac + 
connection: local + name: Configure the iDRAC network attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Register iDRAC on DNS + idrac_network: + idrac_ip: "{{idrac_ip}}" + idrac_user: "{{idrac_user}}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + register_idrac_on_dns: "Enabled" + dns_idrac_name: "idrac-3CZWCK2" + auto_config: "Enabled" + static_dns: "dell.com" + + tags: + - dns_register + + - name: Setup VLAN attributes + idrac_network: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + setup_idrac_nic_vlan: "Enabled" + + tags: + - setup_vlan + + - name: Setup iDRAC NIC + idrac_network: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + enable_nic: "Enabled" + nic_selection: "Dedicated" + failover_network: "T_None" + auto_detect: "Disabled" + auto_negotiation: "Enabled" + network_speed: "T_1000" + duplex_mode: "Full" + + tags: + - idrac_nic + + - name: Setup iDRAC IPv4 + idrac_network: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + enable_dhcp: "Enabled" + dns_from_dhcp: "Enabled" + enable_ipv4: "Enabled" + + tags: + - idrac_ipv4 + + - name: Setup iDRAC Static IPv4 + idrac_network: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + dns_from_dhcp: "Disabled" + + tags: + - idrac_staticipv4 \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml new file mode 100644 index 00000000..c5fe7791 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml @@ -0,0 +1,24 @@ +--- +- hosts: idrac + connection: local + name: Configure the iDRAC timezone attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Setup iDRAC Timezone + idrac_timezone_ntp: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + setup_idrac_timezone: "Singapore" + enable_ntp: "Disabled" + ntp_server_1: "100.100.25.1" + ntp_server_2: "100.100.26.2" + ntp_server_3: "100.100.27.3" + + tags: + - idrac_timezone \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml new file mode 100644 index 00000000..9a362176 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml @@ -0,0 +1,155 @@ +--- +- hosts: idrac + connection: local + name: Dell OpenManage Ansible iDRAC Certificates management. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update iDRAC attributes + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: Enabled + tags: idrac + + - name: Update System attributes + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + ThermalSettings.1.ThermalProfile: Sound Cap + tags: system + + - name: Update Lifecycle Controller attributes + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Enabled + tags: lc + + - name: Configure the iDRAC attributes for email alert settings. + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + EmailAlert.1.CustomMsg: Display Message + EmailAlert.1.Enable: Enabled + EmailAlert.1.Address: test@test.com + tags: email-alerts + + - name: Configure the iDRAC attributes for SNMP alert settings. + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMPAlert.1.Destination: 192.168.0.2 + SNMPAlert.1.State: Enabled + SNMPAlert.1.SNMPv3Username: username + tags: snmp-alerts + + - name: Configure the iDRAC attributes for SMTP alert settings. 
+ idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + RemoteHosts.1.SMTPServerIPAddress: 192.168.0.3 + RemoteHosts.1.SMTPAuthentication: Enabled + RemoteHosts.1.SMTPPort: 25 + RemoteHosts.1.SMTPUserName: username + RemoteHosts.1.SMTPPassword: password + tags: smtp-alerts + + - name: Configure the iDRAC attributes for webserver settings. + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + WebServer.1.SSLEncryptionBitLength: 128-Bit or higher + WebServer.1.TLSProtocol: TLS 1.1 and Higher + tags: webserver-settings + + - name: Configure the iDRAC attributes for SNMP settings. + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.SNMPProtocol: All + SNMP.1.AgentEnable: Enabled + SNMP.1.TrapFormat: SNMPv1 + SNMP.1.AlertPort: 162 + SNMP.1.AgentCommunity: public + tags: snmp-settings + + - name: Configure the iDRAC LC attributes for collecting system inventory. + idrac_attributes: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: Enabled + tags: collect-inventory + + - name: Configure the iDRAC system attributes for LCD settings. + idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + LCD.1.Configuration: Service Tag + LCD.1.vConsoleIndication: Enabled + LCD.1.FrontPanelLocking: Full-Access + LCD.1.UserDefinedString: custom lcd string + tags: lcd-config + + - name: Configure the iDRAC attributes for Timezone settings. 
+ idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + Time.1.TimeZone: CST6CDT + NTPConfigGroup.1.NTPEnable: Enabled + NTPConfigGroup.1.NTP1: 192.168.0.5 + NTPConfigGroup.1.NTP2: 192.168.0.6 + NTPConfigGroup.1.NTP3: 192.168.0.7 + tags: timezone-settings + + - name: Configure all attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: test + SNMP.1.AgentEnable: Enabled + SNMP.1.DiscoveryPort: 161 + system_attributes: + ServerOS.1.HostName: demohostname + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Disabled + tags: all-attributes diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml new file mode 100644 index 00000000..a541dce7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml @@ -0,0 +1,115 @@ +--- +- hosts: idrac + connection: local + name: Configure Boot Mode Setting + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure Bios Generic Attributes + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + tags: + - bootconfig + + - name: Configure PXE Generic Attributes + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + attributes: + PxeDev1EnDis: "Enabled" + PxeDev1Protocol: "IPV4" + PxeDev1VlanEnDis: "Enabled" + PxeDev1VlanId: x + PxeDev1Interface: "NIC.Embedded.x-x-x" + PxeDev1VlanPriority: x + tags: + - pxeconfig + + - name: 
Configure attributes of the BIOS at Maintenance window + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + apply_time: AtMaintenanceWindowStart + maintenance_window: + start_time: "2022-09-30T05:15:40-05:00" + duration: 600 + attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + tags: + - at_maintenance_start + + - name: Clear pending BIOS attributes + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + clear_pending: yes + tags: + - clear_pending + + - name: Reset BIOS attributes to default settings. + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_pwd }}" + ca_path: "/path/to/ca_cert.pem" + reset_bios: yes + tags: + - reset_bios + + - name: Configure Boot Sources + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name: "NIC.Integrated.x-x-x" + Enabled: true + Index: 1 + - Name: "NIC.Integrated.x-x-x" + Enabled: true + Index: 0 + tags: + - boot_sources + + - name: Configure Boot Sources - Enabled + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name: "HardDisk.List.1-1" + Enabled: true + tags: + - boot_sources_enabled + + - name: Configure Boot Sources - Index + idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name: "NIC.Integrated.x-x-x" + Index: 1 + tags: + - boot_sources_index \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml 
b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml new file mode 100644 index 00000000..22afb949 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml @@ -0,0 +1,69 @@ +--- +- hosts: idrac + connection: local + name: Configure the boot order settings + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Configure the system boot options settings. + idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_options: + - display_name: Hard drive C + enabled: true + - boot_option_reference: NIC.PxeDevice.2-1 + enabled: true + tags: boot-option + + - name: Configure the boot order settings. + idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_order: + - Boot0001 + - Boot0002 + - Boot0004 + - Boot0003 + tags: boot-order + + - name: Configure the boot source override mode. + idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: cd + boot_source_override_enabled: once + tags: boot-mode + + - name: Configure the UEFI target settings. + idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: uefi + boot_source_override_target: uefi_target + uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)" + tags: uefi-target + + - name: Configure the boot source override mode as pxe. 
+ idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: pxe + boot_source_override_enabled: continuous + tags: pxe-boot-mode diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml new file mode 100644 index 00000000..aa6d43ed --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml @@ -0,0 +1,56 @@ +--- +- hosts: idrac + connection: local + name: Dell OpenManage Ansible iDRAC boot operations. + vars: + ansible_python_interpreter: /usr/bin/python3 + virtual_media_uri: "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia" + file_location: "192.168.0.1:/nfsshare/path/to/boot_image.iso" + nfs_dir: "192.168.0.1:/nfsshare" + iso_file: "boot_image.iso" + ca_path: "/path/to/ca_cert.pem" + boot_source_mode: "legacy" #other options are UEFI + + gather_facts: False + + tasks: + +# Mount the ISO image as a virtual media CD. + - name: "Insert virtual media" + ansible.builtin.uri: + url: "https://{{ idrac_ip }}{{ virtual_media_uri }}" + user: "{{ idrac_user }}" + password: "{{ idrac_password }}" + method: "POST" + body_format: json + body: + Image: "{{ file_location }}" + Inserted: true + WriteProtected: true + use_proxy: yes + status_code: 204 + return_content: no + ca_path: "{{ ca_path }}" + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + tags: + - virtual_media + - vm_boot + +# One-time boot with virtual media. + - name: Boot once from mounted CD. 
+ dellemc.openmanage.idrac_boot: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "{{ ca_path }}" + boot_source_override_mode: "{{ boot_source_mode }}" + boot_source_override_target: cd + boot_source_override_enabled: once + tags: + - boot_cd + - vm_boot + +# Eject the virtual media after boot. diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml new file mode 100644 index 00000000..801f12ed --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml @@ -0,0 +1,69 @@ +--- +- hosts: idrac + connection: local + name: Dell OpenManage Ansible iDRAC Certificates management. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Generate https signing request + idrac_certificates: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycert_dir" + cert_params: + common_name: "sample.domain.com" + organization_unit: "OrgUnit" + locality_name: "Bangalore" + state_name: "Karnataka" + country_code: "IN" + email_address: "admin@domain.com" + organization_name: "OrgName" + subject_alt_name: + - 192.198.2.1 + + - name: Import a SSL certificate. + idrac_certificates: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "HTTPS" + certificate_path: "/path/to/cert.pem" + + - name: Export a SSL certificate. 
+ idrac_certificates: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycert_dir" + + - name: Import a CSC certificate. + idrac_certificates: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "CSC" + certificate_path: "/path/to/cert.pem" + + - name: Export a Client trust certificate. + idrac_certificates: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "CLIENT_TRUST_CERTIFICATE" + certificate_path: "/home/omam/mycert_dir" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml new file mode 100644 index 00000000..c1a2c891 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml @@ -0,0 +1,69 @@ +--- +- hosts: idrac + connection: local + name: Update Firmware Inventory + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update firmware from repository on a HTTP/HTTPS/FTP repository + idrac_firmware: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://downloads.dell.com" + reboot: True + job_wait: True + apply_update: True + + - name: Update firmware from repository on a internally hosted HTTP repository. 
+ idrac_firmware: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password}}" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.1/path_to_folder/" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + + - name: Update firmware from repository on a NFS Share + idrac_firmware: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password}}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.1:/complete_share_path" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + + - name: Update firmware from repository on a CIFS Share + idrac_firmware: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password}}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.1\\share_path" + share_user: "{{ share_user }}" + share_password: "{{ share_password }}" + share_mnt: "/mnt/cifs_share" + reboot: False + job_wait: True + catalog_file_name: "Catalog.xml" + + - name: Firmware compliance report using HTTPS repository. + idrac_firmware: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://downloads.dell.com" + reboot: False + job_wait: True + apply_update: False diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml new file mode 100644 index 00000000..aaca53a5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml @@ -0,0 +1,16 @@ +--- +- hosts: idrac + connection: local + name: Get Installed Firmware Inventory + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get Installed Firmware Inventory. 
+ idrac_firmware_info: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml new file mode 100644 index 00000000..9f0f61de --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml @@ -0,0 +1,17 @@ +--- +- hosts: idrac + connection: local + name: Get LC job Status + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get LC job Status + idrac_lifecycle_controller_job_status_info: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_844222910040" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml new file mode 100644 index 00000000..495e84a6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml @@ -0,0 +1,28 @@ +--- +- hosts: idrac + connection: local + name: Delete LC job + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Delete LC job Queue + idrac_lifecycle_controller_jobs: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + tags: + - delete_all_jobs + + - name: Delete a LC job + idrac_lifecycle_controller_jobs: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_123456789" + tags: + - delete_job \ No newline at end of file diff --git 
a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml new file mode 100644 index 00000000..99c9d0ce --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml @@ -0,0 +1,18 @@ +--- +- hosts: idrac + connection: local + name: Export Lifecycle Controller Logs + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Export Lifecycle Controller Logs + idrac_lifecycle_controller_logs: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "{{ playbook_dir }}" + job_wait: "True" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml new file mode 100644 index 00000000..1798ab99 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml @@ -0,0 +1,16 @@ +--- +- hosts: idrac + connection: local + name: Check LC Ready Status + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Check LC Ready Status + idrac_lifecycle_controller_status_info: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml new file mode 100644 index 00000000..3ad52adc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml @@ -0,0 +1,22 @@ +--- +- hosts: idrac + connection: local + gather_facts: false + name: Booting to Network Operating 
System image + + collections: + - dellemc.openmanage + + tasks: + - name: "Booting to Network Operating System image" + idrac_os_deployment: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "{{ playbook_dir }}" + iso_image: "uninterrupted_os_installation_image.iso." + expose_duration: 180 + + tags: + - network_iso \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml new file mode 100644 index 00000000..2cb44788 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml @@ -0,0 +1,216 @@ +--- +- hosts: idrac + connection: local + name: Dell OpenManage Ansible iDRAC Redfish Storage Controller service. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Assign dedicated hot spare. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + volume_id: + - "Disk.Virtual.0:RAID.Slot.1-1" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_dedicated_hot_spare + + - name: Assign global hot spare. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_global_hot_spare + + - name: Unassign hot spare + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + command: UnassignSpare + tags: + - un-assign-hot-spare + + - name: Set controller encryption key. 
+ idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "SetControllerKey" + controller_id: "RAID.Slot.1-1" + key: "PassPhrase@123" + key_id: "mykeyid123" + tags: + - set_controller_key + + - name: Rekey in LKM mode. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + key: "NewPassPhrase@123" + key_id: "newkeyid123" + old_key: "OldPassPhrase@123" + tags: + - rekey_lkm + + - name: Rekey in SEKM mode. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + mode: "SEKM" + tags: + - rekey_sekm + + - name: Remove controller key. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "RemoveControllerKey" + controller_id: "RAID.Slot.1-1" + tags: + - remove_controller_key + + - name: Reset controller configuration. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ResetConfig" + controller_id: "RAID.Slot.1-1" + tags: + - reset_config + + - name: Enable controller encryption + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "EnableControllerEncryption" + controller_id: "RAID.Slot.1-1" + mode: "LKM" + key: "your_Key@123" + key_id: "your_Keyid@123" + tags: + - enable-encrypt + + - name: Blink physical disk. 
+ idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "BlinkTarget" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - blink-target + + - name: Blink virtual drive. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "BlinkTarget" + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - blink-volume + + - name: Unblink physical disk. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "UnBlinkTarget" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - unblink-target + + - name: Unblink virtual drive. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "UnBlinkTarget" + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - unblink-drive + + - name: Convert physical disk to RAID + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-raid + + - name: Convert physical disk to non-RAID + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToNonRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-non-raid + + - name: Change physical disk state to online. 
+ idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOnline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-online + + - name: Change physical disk state to offline. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOffline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-offline + + - name: Lock virtual drive + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "LockVirtualDisk" + volume_id: "Disk.Virtual.0:RAID.SL.3-1" + tags: + - lock diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml new file mode 100644 index 00000000..d61112f0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml @@ -0,0 +1,138 @@ +--- +- hosts: idrac + connection: local + name: iDRAC Redfish storage controller service with job tracking. + gather_facts: False + vars: + retries_count: 100 + polling_interval: 10 + all_ctrl_task_tags: + - assign_dedicated_hot_spare + - assign_global_hot_spare + - set_controller_key + - rekey_lkm + - rekey_sekm + - remove_controller_key + - reset_config + + collections: + - dellemc.openmanage + +# Use a single tag to run each task with job tracker + tasks: + - name: Assign dedicated hot spare.
+ idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + volume_id: + - "Disk.Virtual.0:RAID.Slot.1-1" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + register: result + tags: + - assign_dedicated_hot_spare + + - name: Assign global hot spare. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + register: result + tags: + - assign_global_hot_spare + + - name: Set controller encryption key. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "SetControllerKey" + controller_id: "RAID.Slot.1-1" + key: "PassPhrase@123" + key_id: "mykeyid123" + register: result + tags: + - set_controller_key + + - name: Rekey in LKM mode. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + key: "NewPassPhrase@123" + key_id: "newkeyid123" + old_key: "OldPassPhrase@123" + register: result + tags: + - rekey_lkm + + - name: Rekey in SEKM mode. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + mode: "SEKM" + register: result + tags: + - rekey_sekm + + - name: Remove controller key. + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "RemoveControllerKey" + controller_id: "RAID.Slot.1-1" + register: result + tags: + - remove_controller_key + + - name: Reset controller configuration. 
+ idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "ResetConfig" + controller_id: "RAID.Slot.1-1" + register: result + tags: + - reset_config + + - name: "iDRAC Job tracking" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: result + until: result.json.JobState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: "{{ all_ctrl_task_tags }}" + + - name: "iDRAC job result." + set_fact: + job_details: "{{ result.json }}" + failed_when: result.json.Message == "Failed" + changed_when: result.json.Message != "Failed" + tags: "{{ all_ctrl_task_tags }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml new file mode 100644 index 00000000..209befd2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml @@ -0,0 +1,19 @@ +--- +- hosts: idrac + connection: local + name: Reset iDRAC + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Reset iDRAC + idrac_reset: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + + tags: + - idrac_reset \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml new file mode 100644 index 00000000..534b2227 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml @@ -0,0 +1,39 @@ +--- +- hosts: idrac + connection: local + name: Reset iDRAC + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Reset iDRAC + idrac_reset: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + register: result + failed_when: result is changed + + - name: Wait for port 443 to become open on the host + wait_for: + host: "{{idrac_ip}}" + port: 443 + delay: 30 + connect_timeout: 5 + timeout: 500 + register: result + failed_when: result.elapsed < 20 + + - name: Get LC status. + idrac_lifecycle_controller_status_info: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + register: result + until: result.msg.LCStatus == 'Ready' or result.msg.LCReady is true + retries: 30 + delay: 10 diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml new file mode 100644 index 00000000..0d61f54c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml @@ -0,0 +1,220 @@ +--- +- hosts: idrac + connection: local + name: Server Configuration Profile + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Export SCP with IDRAC components in JSON format to a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + scp_components: IDRAC + scp_file: example_file + export_format: JSON + export_use: Clone + job_wait: True + tags: export-scp-local + + - name: Import SCP with IDRAC components in JSON format from a 
local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: import + scp_components: "IDRAC" + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + tags: import-scp-local + + - name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + scp_components: "BIOS" + export_format: XML + export_use: Default + job_wait: True + tags: export-scp-nfs + + - name: Import SCP with BIOS components in XML format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: import + scp_components: "BIOS" + scp_file: 192.168.0.1_20210618_162856.xml + shutdown_type: NoReboot + end_host_power_state: "Off" + job_wait: False + tags: import-scp-nfs + + - name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username@domain + share_password: share_password + share_mnt: /mnt/cifs + scp_file: example_file.xml + scp_components: "RAID" + export_format: XML + export_use: Default + job_wait: True + tags: export-scp-cifs + + - name: Import SCP with RAID components in XML format from a CIFS share path + 
dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + share_mnt: /mnt/cifs + command: import + scp_components: "RAID" + scp_file: example_file.xml + shutdown_type: Forced + end_host_power_state: "On" + job_wait: True + tags: import-scp-cifs + + - name: Export SCP with ALL components in JSON format to a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + scp_components: ALL + export_format: JSON + job_wait: False + tags: export-scp-http + + - name: Import SCP with ALL components in JSON format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: "http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: True + tags: import-scp-http + + - name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_components: ALL + export_format: XML + export_use: Replace + job_wait: True + tags: export-scp-https + + - name: Import SCP 
with ALL components in XML format from a HTTPS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_file: 192.168.0.1_20160618_164647.xml + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + tags: import-scp-https + + - name: Preview SCP with ALL components in XML format from a CIFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + tags: preview-scp-cifs + + - name: Preview SCP with ALL components in JSON format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: preview + scp_components: "IDRAC" + scp_file: example_file.xml + job_wait: True + tags: preview-scp-nfs + + - name: Preview SCP with ALL components in XML format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.1/http-share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + tags: preview-scp-http + + - name: Preview SCP with ALL components in XML format from a local path + 
dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: preview + scp_components: "IDRAC" + scp_file: example_file.json + job_wait: False + tags: preview-scp-local diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml new file mode 100644 index 00000000..9820b6b6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml @@ -0,0 +1,18 @@ +--- +- hosts: idrac + connection: local + name: Configure iDRAC syslog attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure iDRAC syslog attributes + idrac_syslog: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "{{ playbook_dir }}" + syslog: "Disabled" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml new file mode 100644 index 00000000..b2f1e1ec --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml @@ -0,0 +1,16 @@ +--- +- hosts: idrac + connection: local + name: Get system inventory + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get system inventory.
+ idrac_system_info: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml new file mode 100644 index 00000000..ab011e13 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml @@ -0,0 +1,71 @@ +--- +- hosts: idrac + connection: local + name: Configure the iDRAC users attributes + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure the create iDRAC users attributes + idrac_user: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + user_name: "user_name" + user_password: "user_password" + privilege: "Administrator" + ipmi_lan_privilege: "User" + enable: "true" + sol_enable: "true" + protocol_enable: "true" + authentication_protocol: "MD5" + privacy_protocol: "DES" + tags: + - create-user + + - name: Configure the modify iDRAC users attributes + idrac_user: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + user_name: "user_name" + new_user_name: "new_user_name" + privilege: "Administrator" + ipmi_lan_privilege: "User" + enable: "true" + sol_enable: "true" + protocol_enable: "true" + authentication_protocol: "MD5" + privacy_protocol: "DES" + tags: + - modify-user + + - name: Configure the modify iDRAC username and password attributes.
+ idrac_user: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + user_name: "user_name" + new_user_name: "new_user_name" + user_password: "user_password" + tags: + - modify-username + + - name: Configure the delete iDRAC users attributes + idrac_user: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + user_name: "user_name" + tags: + - remove-user diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml new file mode 100644 index 00000000..9a2cc520 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml @@ -0,0 +1,107 @@ +--- +- hosts: idrac + connection: local + name: Configure the boot order settings + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Insert image file to Remote File Share 1 using CIFS share. + idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - insert: true + image: "//192.168.0.2/file_path/file.iso" + username: "username" + password: "password" + tags: insert-media-cifs + + - name: Insert image file to Remote File Share 2 using NFS share. + idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - index: 2 + insert: true + image: "192.168.0.4:/file_path/file.iso" + tags: insert-media-nfs + + - name: Insert image file to Remote File Share 1 and 2 using HTTP.
+ idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "http://192.168.0.4/file_path/file.img" + - index: 2 + insert: true + image: "http://192.168.0.4/file_path/file.img" + tags: insert-media-http + + - name: Insert image file using HTTPS. + idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "https://192.168.0.5/file_path/file.img" + username: username + password: password + tags: insert-media-https + + - name: Eject multiple virtual media. + idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - index: 1 + insert: false + - index: 2 + insert: false + tags: eject-media + + - name: Ejection of image file from Remote File Share 1. + idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + insert: false + tags: eject-media-rfs1 + + - name: Insertion and ejection of image file in single task.
+ idrac_virtual_media: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: https://192.168.0.5/file/file.iso + username: username + password: password + - index: 2 + insert: false + tags: insert-eject-media diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml new file mode 100644 index 00000000..f77eabdd --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml @@ -0,0 +1,37 @@ +--- +- hosts: ome + connection: local + name: Configure the SMTP settings of OME and OME-M. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update SMTP destination server configuration with authentication + ome_application_alerts_smtp: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + destination_address: "localhost" + port_number: 25 + use_ssl: true + enable_authentication: true + credentials: + username: "username" + password: "password" + tags: + - smtp_auth + - name: Update SMTP destination server configuration without authentication + ome_application_alerts_smtp: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + destination_address: "localhost" + port_number: 25 + use_ssl: false + enable_authentication: false + tags: + - smtp_no_auth \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml new file mode 100644 index 00000000..9fce647e --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml @@ -0,0 +1,40 @@ +--- +- hosts: ome + connection: local + name: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure single server to forward syslog + ome_application_alerts_syslog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + enabled: true + destination_address: 192.168.0.2 + port_number: 514 + + - name: Configure multiple server to forward syslog + ome_application_alerts_syslog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + port_number: 523 + - id: 2 + enabled: true + destination_address: sysloghost1.lab.com + - id: 3 + enabled: false + - id: 4 + enabled: true + destination_address: 192.168.0.4 + port_number: 514 \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml new file mode 100644 index 00000000..ab0fb9eb --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml @@ -0,0 +1,53 @@ +--- +- hosts: ome + connection: local + name: Dell OME Application Certificate Signing Request. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: generate certificate signing request. + ome_application_certificate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + distinguished_name: "hostname.com" + department_name: "Remote Access Group" + business_name: "Dell Inc." 
+ locality: "Round Rock" + country_state: "Texas" + country: "US" + email: "support@dell.com" + register: result + tags: + - generate + + - name: copy CSR data into a file. + ansible.builtin.copy: + content: "{{ result.csr_status.CertificateData }}" + dest: "csr_data.txt" + tags: + - csr-data + + - name: upload the certificate. + ome_application_certificate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "upload" + upload_file: "/path/certificate.cer" + tags: + - upload + + - name: "once certificate uploaded, OME cannot be accessed for few seconds, hence wait for 10 seconds." + wait_for: + host: "{{ hostname }}" + port: "{{ port }}" + delay: 10 + tags: + - upload diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml new file mode 100644 index 00000000..b0b29ae9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml @@ -0,0 +1,97 @@ +--- +- hosts: ome + connection: local + name: Dell OME Application Console Preferences. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update Console preferences with all the settings. 
+ ome_application_console_preferences: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 123 + device_health: + health_check_interval: 1 + health_check_interval_unit: "Hourly" + health_and_power_state_on_connection_lost: "last_known" + discovery_settings: + general_device_naming: "DNS" + server_device_naming: "IDRAC_HOSTNAME" + invalid_device_hostname: "localhost" + common_mac_addresses: "::" + server_initiated_discovery: + device_discovery_approval_policy: "Automatic" + set_trap_destination: True + mx7000_onboarding_preferences: "all" + builtin_appliance_share: + share_options: "CIFS" + cifs_options: "V1" + email_sender_settings: "admin@dell.com" + trap_forwarding_format: "Original" + metrics_collection_settings: 31 + tags: + - all_settings + + - name: Update Console preferences with report and device health settings. + ome_application_console_preferences: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 236 + device_health: + health_check_interval: 10 + health_check_interval_unit: "Hourly" + health_and_power_state_on_connection_lost: "last_known" + tags: + - valid_report_device + + - name: Update Console preferences with invalid device health settings. + ome_application_console_preferences: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_health: + health_check_interval: 65 + health_check_interval_unit: "Minutes" + tags: + - invalid_device + + - name: Update Console preferences with discovery and built in appliance share settings. 
+ ome_application_console_preferences: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + discovery_settings: + general_device_naming: "DNS" + server_device_naming: "IDRAC_SYSTEM_HOSTNAME" + invalid_device_hostname: "localhost" + common_mac_addresses: "00:53:45:00:00:00" + builtin_appliance_share: + share_options: "CIFS" + cifs_options: "V1" + tags: + - valid_discovery + + - name: Update Console preferences with server initiated discovery, mx7000 onboarding preferences, email sender, trap forwarding format, and metrics collection settings. + ome_application_console_preferences: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + server_initiated_discovery: + device_discovery_approval_policy: "Automatic" + set_trap_destination: True + mx7000_onboarding_preferences: "chassis" + email_sender_settings: "admin@dell.com" + trap_forwarding_format: "Normalized" + metrics_collection_settings: 361 + tags: + - valid_metrics diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml new file mode 100644 index 00000000..3eff08bc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml @@ -0,0 +1,115 @@ +--- +- hosts: ome + connection: local + name: Dell OME Application network settings. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: IPv4 network settings + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: "" + reboot_delay: 5 + tags: + - ipv4_config + + - name: IPv6 network settings + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + reboot_delay: 10 + tags: + - ipv6_config + + - name: Management vLAN settings for primary interface + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + management_vlan: + enable_vlan: true + vlan_id: 3344 + dns_configuration: + register_with_dns: false + reboot_delay: 1 + tags: + - mgmt_vlan + + - name: DNS settings + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: 
"localdomainname" + reboot_delay: 1 + tags: + - dns_config + + - name: Complete network settings + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "localdomainname" + reboot_delay: 1 + tags: + - all_network_config \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml new file mode 100644 index 00000000..1f4cf709 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml @@ -0,0 +1,65 @@ +--- +- hosts: ome + vars: + retries_count: 50 + polling_interval: 5 # in seconds + connection: local + name: OME - Complete network settings with details tracking + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Complete network settings + ome_application_network_address: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: 
"/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "localdomainname" + reboot_delay: 1 + register: facts_result + + # To end play when no job_info + - name: "End the play when no job_info" + meta: end_play + when: + - facts_result.changed == false + - "'job_info' not in facts_result" + + - name: "Get job details using job id from network address config task." 
+ ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ facts_result.job_info.Id }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name == 'Failed' + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml new file mode 100644 index 00000000..0c0e8abf --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml @@ -0,0 +1,44 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Application network proxy setting. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update proxy configuration and enable authentication. + ome_application_network_proxy: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: true + proxy_username: "root" + proxy_password: "proxy_password" + tags: setting1 + + - name: Reset proxy authentication. + ome_application_network_proxy: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: false + tags: setting2 + + - name: Reset proxy configuration. 
+ ome_application_network_proxy: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: false + tags: setting3 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml new file mode 100644 index 00000000..68340ba9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml @@ -0,0 +1,73 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Application network setting. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure universal inactivity timeout + ome_application_network_settings: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: true + universal_timeout: 30 + api_sessions: 90 + gui_sessions: 5 + ssh_sessions: 2 + serial_sessions: 1 + tags: + - enable_universal_timeout + - name: Configure API and GUI timeout and sessions + ome_application_network_settings: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 25 + gui_sessions: 5 + tags: + - enable_api_gui_timout_sessions + - name: Configure timeout and sessions for all parameters + ome_application_network_settings: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 + tags: + - enable_all_timeout_sessions + - name: Disable universal timeout and configure 
timeout and sessions for other parameters + ome_application_network_settings: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: false + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 + tags: + - disa_all_timeout_sessions \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml new file mode 100644 index 00000000..7dd4edad --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml @@ -0,0 +1,33 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Application network time setting. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure system time. + ome_application_network_time: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + enable_ntp: false + system_time: "2020-03-31 21:35:18" + time_zone: "TZ_ID_11" + tags: time_setting1 + + - name: Configure NTP server for time synchronization. 
+ ome_application_network_time: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + time_zone: "TZ_ID_66" + enable_ntp: true + primary_ntp_address: "192.168.0.2" + secondary_ntp_address1: "192.168.0.3" + secondary_ntp_address2: "192.168.0.4" + tags: time_setting2 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml new file mode 100644 index 00000000..a57e0b90 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml @@ -0,0 +1,31 @@ +--- +- hosts: ome + connection: local + gather_facts: false + name: "Ome application network time zone information - Ansible Module" + vars: + time_zone_uri: "/api/ApplicationService/Network/TimeZones" + + collections: + - dellemc.openmanage + + tasks: + - name: "Get list of all available time zones along with information specific to each time zone."
+ uri: + url: "https://{{ baseuri }}{{ time_zone_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200 + validate_certs: no + force_basic_auth: yes + register: time_zone_result + failed_when: "'value' not in time_zone_result.json" + + - name: Get specific time zone ID using time zone name + with_items: + - "{{ time_zone_result.json.value }}" + debug: + msg: "{{item['Id']}}" + when: item['Name']=='(GMT+05:30) Sri Jayawardenepura' diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml new file mode 100644 index 00000000..e445ed84 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml @@ -0,0 +1,40 @@ +--- +- hosts: ome + connection: local + name: Dell OME Application network webserver settings. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update webserver port and session time out configuration. + ome_application_network_webserver: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 443 + webserver_timeout: 10 + tags: + - port_timeout_update + + - name: Update session time out + ome_application_network_webserver: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + webserver_timeout: 30 + tags: + - timeout_update + + - name: Update web server port. 
+ ome_application_network_webserver: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 8443 + tags: + - port_update \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml new file mode 100644 index 00000000..28911b80 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml @@ -0,0 +1,61 @@ +--- +- hosts: ome + connection: local + name: "Dell OME Application network webserver port change and track web + server till the service restarts." + gather_facts: False + vars: + # 5 minutes wait max + retries_count: 30 + polling_interval: 10 + webserver_uri: "/api/ApplicationService/Network/WebServerConfiguration" + + collections: + - dellemc.openmanage + + tasks: + # Update web server configuration + - name: Update webserver port and timeout of OME + ome_application_network_webserver: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + port: "{{ ome_webserver_port }}" + webserver_port: "{{ new_port }}" + webserver_timeout: 21 + register: result + + # To end play when no port change or failure + - name: "End the play when no port change" + meta: end_play + when: + - result.changed == false + - "'webserver_configuration' not in result" + + # Loop till OME webserver is active by using the new port and webserver config GET call + - name: "Pause play until webserver URL is reachable from this host with new port" + uri: + url: "https://{{ hostname }}:{{ result.webserver_configuration.PortNumber + }}{{ webserver_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + 
return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: webport_result + until: "'PortNumber' in webport_result or webport_result.status == 200" + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + # Output the webserver_configuration values to be used further + - name: "Output the webserver config" + vars: + webserver_configuration: "{{ webport_result.json }}" + debug: + var: webserver_configuration \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml new file mode 100644 index 00000000..6a259e96 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml @@ -0,0 +1,57 @@ +--- +- hosts: ome + connection: local + name: Configure login security settings + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Configure restricted allowed IP range + ome_application_security_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + + - name: Configure login lockout policy + ome_application_security_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + + - name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds + ome_application_security_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: 
"/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + job_wait_timeout: 60 + + - name: Enable FIPS mode + ome_application_security_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fips_mode_enable: yes diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml new file mode 100644 index 00000000..1d5f2375 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml @@ -0,0 +1,119 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible configuration compliance baseline. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create a configuration compliance baseline using device IDs + ome_configuration_compliance_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + description: "description of baseline" + names: "baseline1" + device_ids: + - 1111 + - 2222 + tags: + - create_compliance_baseline_device_id + + - name: Create a configuration compliance baseline using device service tags + ome_configuration_compliance_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + names: "baseline1" + description: "description of baseline" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + tags: + - create_compliance_baseline_tags + + - name: Create a configuration compliance baseline using group names + ome_configuration_compliance_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + job_wait_timeout: 1000 + names: "baseline1" + description: "description of baseline" + device_group_names: + - "Group1" + - "Group2" + tags: + - create_compliance_baseline_group_id + + - name: Delete the configuration compliance baselines + ome_configuration_compliance_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: delete + names: + - baseline1 + - baseline2 + tags: + - delete_compliance_baseline + + - name: Modify a configuration compliance baseline using group names + ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline1" + new_name: "baseline_update" + template_name: "template2" 
+ description: "new description of baseline" + job_wait_timeout: 1000 + device_group_names: + - Group1 + + - name: Remediate specific non-compliant devices to a configuration compliance baseline using device IDs + ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_ids: + - 1111 + + - name: Remediate specific non-compliant devices to a configuration compliance baseline using device service tags + ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + job_wait_timeout: 2000 + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + + - name: Remediate all the non-compliant devices to a configuration compliance baseline + ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + job_wait_timeout: 2000 + names: "baseline1" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml new file mode 100644 index 00000000..076ce84d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml @@ -0,0 +1,52 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible configuration compliance baseline workflow. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Create a configuration compliance baseline using group names + ome_configuration_compliance_baseline: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + job_wait_timeout: 1000 + names: "baseline1" + description: "description of baseline" + device_group_names: + - "Group1" + - "Group2" + + - name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline. + ome_configuration_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline: "baseline1" + register: compliance_report + + # This task returns a list of device IDs. + # If you want to select devices by service tag instead, change the attribute to ServiceTag + # and replace the next task's device_ids attribute with device_service_tag. + - name: Filter the non-compliant devices based on the retrieved compliance report.
+ ansible.builtin.set_fact: + non_compliance_devices: "{{ compliance_report.compliance_info | json_query(\"value[?ComplianceStatus=='NONCOMPLIANT']\") | map(attribute='Id') | list }}" + + - name: Remediate the specified non-compliant devices to a configuration compliance baseline using device IDs + ome_configuration_compliance_baseline: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_ids: "{{ non_compliance_devices }}" + when: "non_compliance_devices|length>0" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml new file mode 100644 index 00000000..a2455703 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml @@ -0,0 +1,35 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Module for Device compliance information + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline. + ome_configuration_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + + - name: Retrieve the compliance report for a specific device associated with the baseline using the device ID. + ome_configuration_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_id: 10001 + + - name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag.
+ ome_configuration_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_service_tag: 2HFGH3 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml new file mode 100644 index 00000000..48259af6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml @@ -0,0 +1,26 @@ +--- +- hosts: ome + connection: local + gather_facts: false + name: "OME - Ansible Modules" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Retrieve baseline information for specific baseline." + ome_firmware_baseline_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + register: result + + - name: "Filter out device compliance reports." 
+ loop: "{{ result.baseline_compliance_info }}" + debug: + msg: "{{item.ComponentComplianceReports}}" + loop_control: + label: "{{ item.DeviceId }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml new file mode 100644 index 00000000..77d4eddf --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml @@ -0,0 +1,28 @@ +--- +- hosts: ome + connection: local + gather_facts: false + name: "OME - Ansible Modules" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Retrieve baseline information for specified devices." + ome_firmware_baseline_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_ids: + - 11111 + - 22222 + register: result + + - name: "Filter out device compliance reports." + debug: + msg: "{{ item.DeviceComplianceReports.0.ComponentComplianceReports }}" + loop: "{{ result.baseline_compliance_info }}" + loop_control: + label: "{{ item.Name }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml new file mode 100644 index 00000000..35f0eb23 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml @@ -0,0 +1,75 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible OME firmware baseline operations. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create baseline for device IDs + ome_firmware_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_ids: + - 1010 + - 2020 + + - name: Create baseline for servicetags + ome_firmware_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + + - name: create baseline for device groups without job_tracking + ome_firmware_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_group_names: + - "Group1" + - "Group2" + job_wait: no + + - name: Modify an existing baseline + ome_firmware_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "existing_baseline_name" + new_baseline_name: "new_baseline_name" + baseline_description: "new baseline_description" + catalog_name: "catalog_other" + device_group_names: + - "Group3" + - "Group4" + - "Group5" + downgrade_enabled: no + is_64_bit: yes + + - name: Delete a baseline + ome_firmware_baseline: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: absent + baseline_name: "baseline_name" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml 
b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml new file mode 100644 index 00000000..cb42e174 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml @@ -0,0 +1,51 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible template inventory details. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieves device based compliance report for specified device IDs.. + ome_firmware_baseline_compliance_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_ids: + - 11111 + - 22222 + tags: device_ids + + - name: Retrieves device based compliance report for specified service Tags. + ome_firmware_baseline_compliance_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_service_tags: + - MXL1234 + - MXL4567 + tags: device_service_tags + + - name: Retrieves device based compliance report for specified group names. + ome_firmware_baseline_compliance_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - group1 + - group2 + tags: device_group_names + + - name: Retrieves device compliance report for a specified baseline. 
+ ome_firmware_baseline_compliance_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + tags: baseline_device \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml new file mode 100644 index 00000000..bbbf5f0d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml @@ -0,0 +1,63 @@ +--- +- hosts: ome + connection: local + gather_facts: false + name: "OME - Ansible Modules" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Retrieve baseline information for specific device ids." + ome_firmware_baseline_compliance_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_ids: + - 11111 + - 11112 + register: result + + tags: + - overall-compliance-report + + - name: "Firmware baseline compliance info based on FirmwareStatus - Non-Compliant" + set_fact: + non_compliance_fact: "{{ item }}" + when: + - item.DeviceComplianceReports.0.FirmwareStatus=='Non-Compliant' + with_items: + - "{{ result.baseline_compliance_info }}" + loop_control: + label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.FirmwareStatus }}" + + tags: + - non-compliance-report + + - name: "Firmware baseline compliance info based on Device ID" + set_fact: + device_fact: "{{ item }}" + when: + - item.DeviceComplianceReports.0.DeviceId==11111 + with_items: + - "{{ result.baseline_compliance_info }}" + loop_control: + label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.DeviceId }}" + + tags: + - device-id-report + + - name: "Firmware baseline compliance info based on Device Service Tag" + 
set_fact: + service_tag_fact: "{{ item }}" + when: + - item.DeviceComplianceReports.0.ServiceTag=='1X1X1' + with_items: + - "{{ result.baseline_compliance_info }}" + loop_control: + label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.ServiceTag }}" + + tags: + - device-service-tag-report \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml new file mode 100644 index 00000000..7993db51 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml @@ -0,0 +1,26 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible firmware baseline details. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve details of all the available firmware baselines. + ome_firmware_baseline_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + tags: firmware_baselines + + - name: Retrieve details of a specific firmware baseline identified by its baseline name. + ome_firmware_baseline_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + tags: firmware_baseline \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml new file mode 100644 index 00000000..a065a3c0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml @@ -0,0 +1,121 @@ +--- +- hosts: ome + connection: local + name: "OME - Create Catalog using Repository." 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create a catalog from HTTPS repository + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + catalog_name: catalog1 + catalog_description: catalog description + source: downloads.company.com + repository_type: HTTPS + source_path: "catalog" + file_name: "catalog.gz" + check_certificate: True + + - name: Create a catalog from HTTP repository + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "{{ catalog_name }}" + catalog_description: catalog description + source: downloads.company.com + repository_type: HTTP + source_path: "catalog" + file_name: "catalog.gz" + + - name: Create a catalog using CIFS share + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "{{ catalog_name }}" + catalog_description: catalog description + source: "192.166.0.1" + repository_type: CIFS + source_path: "cifs/R940" + file_name: "catalog.gz" + repository_username: "{{ repository_username }}" + repository_password: "{{ repository_password }}" + repository_domain: "{{ repository_domain }}" + + - name: Create a catalog using NFS share + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "{{ catalog_name }}" + catalog_description: catalog description + source: "192.166.0.2" + repository_type: NFS + source_path: "/nfs/R940" + file_name: "catalog.xml" + + - name: Create a catalog using repository from Dell.com + ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: 
"catalog_description" + repository_type: "DELL_ONLINE" + check_certificate: True + + - name: Modify a catalog using a repository from CIFS share + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "{{ catalog_name }}" + catalog_description: new catalog description + source: "192.166.0.2" + repository_type: CIFS + source_path: "cifs/R941" + file_name: "catalog1.gz" + repository_username: "{{ repository_username }}" + repository_password: "{{ repository_password }}" + repository_domain: "{{ repository_domain }}" + + - name: Modify a catalog using a repository from Dell.com + ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_id: 10 + repository_type: DELL_ONLINE + new_catalog_name: "new_catalog_name" + catalog_description: "new_catalog_description" + + - name: Delete catalog using catalog name + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_name: ["catalog_name1", "catalog_name2"] + + - name: Delete catalog using catalog id + ome_firmware_catalog: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_id: [11, 34] \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml new file mode 100644 index 00000000..198e2cce --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml @@ -0,0 +1,142 @@ +--- +- hosts: ome + connection: local + name: "OME - Update Firmware" + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Update firmware from a DUP file using a 
device ids + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 11111 + - 22222 + dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE" + + - name: Update firmware from a DUP file using a device service tags + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - KLBR111 + - KLBR222 + dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE" + + - name: Update firmware from a DUP file using a device group names + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - servers + dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE" + + - name: Update firmware using baseline name + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + + - name: Stage firmware for the next reboot using baseline name + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + schedule: StageForNextReboot + + - name: Update firmware using baseline name and components + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + components: + - BIOS + + - name: Update firmware of device components from a DUP file using a device ids in a baseline + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_id: + - 11111 + - 22222 + components: + - iDRAC with Lifecycle Controller + + - name: Update 
firmware of device components from a baseline using a device service tags under a baseline + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_service_tag: + - KLBR111 + - KLBR222 + components: + - IOM-SAS + + - name: Update firmware using baseline name with a device id and required components + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - id: 12345 + components: + - Lifecycle Controller + - id: 12346 + components: + - Enterprise UEFI Diagnostics + - BIOS + + - name: Update firmware using baseline name with a device service tag and required components + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - PERC H740P Adapter + - BIOS + - service_tag: GHIJK34 + components: + - OS Drivers Pack + + - name: Update firmware using baseline name with a device service tag or device id and required components + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - BOSS-S1 Adapter + - PowerEdge Server BIOS + - id: 12345 + components: + - iDRAC with Lifecycle Controller diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml new file mode 100644 index 00000000..c104f3f5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml @@ -0,0 +1,111 @@ +--- +- hosts: ome + 
connection: local + name: "OME - Update Firmware" + gather_facts: False + vars: + retries_count: 100 + polling_interval: 10 + all_firmware_task_tags: + - device-ids + - service-tags + - group-name + - baseline-name + - baseline-name-dup + + collections: + - dellemc.openmanage + + tasks: + - name: "Update firmware from a DUP file using a device ids." + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 11111 + - 22222 + dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE" + register: result + tags: + - device-ids + + - name: "Update firmware from a DUP file using a device service tags." + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - KLBR111 + - KLBR222 + dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE" + register: result + tags: + - service-tags + + - name: "Update firmware from a DUP file using a device group names." + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - servers + dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE" + register: result + tags: + - group-name + + - name: "Update firmware using baseline name." + ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + register: result + tags: + - baseline-name + + - name: "Update firmware from a DUP file using a baseline names." 
+ ome_firmware: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_devices, baseline_groups" + dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE" + tags: + - baseline-name-dup + + - name: "Track job details for the ome firmware update operation using a job id." + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.update_status.Id }}" + register: job_result + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' or job_result.job_info.LastRunStatus.Name == 'Warning' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: "{{ all_firmware_task_tags }}" + + - name: "Set job fact details if the task status is warning." + set_fact: + ome_firmware_job_fact: "{{ job_result | combine(job_msg, recursive=true) }}" + failed_when: job_result.job_info.LastRunStatus.Name == 'Warning' + vars: + job_msg: {'msg': 'Completed with {{ job_result.job_info.LastRunStatus.Name|lower}}'} + when: job_result.job_info.LastRunStatus.Name == 'Warning' + tags: "{{ all_firmware_task_tags }}" + + - name: "Set job fact details if the task status is completed or failed." 
+ set_fact: + ome_firmware_job_fact: "{{ job_result }}" + failed_when: job_result.job_info.LastRunStatus.Name == 'Failed' + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + when: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + tags: "{{ all_firmware_task_tags }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml new file mode 100644 index 00000000..16011809 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml @@ -0,0 +1,72 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Active Directory service configuration. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Add Active Directory service using DNS lookup along with the test connection + ome_active_directory: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: my_ad1 + domain_server: + - domainname.com + group_domain: domainname.com + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + + - name: Add Active Directory service using IP address of the domain controller with certificate validation + ome_active_directory: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.181 + group_domain: domainname.com + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" + + - name: Modify domain controller IP address, network_timeout and group_domain + ome_active_directory: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + 
domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.189 + group_domain: newdomain.in + network_timeout: 150 + + - name: Delete Active Directory service + ome_active_directory: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + state: absent + + - name: Test connection to existing Active Directory service with certificate validation + ome_active_directory: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml new file mode 100644 index 00000000..0099fc80 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml @@ -0,0 +1,65 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible slot name configuration. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Rename the slots in multiple chassis using slot number and chassis service tag. 
+ ome_chassis_slots: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + slot_options: + - chassis_service_tag: ABC1234 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + - chassis_service_tag: ABC1235 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + + - name: Rename single slot name of the sled using sled ID + ome_chassis_slots: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: slot_device_name_1 + + - name: Rename single slot name of the sled using sled service tag + ome_chassis_slots: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_service_tag: ABC1234 + slot_name: service_tag_slot + + - name: Rename multiple slot names of the devices + ome_chassis_slots: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: sled_name_1 + - device_service_tag: ABC1234 + slot_name: sled_name_2 + - device_id: 10055 + slot_name: sled_name_3 + - device_service_tag: PQR1234 + slot_name: sled_name_4 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml new file mode 100644 index 00000000..d7af342a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml @@ -0,0 +1,167 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible device inventory details. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Add devices to a static device group by using the group name and device IDs + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + tags: device-id + + - name: Add devices to a static device group by using the group name and device service tags + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_service_tags: + - GHRT2R + - KJHDF3 + tags: device-service-tags + + - name: Add devices to a static device group by using the group ID and device service tags + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + device_service_tags: + - GHRT2R + - KJHDF3 + tags: group_id_device-service-tags + + - name: Add devices to a static device group by using the group name and IPv4 addresses + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + tags: group_name_ipv4 + + - name: Add devices to a static device group by using the group ID and IPv6 addresses + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + tags: group_id_ipv6 + + - name: Add devices to a static device group by using the group ID and supported IPv4 and IPv6 address formats. 
+ ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + tags: group_id_ipv4_ipv6 + + - name: Remove devices from a static device group by using the group name and device IDs + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + tags: device-id + + - name: Remove devices from a static device group by using the group name and device service tags + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_service_tags: + - GHRT2R + - KJHDF3 + tags: device-service-tags + + - name: Remove devices from a static device group by using the group ID and device service tags + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + device_service_tags: + - GHRT2R + - KJHDF3 + tags: group_id_device-service-tags + + - name: Remove devices from a static device group by using the group name and IPv4 addresses + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + tags: group_name_ipv4 + + - name: Remove devices from a static device group by using the group ID and IPv6 addresses + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: 
"/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + tags: group_id_ipv6 + + - name: Remove devices from a static device group by using the group ID and supported IPv4 and IPv6 address formats. + ome_device_group: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + tags: group_id_ipv4_ipv6 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml new file mode 100644 index 00000000..6b307749 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml @@ -0,0 +1,79 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible device inventory details. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve basic inventory of all devices. + ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering. + ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "basic_inventory" + system_query_options: + filter: "Id eq 33333 or Id eq 11111" + + - name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222. 
+ ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + - 22222 + + - name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567. + ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 + + - name: Retrieve details of specified inventory type of specified devices identified by ID and service tags. + ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + device_service_tag: + - MXL1234 + - MXL4567 + inventory_type: "serverDeviceCards" + + - name: Retrieve subsystem health of specified devices identified by service tags. + ome_device_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "subsystem_health" + system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 + + diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml new file mode 100644 index 00000000..6f282c8a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml @@ -0,0 +1,68 @@ +--- +- hosts: ome + connection: local + name: OpenManage Ansible Modules for local access settings. + gather_facts: false + collections: dellemc.openmanage + + tasks: + + - name: Configure KVM, direct access and power button settings of the chassis using device ID. 
+ ome_device_local_access_configuration: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + tags: lac-device-id + + - name: Configure Quick sync and LCD settings of the chassis using device service tag. + ome_device_local_access_configuration: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + quick_sync: + quick_sync_access: READ_ONLY + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 10 + timeout_limit_unit: MINUTES + lcd: + lcd_access: VIEW_ONLY + lcd_language: en + user_defined: "LCD Text" + tags: lac-tag + + - name: Configure all local access settings of the host chassis. 
+ ome_device_local_access_configuration: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + quick_sync: + quick_sync_access: READ_WRITE + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 120 + timeout_limit_unit: SECONDS + lcd: + lcd_access: VIEW_MODIFY + lcd_language: en + user_defined: "LCD Text" + tags: lac-host diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml new file mode 100644 index 00000000..d2d86050 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml @@ -0,0 +1,52 @@ +--- +- hosts: ome + connection: local + name: OpenManage Ansible Modules + gather_facts: false + collections: dellemc.openmanage + + tasks: + + - name: Update device location settings of a chassis using the device ID. + ome_device_location: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + data_center: data center 1 + room: room 1 + aisle: aisle 1 + rack: rack 1 + rack_slot: 2 + location: location 1 + tags: location-device-id + + - name: Update device location settings of a chassis using the device service tag. + ome_device_location: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + data_center: data center 1 + room: room 1 + aisle: aisle 1 + rack: rack 1 + rack_slot: 2 + location: location 1 + tags: location-device-service-tag + + - name: Update device location settings of the host chassis. 
+ ome_device_location: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + data_center: data center 1 + room: room 1 + aisle: aisle 1 + rack: rack 1 + rack_slot: 2 + location: location 1 + tags: location-chassis diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml new file mode 100644 index 00000000..e05a3772 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml @@ -0,0 +1,105 @@ +--- +- hosts: ome + connection: local + name: Dell OME Modular device network settings. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Network settings for chassis + ome_device_mgmt_network: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: CHAS123 + delay: 10 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: MX-SVCTAG + dns_domain_name: dnslocaldomain + auto_negotiation: no + network_speed: 100_MB + + - name: Network settings for server + ome_device_mgmt_network: + hostname: "{{hostname}}" + username: "{{username}}" + 
password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: SRVR123 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + + - name: Network settings for I/O module + ome_device_mgmt_network: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: IOM1234 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + dns_server_settings: + preferred_dns_server: 192.168.0.4 + alternate_dns_server1: 192.168.0.5 + + - name: Management VLAN configuration of chassis using device id + ome_device_mgmt_network: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_id: 12345 + management_vlan: + enable_vlan: true + vlan_id: 2345 + dns_configuration: + register_with_dns: false \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml new file mode 
100644 index 00000000..0a47d2dd --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml @@ -0,0 +1,59 @@ +--- +- hosts: ome + connection: local + name: OpenManage Ansible Modules for network services settings. + gather_facts: false + collections: dellemc.openmanage + + tasks: + + - name: Update network services settings of a chassis using the device ID. + ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + snmp_settings: + enabled: true + port_number: 161 + community_name: public + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: false + tags: snmp-settings + + - name: Update network services settings of a chassis using the device service tag. + ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + snmp_settings: + enabled: false + ssh_settings: + enabled: true + port_number: 22 + max_sessions: 1 + max_auth_retries: 3 + idle_timeout: 1 + remote_racadm_settings: + enabled: false + tags: ssh-settings + + - name: Update network services settings of the host chassis. 
+ ome_device_network_services: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25012 + snmp_settings: + enabled: false + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: true + tags: racadm-settings diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml new file mode 100644 index 00000000..4b68a29b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml @@ -0,0 +1,54 @@ +--- +- hosts: ome + connection: local + name: OpenManage Ansible Modules + gather_facts: false + collections: dellemc.openmanage + + tasks: + + - name: Update power configuration settings of a chassis using the device ID. + ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + power_configuration: + enable_power_cap: true + power_cap: 3424 + tags: power-config + + - name: Update redundancy configuration settings of a chassis using the device service tag. + ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + redundancy_configuration: + redundancy_policy: GRID_REDUNDANCY + tags: redundancy-config + + - name: Update hot spare configuration settings of a chassis using device ID. + ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25012 + hot_spare_configuration: + enable_hot_spare: true + primary_grid: GRID_1 + tags: hostspare-config + + - name: Update power configuration settings of a host chassis. 
+ ome_device_power_settings: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + power_configuration: + enable_power_cap: true + power_cap: 3425 + tags: power-config-chassis diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml new file mode 100644 index 00000000..71a07e68 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml @@ -0,0 +1,66 @@ +--- +- hosts: ome + connection: local + name: OpenManage Ansible Modules for Quick Deploy settings. + gather_facts: false + collections: dellemc.openmanage + + tasks: + + - name: Configure server Quick Deploy settings of the chassis using device ID. + ome_device_quick_deploy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + setting_type: ServerQuickDeploy + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 + tags: server-quick-deploy + + - name: Configure server Quick Deploy settings of the chassis using device service tag. 
+ ome_device_quick_deploy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + setting_type: IOMQuickDeploy + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 + tags: iom-quick-deploy diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml new file mode 100644 index 00000000..ba93eb00 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml @@ -0,0 +1,60 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible device operations. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Refresh Inventory + ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_action: refresh_inventory + device_service_tags: + - 2HB7NX2 + + - name: Clear iDRAC job queue + ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_action: clear_idrac_job_queue + device_service_tags: + - 2HB7NX2 + + - name: Reset iDRAC using the service tag + ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_action: reset_idrac + device_service_tags: + - 2H7HNX2 + + - name: Remove devices using servicetags + ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_service_tags: + - SVCTAG1 + - SVCTAF2 + + - name: Remove devices using IDs + ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_ids: + - 10235 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml new file mode 100644 index 00000000..b5f0fc97 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml @@ -0,0 +1,72 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible diagnostics operation. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Application log extraction using CIFS share location + ome_diagnostics: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + share_type: CIFS + share_address: "{{ share_address }}" + share_user: "{{ share_username }}" + share_password: "{{ share_password }}" + share_name: "{{ share_name }}" + log_type: application + mask_sensitive_info: false + test_connection: true + tags: app-cifs-log + + - name: Application log extraction using NFS share location + ome_diagnostics: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + share_address: "{{ share_address }}" + share_type: NFS + share_name: "{{ share_name }}" + log_type: application + mask_sensitive_info: true + test_connection: true + tags: app-nfs-log + + - name: Support assist log extraction using CIFS share location + ome_diagnostics: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + share_address: "{{ share_address }}" + share_user: "{{ share_username }}" + share_password: "{{ share_password }}" + share_name: "{{ share_name }}" + share_type: CIFS + log_type: support_assist_collection + device_ids: + - 10011 + - 10022 + log_selectors: [OS_LOGS] + test_connection: true + tags: tsr-cifs-log + + - name: Support assist log extraction using NFS share location + ome_diagnostics: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + share_address: "{{ share_address }}" + share_type: NFS + share_name: "{{ share_name }}" + log_type: support_assist_collection + device_group_name: group_name + test_connection: true + tags: tsr-nfs-log diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml 
b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml new file mode 100644 index 00000000..1a16e328 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml @@ -0,0 +1,189 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible discovery operations. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Discover servers in a range + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - SERVER + wsman: + username: user + password: password + tags: + - server_discovery + + - name: Discover chassis in a range + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - CHASSIS + wsman: + username: user + password: password + tags: + - chassis_discovery + + - name: Discover switches in a range + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_switch_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - NETWORK SWITCH + snmp: + community: snmp_creds + tags: + - switch_discovery + + - name: Discover storage in a range + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_storage_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - STORAGE + storage: + username: user + password: password + snmp: + 
community: community_str + tags: + - storage_discovery + + - name: Delete a discovery job + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + discovery_job_name: "Discovery-123" + tags: + - delete_discovery + + - name: Schedule the discovery of multiple devices ignoring partial failure and enable trap to receive alerts + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + discovery_job_name: "Discovery-123" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + - 192.96.0.0/24 + - 192.96.26.108 + device_types: + - SERVER + - CHASSIS + - STORAGE + - NETWORK SWITCH + wsman: + username: wsman_user + password: wsman_pwd + redfish: + username: redfish_user + password: redfish_pwd + snmp: + community: snmp_community + - network_address_detail: + - 192.96.25.1-192.96.25.255 + - ipmihost + - esxiserver + - sshserver + device_types: + - SERVER + ssh: + username: ssh_user + password: ssh_pwd + vmware: + username: vm_user + password: vmware_pwd + ipmi: + username: ipmi_user + password: ipmi_pwd + schedule: RunLater + cron: "0 0 9 ? 
* MON,WED,FRI *" + ignore_partial_failure: True + trap_destination: True + community_string: True + email_recipient: test_email@company.com + tags: + - schedule_discovery + + - name: Discover servers with ca check enabled + ome_discovery: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - SERVER + wsman: + username: user + password: password + ca_check: True + certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}" + tags: + - server_ca_check + + - name: Discover chassis with ca check enabled data + ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - CHASSIS + redfish: + username: user + password: password + ca_check: True + certificate_data: "-----BEGIN CERTIFICATE-----\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + aqwertyuiopasdfghjklzxcvbnmasdasagasvv=\r\n + -----END CERTIFICATE-----" + tags: + - chassis_ca_check_data \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml new file mode 100644 index 00000000..7229f638 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml @@ -0,0 +1,59 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible AD directory user group operation. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Create Active Directory user groups. + ome_domain_user_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + directory_name: directory_name + role: administrator + domain_username: username@domain + domain_password: domain_password + tags: user-group-add + + - name: Create Active Directory user groups with different domain format. + ome_domain_user_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + directory_name: directory_name + role: administrator + domain_username: domain\\username + domain_password: domain_password + tags: user-group-add-domain + + - name: Update Active Directory user groups. + ome_domain_user_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + role: chassis administrator + tags: user-group-update + + - name: Remove Active Directory user groups. + ome_domain_user_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_name: "Administrators" + tags: user-group-remove diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml new file mode 100644 index 00000000..08b03786 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml @@ -0,0 +1,69 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible group device operations. 
+ gather_facts: False + vars: + group_name: Dell iDRAC Servers + device_action: refresh_inventory #other options are clear_idrac_job_queue, reset_idrac + validate_certs: True + ca_path: "/path/to/ca_cert.pem" + + tasks: + - name: Retrieve group ID based on group name. + ansible.builtin.uri: + url: "https://{{ hostname }}/api/GroupService/Groups?Name={{ group_name }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200 + return_content: yes + validate_certs: "{{ validate_certs }}" + ca_path: "{{ ca_path }}" + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: group_id + + - name: Assign group ID to a variable. + set_fact: + group_id_value: "{{ group_id.json.value[0].Id }}" + + - name: Retrieve all devices under the group ID. + ansible.builtin.uri: + url: "https://{{ hostname }}/api/GroupService/Groups({{ group_id_value }})/AllLeafDevices" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200 + return_content: yes + validate_certs: "{{ validate_certs }}" + ca_path: "{{ ca_path }}" + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: all_devices + + - name: Empty list to store device IDs. + set_fact: + devices_list: [] + + - name: Add devices retrieved from a group to the list. + set_fact: + devices_list: "{{ devices_list + [item.Id] }}" + with_items: + - "{{ all_devices.json.value }}" + + - name: Perform device action tasks on devices. 
+ dellemc.openmanage.ome_devices: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + ca_path: "{{ ca_path }}" + device_action: "{{ device_action }}" + device_ids: "{{ devices_list }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml new file mode 100644 index 00000000..027a53d0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml @@ -0,0 +1,57 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible Group configuration. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create a new device group + ome_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + name: "group 1" + description: "Group 1 description" + parent_group_name: "group parent 1" + tags: + - create_group + + - name: Modify a device group using the group ID + ome_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + group_id: 1234 + description: "Group description updated" + parent_group_name: "group parent 2" + tags: + - modify_group + + - name: Delete a device group using the device group name + ome_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + name: "group 1" + tags: + - delete_name + + - name: Delete multiple device groups using the group IDs + ome_groups: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: absent + group_id: + - 1234 + - 5678 + tags: + - delete_ids diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml 
b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml new file mode 100644 index 00000000..b5d960ca --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml @@ -0,0 +1,134 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible identity pool operations. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create an identity pool using ethernet, FCoE, iSCSI and FC settings." + ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + pool_name: "pool1" + pool_description: "Identity pool with Ethernet, FCoE, ISCSI and FC settings" + ethernet_settings: + starting_mac_address: "50:50:50:50:50:00" + identity_count: 60 + fcoe_settings: + starting_mac_address: "aabb.ccdd.7070" + identity_count: 75 + iscsi_settings: + starting_mac_address: "60:60:60:60:60:00" + identity_count: 30 + initiator_config: + iqn_prefix: "iqn.myprefix." + initiator_ip_pool_settings: + ip_range: "10.33.0.1-10.33.0.255" + subnet_mask: "255.255.255.0" + gateway: "192.168.4.1" + primary_dns_server: "10.8.8.8" + secondary_dns_server: "8.8.8.8" + fc_settings: + starting_address: "10-10-10-10-10-10" + identity_count: 45 + tags: create1 + + - name: "Create an identity pool using only ethernet settings." 
+ ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + pool_description: "Identity pool with ethernet" + ethernet_settings: + starting_mac_address: "aa-bb-cc-dd-ee-aa" + identity_count: 80 + tags: create2 + + - name: "Create an identity pool using only iSCSI settings" + ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool3" + pool_description: "Identity pool with iscsi" + iscsi_settings: + starting_mac_address: "10:10:10:10:10:00" + identity_count: 30 + initiator_config: + iqn_prefix: "iqn.myprefix." + initiator_ip_pool_settings: + ip_range: "20.33.0.1-20.33.0.255" + subnet_mask: "255.255.255.0" + gateway: "192.168.4.1" + primary_dns_server: "10.8.8.8" + secondary_dns_server: "8.8.8.8" + tags: create3 + + - name: "Modify an identity pool using FC settings." + ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + pool_description: "Identity pool with fc_settings" + fc_settings: + starting_address: "40:40:40:40:40:22" + identity_count: 48 + tags: modify1 + + - name: "Modify an identity pool." + ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool1" + new_pool_name: "pool_new" + pool_description: "modifying identity pool with ethernet and fcoe settings" + ethernet_settings: + starting_mac_address: "90-90-90-90-90-90" + identity_count: 61 + fcoe_settings: + starting_mac_address: "aabb.ccdd.5050" + identity_count: 77 + tags: modify2 + + - name: "Modify an identity pool using iSCSI and FC settings." 
+ ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool_new" + new_pool_name: "pool_new2" + pool_description: "modifying identity pool with iscsi and fc settings" + iscsi_settings: + identity_count: 99 + initiator_config: + iqn_prefix: "iqn1.myprefix2." + initiator_ip_pool_settings: + gateway: "192.168.4.5" + fc_settings: + starting_address: "10:10:10:10:10:10" + identity_count: 98 + tags: modify3 + + - name: "Delete an identity pool" + ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + pool_name: "pool1" + tags: delete diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml new file mode 100644 index 00000000..f90892ad --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml @@ -0,0 +1,35 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible job details. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Get all jobs details. + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + + - name: Get job details for id. + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: 12345 + + - name: Get filtered job details. 
+ ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + top: 2 + skip: 1 + filter: "JobType/Id eq 8" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml new file mode 100644 index 00000000..c9a8db75 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml @@ -0,0 +1,32 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage ansible port breakout configuration. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Port breakout configuration. + ome_network_port_breakout: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "1X40GE" + tags: + - port-config + + - name: Revoke the default breakout configuration. + ome_network_port_breakout: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "HardwareDefault" + tags: + - port-default diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml new file mode 100644 index 00000000..b94b6b48 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml @@ -0,0 +1,37 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage ansible port breakout configuration. + gather_facts: False + vars: + retries_count: 50 + polling_interval: 5 # in seconds + + collections: + - dellemc.openmanage + + tasks: + + - name: Port breakout configuration. 
+ ome_network_port_breakout: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "1X40GE" + register: result + + - name: "Get job details using job id from port breakout configuration task." + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.breakout_status.Id }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name == 'Failed' + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml new file mode 100644 index 00000000..d92ef99f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml @@ -0,0 +1,62 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible VLAN operations. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create a VLAN range" + ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 35 + vlan_maximum: 40 + tags: create_vlan_range + + - name: "Create a VLAN with a single value" + ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan2" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 127 + vlan_maximum: 127 + tags: create_vlan_single + + - name: "Modify a VLAN" + ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + new_name: "vlan_gold1" + description: "new description" + type: "General Purpose (Gold)" + vlan_minimum: 45 + vlan_maximum: 50 + tags: modify_vlan + + - name: "Delete a VLAN" + ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "vlan1" + tags: delete_vlan diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml new file mode 100644 index 00000000..3cf9c3c2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml @@ -0,0 +1,32 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible OpenManage Enterprise network vlan details. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve information about all network VLANs(s) available in the device. 
+ ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve information about a network VLAN using the VLAN ID. + ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + id: 12345 + + - name: Retrieve information about a network VLAN using the VLAN name. + ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Network VLAN - 1" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml new file mode 100644 index 00000000..87c124b8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml @@ -0,0 +1,33 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible server interface profile information. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieves the server interface profiles of all the device using device ID. + ome_server_interface_profile_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 10001 + - 10002 + tags: + - sip-device-id + + - name: Retrieves the server interface profiles of all the device using device service tag. 
+ ome_server_interface_profile_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - 6GHH6H2 + - 6KHH6H3 + tags: + - sip-service-tag diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml new file mode 100644 index 00000000..485a1a24 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml @@ -0,0 +1,125 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible server interface profile workflow. + gather_facts: False + vars: + retries_count: 100 + polling_interval: 10 #in seconds + src_service_tag: 7GHH6H1 + + collections: + - dellemc.openmanage + + tasks: + + - name: Create a smart fabric. + ome_smart_fabric: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + description: "fabric desc" + fabric_design: "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + primary_switch_service_tag: "6H7J6Z2" + secondary_switch_service_tag: "59HW8X2" + override_LLDP_configuration: "Enabled" + register: fabric_result + + - name: "sleep for 300 seconds and continue with play" + wait_for: + timeout: 300 + when: fabric_result.changed == True + + - name: Create a template from a reference device service tag. + ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "{{ src_service_tag }}" + attributes: + Name: "New_Template_2" + Description: "New Template description" + register: result + failed_when: "'return_id' not in result" + + - name: "Get the job id using return id from template." 
+ ome_template_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + template_id: "{{ result.return_id }}" + register: facts_result + + - name: "Get job details using job id from template task." + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ facts_result.template_info[hostname].TaskId }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name == 'Failed' + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: Deploy template on multiple devices + dellemc.openmanage.ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: "{{ result.return_id }}" + device_service_tag: + - 6GHH6H1 + - 6GHH6H2 + register: deploy_result + + - name: "sleep for 10 seconds and continue with play" + wait_for: timeout=10 + + - name: "Track the deploy job till completion" + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ deploy_result.return_id }}" + register: deploy_job_result + failed_when: "'job_info' not in deploy_job_result" + until: deploy_job_result.job_info.LastRunStatus.Name == 'Completed' or deploy_job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: Modify Server Interface Profile for the server using the service tag. 
+ ome_server_interface_profiles: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - 6GHH6H2 + nic_teaming: NoTeaming + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan + + - name: Retrieves the server interface profiles of all the device using device service tag. + ome_server_interface_profile_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - 6GHH6H2 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml new file mode 100644 index 00000000..c003b714 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml @@ -0,0 +1,57 @@ +--- +- hosts: omem + connection: local + name: Dell OpenManage Ansible server interface profiles configuration. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Modify Server Interface Profile for the server using the service tag + ome_server_interface_profiles: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - SVCTAG1 + - SVCTAG2 + nic_teaming: LACP + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan1 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 + + - name: Modify Server Interface Profile for the server using the id + ome_server_interface_profiles: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 34523 + - 48999 + nic_teaming: NoTeaming + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan2 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml new file mode 100644 index 00000000..3813458a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml @@ -0,0 +1,47 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible smart fabric operations. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create a smart fabric" + ome_smart_fabric: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + description: "fabric desc" + fabric_design: "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + primary_switch_service_tag: "SVTG123" + secondary_switch_service_tag: "PXYT456" + override_LLDP_configuration: "Enabled" + tags: create_smart_fabric + + - name: "Modify a smart fabric" + ome_smart_fabric: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + new_name: "fabric_gold1" + description: "new description" + tags: modify_smart_fabric + + + - name: "Delete a smart fabric" + ome_smart_fabric: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "fabric1" + tags: delete_smart_fabric diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml new file mode 100644 index 00000000..88b5cc62 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml @@ -0,0 +1,119 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible smart fabric uplink configuration. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create a Uplink" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "CREATED from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Enabled" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + tagged_networks: + - vlan1 + - vlan3 + untagged_network: vlan2 + tags: create_uplink + + - name: "modify a existing uplink1" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + description: "Modified from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Disabled" + primary_switch_service_tag: "DEF1234" + primary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + secondary_switch_service_tag: "TUV1234" + secondary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + tagged_networks: + - vlan11 + - vlan33 + untagged_network: vlan22 + tags: modify_uplink + + - name: "Delete a Uplink" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + fabric_name: "fabric1" + name: "uplink1" + tags: delete_uplink + + - name: "Modify the Uplink name" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + tags: modify_uplink_name + + - name: "Modify a Uplink ports" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + 
password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "uplink ports modified" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/6 + - ethernet1/1/7 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/9 + - ethernet1/1/10 + tags: modify_ports + + - name: "Modify Uplink networks" + ome_smart_fabric_uplink: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "create1" + description: "uplink networks modified" + tagged_networks: + - vlan4 + tags: modify_networks diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml new file mode 100644 index 00000000..433954aa --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml @@ -0,0 +1,31 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible template identity pool attach and detach operation. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + + - name: Attach an identity pool to a template. + ome_template_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name + identity_pool_name: identity_pool_name + tags: + - attach + + - name: Detach an identity pool from a template. 
+ ome_template_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name + tags: + - detach \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml new file mode 100644 index 00000000..517ff118 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml @@ -0,0 +1,51 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible - OME Power state operations. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Power state operation based on device id. + ome_powerstate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 11111 + power_state: "off" + + - name: Power state operation based on device service tag. + ome_powerstate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "KLBR111" + power_state: "on" + + - name: Power state operation based on list of device ids. + ome_powerstate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: "{{ item.device_id }}" + power_state: "{{ item.state }}" + with_items: + - { "device_id": 11111, "state": "on" } + - { "device_id": 22222, "state": "off" } + + - name: Power state operation based on list of device service tags. 
+ ome_powerstate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "{{ item.service_tag }}" + power_state: "{{ item.state }}" + with_items: + - { "service_tag": "KLBR111", "state": "on" } + - { "service_tag": "KLBR222", "state": "off" } \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml new file mode 100644 index 00000000..8393992a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml @@ -0,0 +1,36 @@ +--- +- hosts: ome + vars: + retries_count: 5 + polling_interval: 5 #in seconds + connection: local + name: "OME - Power state management job tracking." + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Power state operation based on device id" + ome_powerstate: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + power_state: "off" + device_id: 11111 + register: result + failed_when: "'job_status' not in result" + + - name: "Get job details using job id from power state operation." 
+ ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{result.job_status.Id}}" + register: job_result + failed_when: "'job_info' not in job_result" + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml new file mode 100644 index 00000000..14d43e6a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml @@ -0,0 +1,212 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible profile operations. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Create two profiles from a template + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 2 + tags: + - create_profile + + - name: Create profile with NFS share + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + boot_to_network_iso: + boot_to_network: True + share_type: "NFS" + share_ip: "192.168.0.1" + iso_path: "/path/to/my_iso.iso" + iso_timeout: 8 + tags: + - create_profile_nfs + + - name: Create profile with CIFS share + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + 
boot_to_network_iso: + boot_to_network: True + share_type: CIFS + share_ip: "192.168.0.2" + share_user: "username" + share_password: "password" + workgroup: "workgroup" + iso_path: "\\path\\to\\my_iso.iso" + iso_timeout: 8 + tags: + - create_profile_cifs + + - name: Modify profile name with NFS share and attributes + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: modify + name: "Profile 00001" + new_name: "modified profile" + description: "new description" + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "/path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: true + - Id: 4507 + Value: "server attr 2" + IsIgnored: true + - DisplayName: 'System, Server Topology, ServerTopology 1 Aisle Name' + Value: Aisle 5 + IsIgnored: false + tags: + - modify_profile + + - name: Delete using profile name + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + name: "Profile 00003" + tags: + - delete_profile_name + + - name: Delete using filter + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + SelectAll: True + Filters: =contains(ProfileName,'Profile 00002') + tags: + - delete_filter + + - name: Delete using profile list filter + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + ProfileIds: + - 17123 + - 12124 + tags: + - delete_profile_ids + + - name: Assign profile name with network share + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: assign + name: "Profile 00001" + 
device_id: 12456 + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "/path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: true + Options: + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + EndHostPowerState: 1 + StrictCheckingVlan: True + Schedule: + RunNow: True + RunLater: False + tags: + - assign_profile + + - name: Unassign using profile name + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + name: "Profile 00003" + tags: + - unassign_profile_name + + - name: "Unassign using filters" + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + SelectAll: True + Filters: =contains(ProfileName,'Profile 00003') + tags: + - unassign_filter + + - name: Unassign using filter + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + ProfileIds: + - 17123 + - 16123 + tags: + - unassign_profile_list + + - name: Migrate profile + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "migrate" + name: "Profile 0001" + device_id: 12456 + tags: + - migrate_profile \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml new file mode 100644 index 00000000..d4c9c772 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml @@ -0,0 +1,47 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible profile operations. 
+ gather_facts: False + vars: + retries_count: 120 + polling_interval: 30 # 30 seconds x 120 times = 1 hour + failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped', + 'Canceled'] + completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused', + 'Stopped', 'Canceled'] + + collections: + - dellemc.openmanage + + tasks: + - name: Assign a profile to target + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "assign" + name: "Profile 00001" + device_id: 12456 + register: result + + - name: End play when no job_id in result + meta: end_play + when: + - result.changed == false + - "'job_id' not in result" + + - name: Get job details using job id + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.job_id }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name in "{{ failed_states }}" + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}" + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml new file mode 100644 index 00000000..ae7f732b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml @@ -0,0 +1,48 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible profile operations. 
+ gather_facts: False + vars: + retries_count: 120 + polling_interval: 30 # 30 seconds x 120 times = 1 hour + failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped', + 'Canceled'] + completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused', + 'Stopped', 'Canceled'] + + collections: + - dellemc.openmanage + + tasks: + + - name: Migrate a profile + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "migrate" + name: "Profile 00001" + device_id: 12456 + register: result + + - name: End play when no job_id in result + meta: end_play + when: + - result.changed == false + - "'job_id' not in result" + + - name: Get job details using job id + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.job_id }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name in "{{ failed_states }}" + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}" + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml new file mode 100644 index 00000000..b1a21312 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml @@ -0,0 +1,47 @@ +--- +- hosts: ome + connection: local + name: Dell EMC OpenManage Ansible profile operations. 
+ gather_facts: False + vars: + retries_count: 120 + polling_interval: 30 # 30 seconds x 120 times = 1 hour + failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped', + 'Canceled'] + completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused', + 'Stopped', 'Canceled'] + + collections: + - dellemc.openmanage + + tasks: + + - name: Unassign using profile name + ome_profile: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + name: "Profile 00003" + register: result + + - name: End play when no job_id in result + meta: end_play + when: + - result.changed == false + - "'job_id' not in result" + + - name: Get job details using job id + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.job_id }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name in "{{ failed_states }}" + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}" + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml new file mode 100644 index 00000000..58ac15ff --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml @@ -0,0 +1,338 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible device Template service. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create a template from a reference device." 
+ ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + device_id: 25123 + attributes: + Name: "New Template" + Description: "New Template description" + + - name: "Modify template name, description, and attribute value." + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + # Attributes to be modified in the template. + # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + - Id: 1234 + Value: "Test Attribute" + IsIgnored: false + + - name: Modify template name, description, and attribute using detailed view + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + Attributes: + # Enter the comma separated string as appearing in the Detailed view on GUI + # NIC -> NIC.Integrated.1-1-1 -> NIC Configuration -> Wake On LAN1 + - DisplayName: 'NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN' + Value: Enabled + IsIgnored: false + # System -> LCD Configuration -> LCD 1 User Defined String for LCD + - DisplayName: 'System, LCD Configuration, LCD 1 User Defined String for LCD' + Value: LCD str by OMAM + IsIgnored: false + + - name: "Deploy template on multiple devices " + ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + - 'SVTG456' + + - name: Deploy template on groups + ome_template: + hostname: "{{hostname}}" + 
username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_group_names: + - server_group_1 + - server_group_2 + + - name: "Deploy template on multiple devices along attributes modification for target device" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + attributes: + # Device specific attributes to be modified during deployment. + # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + # specific device where attribute to be modified at deployment run-time. + # The DeviceId should be mentioned above in the 'device_id' section. + # Service tags not allowed. + - DeviceId: 12765 + Attributes: + - Id: 15645 + Value: "0.0.0.0" + IsIgnored: false + - DeviceId: 10173 + Attributes: + - Id: 18968 + Value: "hostname-1" + IsIgnored: false + + - name: "Deploy template and Operating System (OS) on multiple devices" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. + # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: "Deploy template on multiple devices and changes the device-level attributes. 
After the template is deployed, + install OS using its image." + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + - 'SVTG456' + attributes: + Attributes: + - DeviceId: 12765 + Attributes: + - Id: 15645 + Value: "0.0.0.0" + IsIgnored: false + - DeviceId: 10173 + Attributes: + - Id: 18968 + Value: "hostname-1" + IsIgnored: false + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: "delete template" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + template_id: 12 + + - name: "export a template" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_id: 12 + + # Start of example to export template to a local xml file + - name: "export template to a local xml file" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_name: "my_template" + register: result + tags: + - export_xml_to_file + - ansible.builtin.copy: + content: "{{ result.Content}}" + dest: "/path/to/exported_template.xml" + tags: + - export_xml_to_file + # End of example to export template to a local xml file + + - name: "clone a template" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: 
"/path/to/ca_cert.pem" + command: "clone" + template_id: 12 + attributes: + Name: "New Cloned Template Name" + + - name: "import template from XML content" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + # Template Type from TemplateService/TemplateTypes + Type: 2 + # xml string content + Content: "\n\nTrue\nClear\n + \n\nReady + \nNo\n\n + \nReady\n + No\n\n\n" + + - name: "import template from local XML file" + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + Type: 2 + Content: "{{ lookup('ansible.builtin.file', '/path/to/xmlfile') }}" + + - name: "Deploy template and Operating System (OS) on multiple devices." + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. 
+ # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "CIFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + + - name: Create a compliance template from reference device + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "create" + device_service_tag: "SVTG123" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Description: "Configuration Compliance Template" + Fqdds: "BIOS" + + - name: Import a compliance template from XML file + ome_template: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + command: "import" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Content: "{{ lookup('ansible.builtin.file', './test.xml') }}" + Type: 2 diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml new file mode 100644 index 00000000..40f4c002 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml @@ -0,0 +1,129 @@ +--- +- hosts: ome + connection: local + name: "Creates a new template from the provided reference server device. + Track the template creation job till completion. + Fetch the Attribute specific to LCD Configuration settings from the attribute view of the created template. + Modify the created template with the user defined LCD string." 
+ gather_facts: False + vars: + retries_count: 50 + polling_interval: 5 + reference_device: "MXL4567" + template_name: "LCD String Deploy Template" + lcd_display_string: "LCD Custom Display Message" + + collections: + - dellemc.openmanage + + tasks: + - name: "create template from the reference server" + ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "{{ reference_device }}" + attributes: + Name: "{{ template_name }}" + Description: "LCD Template description" + register: result + + - name: "sleep for 30 seconds and continue with play" + wait_for: timeout=30 + + - name: "Fetch the Task ID from the Template Details using the Template ID" + ome_template_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + template_id: "{{ result.return_id }}" + register: template_result + + - name: "Track the Template Creation Job till Completion" + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ template_result.template_info[hostname].TaskId }}" + register: job_result + failed_when: "'job_info' not in job_result" + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: "Retrieve the Attribute ID specific to LCD Configuration" + uri: + url: "https://{{ hostname }}/api/TemplateService/Templates({{ result.return_id }})/Views(1)/AttributeViewDetails" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: config_result + + - name: "System Attribute Groups" + set_fact: + 
lcd_fact: "{{ item }}" + when: + - item.DisplayName=='System' + with_items: + - "{{ config_result.json.AttributeGroups }}" + loop_control: + label: "{{ config_result.json.Name }}" + + - name: "LCD System Attributes Groups" + set_fact: + lcdconfig: "{{ item }}" + when: + - item.DisplayName=='LCD Configuration' + with_items: + - "{{ lcd_fact.SubAttributeGroups }}" + loop_control: + label: "{{ item.DisplayName }}" + + - name: "Retrieve LCD Display Attribute ID" + set_fact: + lcdattrid: "{{ item.AttributeId }}" + when: + - item.DisplayName=='LCD 1 User Defined String for LCD' + with_items: + - "{{ lcdconfig.Attributes }}" + loop_control: + label: "{{ item.DisplayName }}" + + - name: "Retrieve LCD Config Attribute ID" + set_fact: + lcdconfigattrid: "{{ item.AttributeId }}" + when: + - item.DisplayName=='LCD 1 LCD Configuration' + with_items: + - "{{ lcdconfig.Attributes }}" + loop_control: + label: "{{ item.DisplayName }}" + + - name: "Modify the created with Custom LCD String to be displayed" + ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "modify" + template_id: "{{ result.return_id }}" + attributes: + Name: "{{ template_name }}" + Attributes: + - Id: "{{ lcdattrid }}" + Value: "{{ lcd_display_string }}" + IsIgnored: false + - Id: "{{ lcdconfigattrid }}" + Value: "User Defined" + IsIgnored: false \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml new file mode 100644 index 00000000..3fd200c0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml @@ -0,0 +1,33 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible template inventory details. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve basic details of all templates. + ome_template_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve details of a specific template identified by its template ID. + ome_template_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + template_id: "{{template_id}}" + + - name: Get filtered template info based on name. + ome_template_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "Name eq 'new template'" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml new file mode 100644 index 00000000..eb040c9c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml @@ -0,0 +1,27 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible device Template service. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: get template with filter option. 
+ register: result + failed_when: "'template_info' not in result or result.template_info[hostname]['@odata.count'] == 0" + ome_template_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "Name eq 'template_name'" + - name: get specific template from result + with_subelements: + - "{{ result.template_info }}" + - value + debug: + msg: "{{item.1}}" + when: item.1['Name']=='template_name' diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml new file mode 100644 index 00000000..afb472fa --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml @@ -0,0 +1,46 @@ +--- +- hosts: ome + connection: local + name: + - Deploy this template with desired LCD string on the target servers. + - Track the template deploy operation job till completion. 
+ gather_facts: False + vars: + retries_count: 50 + polling_interval: 5 + template_name: "LCD String Deploy Template" + deployable_servicetag: + - 'MXL1234' + - 'MXL4567' + + collections: + - dellemc.openmanage + + tasks: + - name: "Deploy previously created LCD Template" + ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "deploy" + template_name: "{{ template_name }}" + device_service_tag: "{{ deployable_servicetag }}" + register: result + tags: + - deploy + + - name: "Track the deploy job till completion" + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ result.return_id }}" + register: job_result + failed_when: "'job_info' not in job_result" + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: + - track_deploy \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml new file mode 100644 index 00000000..fee07b4e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml @@ -0,0 +1,66 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible template tag and untag. 
+ gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Tag or untag vlans in template + ome_template_network_vlan: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + - port: 1 + untagged_network_id: 12765 + - port: 2 + untagged_network_name: vlan2 + tagged_networks: + - port: 1 + tagged_network_ids: + - 12767 + - 12768 + - port: 4 + tagged_network_ids: + - 12767 + - 12768 + tagged_network_names: + - vlan3 + - port: 2 + tagged_network_names: + - vlan4 + - vlan1 + tags: + - tag_untag_vlan + + - name: Clear the tagged and untagged vLANs + ome_template_network_vlan: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + # For removing the untagged vLANs for the port 1 and 2 + - port: 1 + untagged_network_id: 0 + - port: 2 + untagged_network_name: 0 + tagged_networks: + # For removing the tagged vLANs for port 1 and 4 + - port: 1 + tagged_network_ids: [] + - port: 4 + tagged_network_ids: [] + tagged_network_names: [] + - port: 2 + tagged_network_names: [] + tags: + - clear_tagged_untagged \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml new file mode 100644 index 00000000..9f93bbdf --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml @@ -0,0 +1,48 @@ +--- +- hosts: ome + vars: + retries_count: 50 + polling_interval: 5 #in seconds + connection: local + name: "OME - Create Template details tracking" + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: "Create template based on device id." 
+ ome_template: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + device_id: 12475 + attributes: + Name: "New Template" + Description: "New Template description" + register: result + failed_when: "'return_id' not in result" + + - name: "Get the job id using return id from template." + ome_template_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + template_id: "{{ result.return_id }}" + register: facts_result + + - name: "Get job details using job id from template task." + ome_job_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + job_id: "{{ facts_result.template_info[hostname].TaskId }}" + register: job_result + failed_when: job_result.job_info.LastRunStatus.Name == 'Failed' + changed_when: job_result.job_info.LastRunStatus.Name == 'Completed' + until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml new file mode 100644 index 00000000..b1589cae --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml @@ -0,0 +1,70 @@ +--- +- hosts: ome + connection: local + name: Dell OpenManage Ansible User service. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: create new user. 
+ ome_user: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + attributes: + UserName: "user1" + Password: "UserPassword" + RoleId: "10" + Enabled: True + + - name: create user with all parameters + ome_user: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + attributes: + UserName: "user2" + Description: "user2 description" + Password: "UserPassword" + RoleId: "10" + Enabled: True + DirectoryServiceId: 0 + UserTypeId: 1 + Locked: False + Name: "user2" + + - name: modify existing user + ome_user: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + attributes: + UserName: "user3" + RoleId: "10" + Enabled: True + Description: "Modify user Description" + + - name: delete existing user using id. + ome_user: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + user_id: 61874 + + - name: delete existing user using name. + ome_user: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "name" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml new file mode 100644 index 00000000..6016d502 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml @@ -0,0 +1,33 @@ +--- +- hosts: ome + connection: local + name: Fetching ome user facts. + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Retrieve basic details of all accounts. 
+ ome_user_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + + - name: Retrieve details of a specific account identified by its account ID. + ome_user_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + account_id: "{{account_id}}" + + - name: Retrieve details of a specific user using filter with UserName. + ome_user_info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "UserName eq 'test'" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml new file mode 100644 index 00000000..15fa188d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml @@ -0,0 +1,32 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Ansible Module for Simple Firmware Update" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Update the firmware from a single executable file available in a local path" + redfish_firmware: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + image_uri: "/home/firmware_repo/component.exe" + + tags: + - local-update + + - name: "Update the firmware from a single executable file available in a HTTP protocol" + redfish_firmware: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + image_uri: "http://192.168.0.1/firmware_repo/component.exe" + + tags: + - http-update diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml 
b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml new file mode 100644 index 00000000..105f4189 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml @@ -0,0 +1,92 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Ansible Module for Simple Firmware Update" + vars: + retries_count: 100 + polling_interval: 5 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Update the firmware from a single executable file available in a HTTP protocol" + redfish_firmware: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + image_uri: "http://192.168.0.1/firmware_repo/component.exe" + register: result + + - name: "Update the firmware from a single executable with job tracking till completion" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: "Update the firmware from a single executable reboot." 
+ uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "Update the firmware from a single executable Waits for 4 minutes." + wait_for: + timeout: 240 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "Update the firmware from a single executable with job tracking till completion." + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: "Update the firmware from a single executable fact." 
+ set_fact: + job_details: "{{ final_result.json }}" + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml new file mode 100644 index 00000000..8ea91cc3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml @@ -0,0 +1,92 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Ansible Module for Simple Firmware Update" + vars: + retries_count: 100 + polling_interval: 5 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + collections: + - dellemc.openmanage + + tasks: + + - name: "Update the firmware from a single executable file available in a local path" + redfish_firmware: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + image_uri: "/home/firmware_repo/component.exe" + register: result + + - name: "Update the firmware from a single executable with job tracking till completion." + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: "Update the firmware from a single executable reboot." 
+ uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "Update the firmware from a single executable Waits for 4 minutes." + wait_for: + timeout: 240 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "Update the firmware from a single executable with job tracking till completion." + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + + - name: "Update the firmware from a single executable fact." 
+ set_fact: + job_details: "{{ final_result.json }}" + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK" \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml new file mode 100644 index 00000000..7fa5e40c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml @@ -0,0 +1,46 @@ +--- +- hosts: redfish + connection: local + name: Configure Redfish subscriptions + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Add Redfish metric subscription + redfish_event_subscription: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + destination: "https://192.168.1.100:8188" + event_type: MetricReport + event_format_type: MetricReport + state: present + + tags: add_metric_subscription + + - name: Add Redfish alert subscription + redfish_event_subscription: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + event_type: Alert + event_format_type: Event + state: present + + tags: add_alert_subscription + + - name: Delete Redfish subscription with a specified destination + redfish_event_subscription: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + state: absent + + tags: delete_subscription \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml 
b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml new file mode 100644 index 00000000..bacce0cc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml @@ -0,0 +1,26 @@ +--- +- hosts: redfish + connection: local + name: Configure Server Power Setting + gather_facts: False + + collections: + - dellemc.openmanage + + tasks: + - name: Manage power state of the first device. + redfish_powerstate: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + reset_type: "On" + + - name: Manage power state of a specified device. + redfish_powerstate: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + reset_type: "ForceOff" + resource_id: "System.Embedded.1" diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml new file mode 100644 index 00000000..0c1380a0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml @@ -0,0 +1,85 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Redfish Storage Volume - Ansible Module" + vars: + retries_count: 15 + polling_interval: 5 + + collections: + - dellemc.openmanage + + tasks: + + - name: Create a volume with supported options. 
+ redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_type: "Mirrored" + name: "VD0" + controller_id: "RAID.Slot.1-1" + drives: + - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1 + - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1 + block_size_bytes: 512 + capacity_bytes: 299439751168 + optimum_io_size_bytes: 65536 + encryption_types: NativeDriveEncryption + encrypted: true + register: result + tags: + - create_volume1 + + - name: Create a volume with minimum options. + redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + controller_id: "RAID.Slot.1-1" + volume_type: "NonRedundant" + drives: + - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1 + tags: + - create_volume2 + + - name: Modify a volume's encryption type settings. + redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + encryption_types: "ControllerAssisted" + encrypted: true + tags: + - modify_volume + + - name: Initialize an existing volume. + redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "initialize" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + initialize_type: "Slow" + tags: + - initialize_volume + + - name: Delete an existing volume. 
+ redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + tags: + - delete_volume diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml new file mode 100644 index 00000000..fcf596cd --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml @@ -0,0 +1,93 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Redfish Storage Volume - Ansible Module" + vars: + retries_count: 100 + polling_interval: 10 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + collections: + - dellemc.openmanage + + tasks: + - name: "Create a storage volume" + redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + name: "VD_BOSS" + controller_id: "AHCI.Slot.6-1" + drives: + - Disk.Direct.1-1:AHCI.Slot.6-1 + - Disk.Direct.0-0:AHCI.Slot.6-1 + optimum_io_size_bytes: 65536 + volume_type: Mirrored + register: result + tags: + - create_volume + + - name: "View the job details to track the status of the create storage volume task" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + failed_when: "'json' not in job_result" + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + 
delay: "{{ polling_interval }}" + tags: + - job-tracking + + - name: "Reboot the system if the job status is pending." + uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "View the job details to verify if the task status is completed." + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml new file mode 100644 index 00000000..34a821d7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml @@ -0,0 +1,87 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Redfish Storage Volume - Ansible Module" + vars: + retries_count: 100 + polling_interval: 10 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + 
collections: + - dellemc.openmanage + + tasks: + - name: "Delete an existing volume." + redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + volume_id: "Disk.Virtual.1:RAID.Slot.1-1" + register: result + tags: + - delete_volume + + - name: "View the job details to track the status of the delete storage volume task" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + failed_when: "'json' not in job_result" + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: + - job-tracking + + - name: "Reboot the system if the job status is pending." + uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "View the job details to verify if the task status is completed." 
+ uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml new file mode 100644 index 00000000..fb79a288 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml @@ -0,0 +1,88 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Redfish Storage Volume - Ansible Module" + vars: + retries_count: 100 + polling_interval: 10 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + collections: + - dellemc.openmanage + + tasks: + - name: "Initialize an existing volume." 
+ redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "initialize" + volume_id: "Disk.Virtual.1:RAID.Slot.1-1" + initialize_type: "Slow" + register: result + tags: + - initialize_volume + + - name: "View the job details to track the status of the initialization task" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + failed_when: "'json' not in job_result" + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: + - job-tracking + + - name: "Reboot the system if the job status is pending." + uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "View the job details to verify if the task status is completed." 
+ uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml new file mode 100644 index 00000000..02bbc19d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml @@ -0,0 +1,89 @@ +--- +- hosts: redfish_hosts + connection: local + gather_facts: false + name: "Redfish Storage Volume - Ansible Module" + vars: + retries_count: 100 + polling_interval: 10 + reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + + collections: + - dellemc.openmanage + + tasks: + - name: "Modify storage volume encryption settings." 
+ redfish_storage_volume: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_id: "Disk.Virtual.1:RAID.Slot.1-1" + encryption_types: "ControllerAssisted" + encrypted: true + register: result + tags: + - modify_volume + + - name: "View the job details to track the status of the modify storage volume encryption task" + uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: job_result + failed_when: "'json' not in job_result" + until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" + tags: + - job-tracking + + - name: "Reboot the system if the job status is pending." + uri: + url: "https://{{ baseuri }}{{ reboot_uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "POST" + body_format: raw + body: '{"ResetType": "ForceRestart"}' + use_proxy: yes + status_code: 204 + return_content: no + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: reboot_result + changed_when: reboot_result.status == 204 + when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.' + + - name: "View the job details to verify if the task status is completed." 
+ uri: + url: "https://{{ baseuri }}{{ result.task.uri }}" + user: "{{ username }}" + password: "{{ password }}" + method: "GET" + use_proxy: yes + status_code: 200, 202 + return_content: yes + validate_certs: no + force_basic_auth: yes + headers: + Content-Type: "application/json" + Accept: "application/json" + register: final_result + failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK" + until: final_result.json.TaskState == 'Completed' + retries: "{{ retries_count }}" + delay: "{{ polling_interval }}" diff --git a/ansible_collections/dellemc/openmanage/plugins/README.md b/ansible_collections/dellemc/openmanage/plugins/README.md new file mode 100644 index 00000000..e5200a2d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/README.md @@ -0,0 +1,100 @@ +# dellemc.openmanage collections Plugins Directory + +Here are the list of modules and module_utils supported by Dell. + +``` +├── doc_fragments + ├── idrac_auth_options.py + ├── network_share_options.py + ├── ome_auth_options.py + ├── omem_auth_options.py + ├── oment_auth_options.py + └── redfish_auth_options.py +├── module_utils + ├── dellemc_idrac.py + ├── idrac_redfish.py + ├── ome.py + ├── redfish.py + └── utils.py +└── modules + ├── dellemc_configure_idrac_eventing.py + ├── dellemc_configure_idrac_services.py + ├── dellemc_get_firmware_inventory.py + ├── dellemc_get_system_inventory.py + ├── dellemc_idrac_lc_attributes.py + ├── dellemc_idrac_storage_volume.py + ├── dellemc_system_lockdown_mode.py + ├── idrac_attributes.py + ├── idrac_bios.py + ├── idrac_boot.py + ├── idrac_certificates.py + ├── idrac_firmware.py + ├── idrac_firmware_info.py + ├── idrac_lifecycle_controller_job_status_info.py + ├── idrac_lifecycle_controller_jobs.py + ├── idrac_lifecycle_controller_logs.py + ├── idrac_lifecycle_controller_status_info.py + ├── idrac_network.py + ├── idrac_os_deployment.py + ├── idrac_redfish_storage_controller.py + ├── idrac_reset.py + ├── 
idrac_server_config_profile.py + ├── idrac_syslog.py + ├── idrac_system_info.py + ├── idrac_timezone_ntp.py + ├── idrac_user.py + ├── idrac_virtual_media.py + ├── ome_active_directory.py + ├── ome_application_alerts_smtp.py + ├── ome_application_alerts_syslog.py + ├── ome_application_certificate.py + ├── ome_application_console_preferences.py + ├── ome_application_network_address.py + ├── ome_application_network_proxy.py + ├── ome_application_network_settings.py + ├── ome_application_network_time.py + ├── ome_application_network_webserver.py + ├── ome_application_security_settings.py + ├── ome_chassis_slots.py + ├── ome_configuration_compliance_baseline.py + ├── ome_configuration_compliance_info.py + ├── ome_device_group.py + ├── ome_device_info.py + ├── ome_device_local_access_configuration.py + ├── ome_device_location.py + ├── ome_device_mgmt_network.py + ├── ome_device_network_services.py + ├── ome_device_power_settings.py + ├── ome_device_quick_deploy.py + ├── ome_devices.py + ├── ome_diagnostics.py + ├── ome_discovery.py + ├── ome_domain_user_groups.py + ├── ome_firmware.py + ├── ome_firmware_baseline.py + ├── ome_firmware_baseline_compliance_info.py + ├── ome_firmware_baseline_info.py + ├── ome_firmware_catalog.py + ├── ome_groups.py + ├── ome_identity_pool.py + ├── ome_job_info.py + ├── ome_network_port_breakout.py + ├── ome_network_vlan.py + ├── ome_network_vlan_info.py + ├── ome_powerstate.py + ├── ome_profile.py + ├── ome_server_interface_profile_info.py + ├── ome_server_interface_profiles.py + ├── ome_smart_fabric.py + ├── ome_smart_fabric_uplink.py + ├── ome_template.py + ├── ome_template_identity_pool.py + ├── ome_template_info.py + ├── ome_template_network_vlan.py + ├── ome_user.py + ├── ome_user_info.py + ├── redfish_event_subscription.py + ├── redfish_firmware.py + ├── redfish_powerstate.py + └── redfish_storage_volume.py +``` \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py 
b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py new file mode 100644 index 00000000..5ca16d6d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + idrac_ip: + required: True + type: str + description: iDRAC IP Address. + idrac_user: + required: True + type: str + description: iDRAC username. + idrac_password: + required: True + type: str + description: iDRAC user password. + aliases: ['idrac_pwd'] + idrac_port: + type: int + description: iDRAC port. + default: 443 + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + timeout: + description: The socket level timeout in seconds. 
+ type: int + default: 30 + version_added: 5.0.0 +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py new file mode 100644 index 00000000..f0ebb7e3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.0.0 +# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + share_name: + required: True + type: str + description: Network share or a local path. + share_user: + type: str + description: Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain. + This option is mandatory for CIFS share. + share_password: + type: str + description: Network share user password. This option is mandatory for CIFS share. + aliases: ['share_pwd'] + share_mnt: + type: str + description: Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for network shares. +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py new file mode 100644 index 00000000..b84c50d5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + hostname: + description: OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname. + type: str + required: True + username: + description: OpenManage Enterprise or OpenManage Enterprise Modular username. + type: str + required: True + password: + description: OpenManage Enterprise or OpenManage Enterprise Modular password. + type: str + required: True + port: + description: OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port. + type: int + default: 443 + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + timeout: + description: The socket level timeout in seconds. + type: int + default: 30 + version_added: 5.0.0 +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py new file mode 100644 index 00000000..d8c616b2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + hostname: + description: OpenManage Enterprise Modular IP address or hostname. + type: str + required: True + username: + description: OpenManage Enterprise Modular username. + type: str + required: True + password: + description: OpenManage Enterprise Modular password. + type: str + required: True + port: + description: OpenManage Enterprise Modular HTTPS port. + type: int + default: 443 + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + timeout: + description: The socket level timeout in seconds. + type: int + default: 30 + version_added: 5.0.0 +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py new file mode 100644 index 00000000..85b1553f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + hostname: + description: OpenManage Enterprise IP address or hostname. + type: str + required: True + username: + description: OpenManage Enterprise username. + type: str + required: True + password: + description: OpenManage Enterprise password. + type: str + required: True + port: + description: OpenManage Enterprise HTTPS port. + type: int + default: 443 + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + timeout: + description: The socket level timeout in seconds. + type: int + default: 30 + version_added: 5.0.0 +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py new file mode 100644 index 00000000..8eb1eda1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + baseuri: + description: "IP address of the target out-of-band controller. For example- :." + type: str + required: True + username: + description: Username of the target out-of-band controller. + type: str + required: True + password: + description: Password of the target out-of-band controller. + type: str + required: True + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + timeout: + description: The socket level timeout in seconds. + type: int + default: 30 + version_added: 5.0.0 +''' diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py new file mode 100644 index 00000000..fee5339c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- + +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import os +try: + from omsdk.sdkinfra import sdkinfra + from omsdk.sdkcreds import UserCredentials + from omsdk.sdkfile import FileOnShare, file_share_manager + from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum + from omsdk.http.sdkwsmanbase import WsManOptions + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + + +idrac_auth_params = { + "idrac_ip": {"required": True, "type": 'str'}, + "idrac_user": {"required": True, "type": 'str'}, + "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True}, + "idrac_port": {"required": False, "default": 443, "type": 'int'}, + "validate_certs": {"type": "bool", "default": True}, + "ca_path": {"type": "path"}, + "timeout": {"type": "int", "default": 30}, +} + + +class iDRACConnection: + + def __init__(self, module_params): + if not HAS_OMSDK: + raise ImportError("Dell EMC OMSDK library is required for this module") + self.idrac_ip = module_params['idrac_ip'] + self.idrac_user = module_params['idrac_user'] + self.idrac_pwd = module_params['idrac_password'] + self.idrac_port = module_params['idrac_port'] + if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)): + raise ValueError("hostname, username and password required") + self.handle = None + self.creds = UserCredentials(self.idrac_user, self.idrac_pwd) + self.validate_certs = module_params.get("validate_certs", False) + self.ca_path = module_params.get("ca_path") + verify_ssl = False + if self.validate_certs is True: + if self.ca_path is None: + self.ca_path = self._get_omam_ca_env() + verify_ssl = self.ca_path + timeout = module_params.get("timeout", 30) + if not timeout or type(timeout) != int: + timeout = 30 + self.pOp = WsManOptions(port=self.idrac_port, read_timeout=timeout, verify_ssl=verify_ssl) + self.sdk = sdkinfra() + if self.sdk is None: + msg = "Could not initialize iDRAC drivers." 
+ raise RuntimeError(msg) + + def __enter__(self): + self.sdk.importPath() + protopref = ProtoPreference(ProtocolEnum.WSMAN) + protopref.include_only(ProtocolEnum.WSMAN) + self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, + protopref=protopref, pOptions=self.pOp) + if self.handle is None: + msg = "Unable to communicate with iDRAC {0}. This may be due to one of the following: " \ + "Incorrect username or password, unreachable iDRAC IP or " \ + "a failure in TLS/SSL handshake.".format(self.idrac_ip) + raise RuntimeError(msg) + return self.handle + + def __exit__(self, exc_type, exc_val, exc_tb): + self.handle.disconnect() + return False + + def _get_omam_ca_env(self): + """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or True as ssl has to + be validated from omsdk with single param and is default to false in omsdk""" + return (os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") + or os.environ.get("OMAM_CA_BUNDLE") or True) diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py new file mode 100644 index 00000000..168c8277 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py @@ -0,0 +1,377 @@ +# -*- coding: utf-8 -*- + +# Dell EMC OpenManage Ansible Modules +# Version 5.5.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+ +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import re +import time +import os +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.six.moves.urllib.parse import urlencode + +idrac_auth_params = { + "idrac_ip": {"required": True, "type": 'str'}, + "idrac_user": {"required": True, "type": 'str'}, + "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True}, + "idrac_port": {"required": False, "default": 443, "type": 'int'}, + "validate_certs": {"type": "bool", "default": True}, + "ca_path": {"type": "path"}, + "timeout": {"type": "int", "default": 30}, + +} + +SESSION_RESOURCE_COLLECTION = { + "SESSION": "/redfish/v1/Sessions", + "SESSION_ID": "/redfish/v1/Sessions/{Id}", +} +MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1" +EXPORT_URI = 
EXPORT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ExportSystemConfiguration"
IMPORT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ImportSystemConfiguration"
IMPORT_PREVIEW = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ImportSystemConfigurationPreview"


class OpenURLResponse(object):
    """Wrapper around the HTTPResponse returned by ``open_url``."""

    def __init__(self, resp):
        # The body is read eagerly because the underlying response stream
        # can only be consumed once.
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        """Response body parsed as JSON.

        :raises ValueError: if the body is not valid JSON.
        """
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        """HTTP status code of the response."""
        return self.resp.getcode()

    @property
    def success(self):
        """True for any 2xx status code.

        BUGFIX: the original ``status >= 200 & status <= 299`` applied the
        bitwise '&' before the comparisons (Python operator precedence), so
        e.g. a 404 response was wrongly reported as success.
        """
        return 200 <= self.status_code <= 299

    @property
    def headers(self):
        """Raw response headers."""
        return self.resp.headers

    @property
    def reason(self):
        """HTTP reason phrase of the response."""
        return self.resp.reason


class iDRACRedfishAPI(object):
    """REST api for iDRAC modules."""

    def __init__(self, module_params, req_session=False):
        """Collect connection settings from the Ansible module parameters.

        :param module_params: dict holding the idrac_* connection options.
        :param req_session: when True, a session (X-Auth-Token) is created in
            ``__enter__`` instead of sending basic auth on every request.
        """
        self.ipaddress = module_params['idrac_ip']
        self.username = module_params['idrac_user']
        self.password = module_params['idrac_password']
        self.port = module_params['idrac_port']
        self.validate_certs = module_params.get("validate_certs", False)
        self.ca_path = module_params.get("ca_path")
        self.timeout = module_params.get("timeout", 30)
        self.use_proxy = module_params.get("use_proxy", True)
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_url(self, uri):
        """Prefix *uri* with the https://host:port base."""
        return "{0}://{1}:{2}{3}".format(self.protocol, self.ipaddress, self.port, uri)

    def _build_url(self, path, query_param=None):
        """Build the complete request URL, appending the encoded query string."""
        url = path
        base_uri = self._get_url(url)
        if path:
            url = base_uri
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Create the keyword arguments shared by all ``open_url`` calls."""
        req_header = self._headers
        # NOTE(review): this updates self._headers in place, so extra headers
        # persist across requests — preserved as-is for compatibility.
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        if self.ca_path is None:
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": self.use_proxy,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, path, method, api_timeout, headers=None):
        """Argument spec for basic authentication (no session token)."""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        # The session-creation POST itself must not carry basic auth.
        if not (path == SESSION_RESOURCE_COLLECTION["SESSION"] and method == 'POST'):
            url_kwargs["url_username"] = self.username
            url_kwargs["url_password"] = self.password
            url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Argument spec when authenticating with an existing session token."""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, uri, method, data=None, query_param=None, headers=None, api_timeout=None, dump=True):
        """Send a request through ``open_url`` and wrap the reply.

        :param uri: path to request, without query parameters.
        :param method: HTTP verb.
        :param data: optional payload; JSON-encoded when *dump* is True.
        :param query_param: optional dict of query parameters.
        :param headers: optional extra HTTP headers.
        :param api_timeout: per-request timeout; defaults to ``self.timeout``.
        :param dump: whether to ``json.dumps`` the payload before sending.
        :returns: OpenURLResponse
        """
        try:
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(uri, method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(uri, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Create a session and store its X-Auth-Token when req_session is set."""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password}
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request(path, 'POST', data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.headers.get('X-Auth-Token')
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Delete the session created in ``__enter__``, if any."""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request(path, 'DELETE')
        return False

    @property
    def get_server_generation(self):
        """Fetch the generation and firmware version of the connected server.

        :return: tuple ``(generation, firmware_version)``, e.g. (14, "4.11.11.11");
            both are None when the manager resource cannot be read.
        """
        # BUGFIX: 'generation' was previously unbound (NameError) when the GET
        # did not return 200; initialize both results up front.
        generation, firmware_version = None, None
        response = self.invoke_request(MANAGER_URI, 'GET')
        if response.status_code == 200:
            generation = int(re.search(r"\d+(?=G)", response.json_data["Model"]).group())
            firmware_version = response.json_data["FirmwareVersion"]
        return generation, firmware_version

    def wait_for_job_complete(self, task_uri, job_wait=False):
        """Poll *task_uri* every 10 seconds until the task leaves "Running".

        :param task_uri: uri to track the task.
        :param job_wait: when False, return immediately without polling.
        :return: OpenURLResponse of the last poll, the raw body when the
            response is not JSON, or None when *job_wait* is False.
        """
        response = None
        while job_wait:
            try:
                response = self.invoke_request(task_uri, "GET")
                if response.json_data.get("TaskState") == "Running":
                    time.sleep(10)
                else:
                    break
            except ValueError:
                # Some task responses are not JSON; fall back to the raw body.
                response = response.body
                break
        return response

    def wait_for_job_completion(self, job_uri, job_wait=False, reboot=False, apply_update=False):
        """Poll *job_uri* every 30 seconds until the job completes.

        :param job_uri: uri to track the job.
        :param job_wait: when False, perform a single GET and return.
        :param reboot: when False and *apply_update* is True, a job stuck in
            "Starting" terminates the wait early.
        :param apply_update: see *reboot*.
        :return: OpenURLResponse of the last poll.
        """
        time.sleep(5)
        response = self.invoke_request(job_uri, "GET")
        while job_wait:
            response = self.invoke_request(job_uri, "GET")
            if response.json_data.get("PercentComplete") == 100 and \
                    response.json_data.get("JobState") == "Completed":
                break
            if response.json_data.get("JobState") == "Starting" and not reboot and apply_update:
                break
            time.sleep(30)
        return response

    def _apply_share_parameters(self, payload, share):
        """Copy the optional network-share details into payload["ShareParameters"].

        Factored out of export_scp / import_scp_share / import_preview, which
        previously duplicated this mapping.  A None *share* is treated as an
        empty dict; only keys that are present and non-None are copied
        (ShareName must additionally be non-empty, matching the original).
        """
        if share is None:
            share = {}
        if share.get("share_ip") is not None:
            payload["ShareParameters"]["IPAddress"] = share["share_ip"]
        if share.get("share_name"):
            payload["ShareParameters"]["ShareName"] = share["share_name"]
        if share.get("share_type") is not None:
            payload["ShareParameters"]["ShareType"] = share["share_type"]
        if share.get("file_name") is not None:
            payload["ShareParameters"]["FileName"] = share["file_name"]
        if share.get("username") is not None:
            payload["ShareParameters"]["Username"] = share["username"]
        if share.get("password") is not None:
            payload["ShareParameters"]["Password"] = share["password"]

    def export_scp(self, export_format=None, export_use=None, target=None,
                   job_wait=False, share=None):
        """Export the system configuration profile from the system.

        :param export_format: XML or JSON.
        :param export_use: Default or Clone or Replace.
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param job_wait: True or False decide whether to wait till the job completion.
        :param share: dict of network-share details (see _apply_share_parameters).
        :return: exported data in requested format.
        """
        payload = {"ExportFormat": export_format, "ExportUse": export_use,
                   "ShareParameters": {"Target": target}}
        self._apply_share_parameters(payload, share)
        response = self.invoke_request(EXPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_scp_share(self, shutdown_type=None, host_powerstate=None, job_wait=True,
                         target=None, import_buffer=None, share=None):
        """Import a system configuration profile using a network share.

        :param shutdown_type: e.g. "Graceful".
        :param host_powerstate: e.g. "On".
        :param job_wait: True or False decide whether to wait till the job completion.
        :param target: e.g. "IDRAC".
        :param import_buffer: inline SCP content, used instead of a share file.
        :param share: dict of network-share details (see _apply_share_parameters).
        :return: json response
        """
        payload = {"ShutdownType": shutdown_type, "EndHostPowerState": host_powerstate,
                   "ShareParameters": {"Target": target}}
        if import_buffer is not None:
            payload["ImportBuffer"] = import_buffer
        self._apply_share_parameters(payload, share)
        response = self.invoke_request(IMPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_preview(self, import_buffer=None, target=None, share=None, job_wait=False):
        """Preview an SCP import without applying it.

        :param import_buffer: inline SCP content, used instead of a share file.
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param share: dict of network-share details (see _apply_share_parameters).
        :param job_wait: True or False decide whether to wait till the job completion.
        :return: json response
        """
        payload = {"ShareParameters": {"Target": target}}
        if import_buffer is not None:
            payload["ImportBuffer"] = import_buffer
        self._apply_share_parameters(payload, share)
        response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_scp(self, import_buffer=None, target=None, job_wait=False):
        """Import system configuration details to the system.

        :param import_buffer: import buffer payload content xml or json format.
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param job_wait: True or False decide whether to wait till the job completion.
        :return: json response
        """
        payload = {"ImportBuffer": import_buffer, "ShareParameters": {"Target": target}}
        response = self.invoke_request(IMPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def get_idrac_local_account_attr(self, idrac_attribues, fqdd=None):
        """Filter the local user attributes out of exported iDRAC attributes.

        :param idrac_attribues: full SCP export in dict (JSON) form.
        :param fqdd: component FQDD whose attributes should be scanned.
        :return: dict of "Users.*" attribute name/value pairs, or None when
            no SystemConfiguration section is present.
        """
        user_attr = None
        if "SystemConfiguration" in idrac_attribues:
            sys_config = idrac_attribues.get("SystemConfiguration")
            # BUGFIX: 'attributes' was unbound (NameError) when no component
            # matched *fqdd*; default to an empty list.
            attributes = []
            for comp in sys_config.get("Components"):
                if comp.get("FQDD") == fqdd:
                    attributes = comp.get("Attributes")
                    break
            user_attr = dict([(attr["Name"], attr["Value"]) for attr in attributes
                              if attr["Name"].startswith("Users.")])
        return user_attr

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")


# --- (patch metadata) new file: ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py ---
# -*- coding: utf-8 -*-
# Dell EMC OpenManage Ansible Modules
# Version 5.0.1
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#


from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import json
import os
import time
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode

# Common Ansible argument spec for OME connection options.
ome_auth_params = {
    "hostname": {"required": True, "type": "str"},
    "username": {"required": True, "type": "str"},
    "password": {"required": True, "type": "str", "no_log": True},
    "port": {"type": "int", "default": 443},
    "validate_certs": {"type": "bool", "default": True},
    "ca_path": {"type": "path"},
    "timeout": {"type": "int", "default": 30},
}

SESSION_RESOURCE_COLLECTION = {
    "SESSION": "SessionService/Sessions",
    "SESSION_ID": "SessionService/Sessions('{Id}')",
}

JOB_URI = "JobService/Jobs({job_id})"
JOB_SERVICE_URI = "JobService/Jobs"


class OpenURLResponse(object):
    """Wrapper around the HTTPResponse returned by ``open_url``."""

    def __init__(self, resp):
        # The body is read eagerly because the underlying response stream
        # can only be consumed once.
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        """Response body parsed as JSON.

        :raises ValueError: if the body is not valid JSON.
        """
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        """HTTP status code of the response."""
        return self.resp.getcode()

    @property
    def success(self):
        """True for the success codes OME returns (200/201/202/204)."""
        return self.status_code in (200, 201, 202, 204)

    @property
    def token_header(self):
        """Value of the X-Auth-Token response header, or None."""
        return self.resp.headers.get('X-Auth-Token')


class RestOME(object):
    """Handles OME API requests"""

    def __init__(self, module_params=None, req_session=False):
        """Collect connection settings from the Ansible module parameters.

        :param module_params: dict matching ``ome_auth_params``.
        :param req_session: when True, a session (X-Auth-Token) is created in
            ``__enter__`` instead of sending basic auth on every request.
        """
        self.module_params = module_params
        self.hostname = self.module_params["hostname"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.port = self.module_params["port"]
        self.validate_certs = self.module_params.get("validate_certs", True)
        self.ca_path = self.module_params.get("ca_path")
        self.timeout = self.module_params.get("timeout", 30)
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """builds base url"""
        return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)

    def _build_url(self, path, query_param=None):
        """builds complete url"""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = '{0}/{1}'.format(base_uri, path)
        if query_param:
            # OME filtering does not work as expected when '+' is passed;
            # urlencode encodes spaces as '+', so replace them with '%20'.
            url += "?{0}".format(urlencode(query_param).replace('+', '%20'))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Create the keyword arguments shared by all ``open_url`` calls."""
        req_header = self._headers
        # NOTE(review): this updates self._headers in place, so extra headers
        # persist across requests — preserved as-is for compatibility.
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        if self.ca_path is None:
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": True,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, method, api_timeout, headers=None):
        """Argument spec for basic authentication (no session token)."""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["url_username"] = self.username
        url_kwargs["url_password"] = self.password
        url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Argument spec when authenticating with an existing session token."""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=None, dump=True):
        """
        Sends a request through open_url
        Returns :class:`OpenURLResponse` object.
        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Create a session and store its X-Auth-Token when req_session is set."""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password,
                       'SessionType': 'API', }
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.token_header
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Delete the session created in ``__enter__``, if any."""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        return False

    def get_all_report_details(self, uri):
        """Fetch every report item behind *uri* by paging with $top/$skip.

        This implementation mainly depends on the '@odata.count' value:
        the first request (no query string) returns the total number of
        available reports in '@odata.count'.

        :return: dict with the last response object and the full item list.
        """
        try:
            resp = self.invoke_request('GET', uri)
            data = resp.json_data
            report_list = data["value"]
            total_count = data['@odata.count']
            remaining_count = total_count - len(report_list)
            first_page_count = len(report_list)
            while remaining_count > 0:
                resp = self.invoke_request('GET', uri,
                                           query_param={"$top": first_page_count, "$skip": len(report_list)})
                data = resp.json_data
                value = data["value"]
                # BUGFIX: if the server reports a count but returns an empty
                # page (first_page_count == 0), the original looped forever.
                if not value:
                    break
                report_list.extend(value)
                remaining_count = remaining_count - len(value)
            return {"resp_obj": resp, "report_list": report_list}
        except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
            raise err

    def get_job_type_id(self, jobtype_name):
        """Return the ID of the job type named *jobtype_name*, or None."""
        job_type_id = None
        resp = self.invoke_request('GET', "JobService/JobTypes")
        data = resp.json_data["value"]
        for each in data:
            if each["Name"] == jobtype_name:
                job_type_id = each["Id"]
                break
        return job_type_id

    def get_device_id_from_service_tag(self, service_tag):
        """
        :param service_tag: service tag of the device
        :return: dict
            Id: int: device id (None when the service tag is not found)
            value: dict: device id details
        """
        device_id = None
        query = "DeviceServiceTag eq '{0}'".format(service_tag)
        response = self.invoke_request("GET", "DeviceService/Devices", query_param={"$filter": query})
        value = response.json_data.get("value", [])
        device_info = {}
        if value:
            device_info = value[0]
            device_id = device_info["Id"]
        return {"Id": device_id, "value": device_info}

    def get_all_items_with_pagination(self, uri):
        """Fetch every item behind a pagination-capable GET uri by following
        '@odata.nextLink'.

        :param uri: uri which supports pagination
        :return: dict with the reported total count and all collected items.
        """
        try:
            resp = self.invoke_request('GET', uri)
            data = resp.json_data
            total_items = data.get("value", [])
            total_count = data.get('@odata.count', 0)
            next_link = data.get('@odata.nextLink', '')
            while next_link:
                # nextLink is absolute; strip everything up to '/api'.
                resp = self.invoke_request('GET', next_link.split('/api')[-1])
                data = resp.json_data
                value = data["value"]
                next_link = data.get('@odata.nextLink', '')
                total_items.extend(value)
            return {"total_count": total_count, "value": total_items}
        except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
            raise err

    def get_device_type(self):
        """
        Returns device type map where as key is type and value is type name
        eg: {1000: "SERVER", 2000: "CHASSIS", 4000: "NETWORK_IOM", "8000": "STORAGE_IOM", 3000: "STORAGE"}
        :return: dict, first item dict gives device type map
        """
        device_map = {}
        response = self.invoke_request("GET", "DeviceService/DeviceType")
        if response.json_data.get("value"):
            device_map = dict([(item["DeviceType"], item["Name"]) for item in response.json_data["value"]])
        return device_map

    def get_job_info(self, job_id):
        """Poll the job once and classify its state.

        :return: tuple (exit_poll, job_failed, message); exit_poll is True
            when polling should stop (job completed, failed, or untrackable).
        """
        try:
            job_status_map = {
                2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "Completed",
                2070: "Failed", 2090: "Warning", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
                2103: "Canceled"
            }
            failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
            job_url = JOB_URI.format(job_id=job_id)
            job_resp = self.invoke_request('GET', job_url)
            job_dict = job_resp.json_data
            job_status = job_dict['LastRunStatus']['Id']
            if job_status in [2060, 2020]:
                job_failed = False
                message = "Job {0} successfully.".format(job_status_map[job_status])
                exit_poll = True
                return exit_poll, job_failed, message
            elif job_status in failed_job_status:
                exit_poll = True
                job_failed = True
                message = "Job is in {0} state, and is not completed.".format(job_status_map[job_status])
                return exit_poll, job_failed, message
            return False, False, None
        except HTTPError:
            job_failed = True
            message = "Unable to track the job status of {0}.".format(job_id)
            exit_poll = True
            return exit_poll, job_failed, message

    def job_tracking(self, job_id, job_wait_sec=600, sleep_time=60):
        """
        job_id: job id
        job_wait_sec: Maximum time to wait to fetch the final job details in seconds
        sleep_time: Maximum time to sleep in seconds in each job details fetch
        """
        max_sleep_time = job_wait_sec
        sleep_interval = sleep_time
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                # Last (possibly shorter) interval before the deadline.
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            exit_poll, job_failed, job_message = self.get_job_info(job_id)
            if exit_poll is True:
                return job_failed, job_message
        return True, "The job is not complete after {0} seconds.".format(job_wait_sec)

    def strip_substr_dict(self, odata_dict, chkstr='@odata.'):
        """Remove every key containing *chkstr* (case-insensitive) from
        *odata_dict* in place and return it."""
        cp = odata_dict.copy()
        klist = cp.keys()
        for k in klist:
            if chkstr in str(k).lower():
                odata_dict.pop(k)
        return odata_dict

    def job_submission(self, job_name, job_desc, targets, params, job_type,
                       schedule="startnow", state="Enabled"):
        """Submit a new job to the OME JobService and return the response."""
        job_payload = {"JobName": job_name, "JobDescription": job_desc,
                       "Schedule": schedule, "State": state, "Targets": targets,
                       "Params": params, "JobType": job_type}
        response = self.invoke_request("POST", JOB_SERVICE_URI, data=job_payload)
        return response

    def test_network_connection(self, share_address, share_path, share_type,
                                share_user=None, share_password=None, share_domain=None):
        """Submit a ValidateNWFileShare_Task job that checks write access to a
        network share, and return the job-submission response."""
        job_type = {"Id": 56, "Name": "ValidateNWFileShare_Task"}
        params = [
            {"Key": "checkPathOnly", "Value": "false"},
            {"Key": "shareType", "Value": share_type},
            {"Key": "ShareNetworkFilePath", "Value": share_path},
            {"Key": "shareAddress", "Value": share_address},
            {"Key": "testShareWriteAccess", "Value": "true"}
        ]
        if share_user is not None:
            params.append({"Key": "UserName", "Value": share_user})
        if share_password is not None:
            params.append({"Key": "Password", "Value": share_password})
        if share_domain is not None:
            params.append({"Key": "domainName", "Value": share_domain})
        job_response = self.job_submission("Validate Share", "Validate Share", [], params, job_type)
        return job_response

    def check_existing_job_state(self, job_type_name):
        """Check whether a job of *job_type_name* is already queued/starting/running.

        :return: tuple (job_allowed, available_jobs): job_allowed is False when a
            conflicting job exists, in which case available_jobs is that job;
            otherwise True and the (possibly empty) list of active jobs.
        """
        query_param = {"$filter": "LastRunStatus/Id eq 2030 or LastRunStatus/Id eq 2040 or LastRunStatus/Id eq 2050"}
        job_resp = self.invoke_request("GET", JOB_SERVICE_URI, query_param=query_param)
        job_lst = job_resp.json_data["value"] if job_resp.json_data.get("value") is not None else []
        for job in job_lst:
            if job["JobType"]["Name"] == job_type_name:
                job_allowed = False
                available_jobs = job
                break
        else:
            job_allowed = True
            available_jobs = job_lst
        return job_allowed, available_jobs

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")


# --- (patch metadata) new file: ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py ---
# -*- coding: utf-8 -*-
# Dell EMC OpenManage Ansible Modules
# Version 5.0.1
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#


from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import os
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode

# Common Ansible argument spec for Redfish connection options.
redfish_auth_params = {
    "baseuri": {"required": True, "type": "str"},
    "username": {"required": True, "type": "str"},
    "password": {"required": True, "type": "str", "no_log": True},
    "validate_certs": {"type": "bool", "default": True},
    "ca_path": {"type": "path"},
    "timeout": {"type": "int", "default": 30},
}

SESSION_RESOURCE_COLLECTION = {
    "SESSION": "/redfish/v1/Sessions",
    "SESSION_ID": "/redfish/v1/Sessions/{Id}",
}


class OpenURLResponse(object):
    """Wrapper around the HTTPResponse returned by ``open_url``."""

    def __init__(self, resp):
        # The body is read eagerly because the underlying response stream
        # can only be consumed once.
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        """Response body parsed as JSON.

        :raises ValueError: if the body is not valid JSON.
        """
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        """HTTP status code of the response."""
        return self.resp.getcode()

    @property
    def success(self):
        """True for any 2xx status code.

        BUGFIX: the original ``status >= 200 & status <= 299`` applied the
        bitwise '&' before the comparisons (Python operator precedence), so
        e.g. a 404 response was wrongly reported as success.
        """
        return 200 <= self.status_code <= 299

    @property
    def headers(self):
        """Raw response headers."""
        return self.resp.headers

    @property
    def reason(self):
        """HTTP reason phrase of the response."""
        return self.resp.reason


class Redfish(object):
    """Handles iDRAC Redfish API requests"""

    def __init__(self, module_params=None, req_session=False):
        """Collect connection settings from the Ansible module parameters.

        :param module_params: dict matching ``redfish_auth_params``.
        :param req_session: when True, a session (X-Auth-Token) is created in
            ``__enter__`` instead of sending basic auth on every request.
        """
        self.module_params = module_params
        self.hostname = self.module_params["baseuri"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.validate_certs = self.module_params.get("validate_certs", True)
        self.ca_path = self.module_params.get("ca_path")
        self.timeout = self.module_params.get("timeout", 30)
        self.use_proxy = self.module_params.get("use_proxy", True)
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self.root_uri = '/redfish/v1/'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """builds base url"""
        return '{0}://{1}'.format(self.protocol, self.hostname)

    def _build_url(self, path, query_param=None):
        """builds complete url"""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = base_uri + path
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Create the keyword arguments shared by all ``open_url`` calls."""
        req_header = self._headers
        # NOTE(review): this updates self._headers in place, so extra headers
        # persist across requests — preserved as-is for compatibility.
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        if self.ca_path is None:
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": self.use_proxy,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, path, method, api_timeout, headers=None):
        """Argument spec for basic authentication (no session token)."""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        # The session-creation POST itself must not carry basic auth.
        if not (path == SESSION_RESOURCE_COLLECTION["SESSION"] and method == 'POST'):
            url_kwargs["url_username"] = self.username
            url_kwargs["url_password"] = self.password
            url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Argument spec when authenticating with an existing session token."""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=None, dump=True):
        """
        Sends a request through open_url
        Returns :class:`OpenURLResponse` object.
        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(path, method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Create a session and store its X-Auth-Token when req_session is set."""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password}
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.headers.get('X-Auth-Token')
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Delete the session created in ``__enter__``, if any."""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        return False

    def strip_substr_dict(self, odata_dict, chkstr='@odata.'):
        """Remove every key containing *chkstr* (case-insensitive) from
        *odata_dict* in place and return it."""
        cp = odata_dict.copy()
        klist = cp.keys()
        for k in klist:
            if chkstr in str(k).lower():
                odata_dict.pop(k)
        return odata_dict

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")


# --- (patch metadata) new file: ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py ---
b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- + +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +CHANGES_MSG = "Changes found to be applied." +NO_CHANGES_MSG = "No changes found to be applied." +RESET_UNTRACK = "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply." +RESET_SUCCESS = "iDRAC has been reset successfully." +RESET_FAIL = "Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC." 
# Shared status messages and Redfish resource URIs used by the helpers below.
CHANGES_MSG = "Changes found to be applied."
NO_CHANGES_MSG = "No changes found to be applied."
RESET_UNTRACK = "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
RESET_SUCCESS = "iDRAC has been reset successfully."
RESET_FAIL = "Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
IDRAC_RESET_URI = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
SYSTEM_RESET_URI = "/redfish/v1/Systems/{res_id}/Actions/ComputerSystem.Reset"
MANAGER_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
MANAGER_JOB_ID_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}"


import time
try:
    from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
except ImportError:
    # six.moves maps to urllib.error on Python 3; fall back so this module
    # stays importable (e.g. for unit tests) without ansible installed.
    from urllib.error import URLError, HTTPError


def strip_substr_dict(odata_dict, chkstr='@odata.', case_sensitive=False):
    '''
    Remove keys containing *chkstr* from *odata_dict* (mutated in place).

    :param odata_dict: the dict to be stripped of unwanted keys
    :param chkstr: the substring to be checked among the keys
    :param case_sensitive: should the match be case sensitive or not
    :return: dict (the same object, for chaining)
    '''
    if not case_sensitive:
        chkstr = chkstr.lower()
    # Iterate over a snapshot of the keys so we can pop while looping.
    for k in list(odata_dict.keys()):
        candidate = k if case_sensitive else str(k).lower()
        if chkstr in candidate:
            odata_dict.pop(k, None)
    return odata_dict


def job_tracking(rest_obj, job_uri, max_job_wait_sec=600, job_state_var=('LastRunStatus', 'Id'),
                 job_complete_states=(2060, 2020, 2090), job_fail_states=(2070, 2101, 2102, 2103),
                 job_running_states=(2050, 2040, 2030, 2100),
                 sleep_interval_secs=10, max_unresponsive_wait=30, initial_wait=1):
    '''
    Poll an OME job until it reaches a terminal state or the wait budget runs out.

    :param rest_obj: a REST handle exposing invoke_request(method, uri), e.g.
        ansible_collections.dellemc.openmanage.plugins.module_utils.ome.RestOME
    :param job_uri: the uri to fetch the job response dict
    :param max_job_wait_sec: max time the job will wait
    :param job_state_var: nested-dict traversal path to the job status value
    :param job_complete_states: status codes meaning success
    :param job_fail_states: status codes meaning failure
    :param job_running_states: status codes meaning still in progress
    :param sleep_interval_secs: polling interval
    :param max_unresponsive_wait: extra grace period when requests raise
    :param initial_wait: delay before the first poll
    :return: (job_failed, msg, job_dict, wait_time)
    '''
    # OME job status map for reference:
    # 2020 Scheduled, 2030 Queued, 2040 Starting, 2050 Running,
    # 2060 completed successfully, 2070 Failed, 2080 New,
    # 2090 completed with errors, 2100 Aborted, 2101 Paused,
    # 2102 Stopped, 2103 Canceled
    max_retries = max_job_wait_sec // sleep_interval_secs
    unresp = max_unresponsive_wait // sleep_interval_secs
    loop_ctr = 0
    job_failed = True
    job_dict = {}
    wait_time = 0
    # The caller-supplied state sets must be mutually exclusive.
    if set(job_complete_states) & set(job_fail_states):
        return job_failed, "Overlapping job states found.", job_dict, wait_time
    msg = "Job tracking started."
    time.sleep(initial_wait)
    while loop_ctr < max_retries:
        loop_ctr += 1
        try:
            job_resp = rest_obj.invoke_request('GET', job_uri)
            job_dict = job_resp.json_data
            # Walk the nested path (e.g. LastRunStatus -> Id) to the status code.
            job_status = job_dict
            for x in job_state_var:
                job_status = job_status.get(x, {})
            if job_status in job_complete_states:
                job_failed = False
                msg = "Job tracking completed."
                loop_ctr = max_retries
            elif job_status in job_fail_states:
                job_failed = True
                msg = "Job is in Failed state."
                loop_ctr = max_retries
            if job_running_states:
                # Sleep only while the job reports a running state.
                if job_status in job_running_states:
                    time.sleep(sleep_interval_secs)
                    wait_time = wait_time + sleep_interval_secs
            else:
                # No running-state filter given: always pace the polling.
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
        except Exception as err:
            # Tolerate up to max_unresponsive_wait of consecutive failures.
            if unresp:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            else:
                job_failed = True
                msg = "Exception in job tracking " + str(err)
                break
            unresp = unresp - 1
    return job_failed, msg, job_dict, wait_time


def idrac_redfish_job_tracking(
        rest_obj, job_uri, max_job_wait_sec=600, job_state_var='JobState',
        job_complete_states=("Completed", "Downloaded", "CompletedWithErrors", "RebootCompleted"),
        job_fail_states=("Failed", "RebootFailed", "Unknown"),
        job_running_states=("Running", "RebootPending", "Scheduling", "Scheduled", "Downloading", "Waiting", "Paused",
                            "New", "PendingActivation", "ReadyForExecution"),
        sleep_interval_secs=10, max_unresponsive_wait=30, initial_wait=1):
    """Poll an iDRAC Redfish job until it reaches a terminal state.

    Same contract as job_tracking() but for iDRAC Redfish handles, whose
    invoke_request signature is (uri, method) — the reverse of OME's.
    Returns (job_failed, msg, job_dict, wait_time).
    """
    # iDRAC Redfish job states for reference: New, Scheduled, Running, Completed,
    # Downloading, Downloaded, Scheduling, ReadyForExecution, Waiting, Paused,
    # Failed, CompletedWithErrors, RebootPending, RebootFailed, RebootCompleted,
    # PendingActivation, Unknown
    max_retries = max_job_wait_sec // sleep_interval_secs
    unresp = max_unresponsive_wait // sleep_interval_secs
    loop_ctr = 0
    job_failed = True
    job_dict = {}
    wait_time = 0
    if set(job_complete_states) & set(job_fail_states):
        return job_failed, "Overlapping job states found.", job_dict, wait_time
    msg = "Job tracking started."
    time.sleep(initial_wait)
    while loop_ctr < max_retries:
        loop_ctr += 1
        try:
            job_resp = rest_obj.invoke_request(job_uri, 'GET')
            job_dict = job_resp.json_data
            job_status = job_dict.get(job_state_var, "Unknown")
            if job_status in job_running_states:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            elif job_status in job_complete_states:
                job_failed = False
                msg = "Job tracking completed."
                loop_ctr = max_retries
            elif job_status in job_fail_states:
                job_failed = True
                msg = "Job is in {0} state.".format(job_status)
                loop_ctr = max_retries
            else:  # unrecognised states, just wait
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
        except Exception as err:
            if unresp:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            else:
                job_failed = True
                msg = "Exception in job tracking " + str(err)
                break
            unresp = unresp - 1
    return job_failed, msg, job_dict, wait_time


def get_rest_items(rest_obj, uri="DeviceService/Devices", key="Id", value="Identifier", selector="value"):
    """Return a {key: value} mapping built from a paginated OME collection.

    Empty dict when the response carries no items under *selector*.
    """
    item_dict = {}
    resp = rest_obj.get_all_items_with_pagination(uri)
    if resp.get(selector):
        item_dict = dict((item.get(key), item.get(value)) for item in resp[selector])
    return item_dict


def get_item_and_list(rest_obj, name, uri, key='Name', value='value'):
    """Fetch a collection and return (matching_item, full_list).

    Looks for the first element whose *key* equals *name*; returns ({}, list)
    when no element matches or the request fails.
    """
    resp = rest_obj.invoke_request('GET', uri)
    tlist = []
    if resp.success and resp.json_data.get(value):
        tlist = resp.json_data.get(value, [])
        for xtype in tlist:
            if xtype.get(key, "") == name:
                return xtype, tlist
    return {}, tlist


def apply_diff_key(src, dest, klist):
    """Copy differing non-None values of *klist* keys from *src* into *dest*.

    Mutates *dest* in place; returns the number of keys that changed.
    """
    diff_cnt = 0
    for k in klist:
        v = src.get(k)
        if v is not None and v != dest.get(k):
            dest[k] = v
            diff_cnt = diff_cnt + 1
    return diff_cnt


def wait_for_job_completion(redfish_obj, uri, job_wait=True, wait_timeout=120, sleep_time=10):
    """Poll a Redfish job URI until PercentComplete hits 100 or the timeout lapses.

    With job_wait=False a single snapshot of the job is returned immediately.
    Returns (job_response, error_message) — error_message is "" on success.
    """
    max_sleep_time = wait_timeout
    sleep_interval = sleep_time
    if job_wait:
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                # Final, possibly shorter, sleep to honour the exact timeout.
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            job_resp = redfish_obj.invoke_request("GET", uri)
            if job_resp.json_data.get("PercentComplete") == 100:
                time.sleep(10)
                return job_resp, ""
    else:
        job_resp = redfish_obj.invoke_request("GET", uri)
        time.sleep(10)
        return job_resp, ""
    return {}, "The job is not complete after {0} seconds.".format(wait_timeout)


def wait_after_idrac_reset(idrac, wait_time_sec, interval=30):
    """Wait for the iDRAC to come back after a reset, probing MANAGERS_URI.

    Returns (track_failed, msg): track_failed is False once the iDRAC responds
    again within *wait_time_sec*, True if it never does.
    """
    time.sleep(interval // 2)
    msg = RESET_UNTRACK
    wait = wait_time_sec
    track_failed = True
    while wait > 0:
        try:
            idrac.invoke_request(MANAGERS_URI, 'GET')
            time.sleep(interval // 2)
            msg = RESET_SUCCESS
            track_failed = False
            break
        except Exception:
            time.sleep(interval)
            wait = wait - interval
    return track_failed, msg


# Can this be in idrac_redfish???
def reset_idrac(idrac_restobj, wait_time_sec=300, res_id=MANAGER_ID, interval=30):
    """Trigger a GracefulRestart of the iDRAC and optionally wait for it to return.

    Returns (reset, track_failed, reset_msg): reset is True when the reset
    action was accepted; track_failed reports whether the post-reset probe
    succeeded (only meaningful when wait_time_sec is truthy).
    """
    track_failed = True
    reset_msg = "iDRAC reset triggered successfully."
    try:
        idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
                                     data={"ResetType": "GracefulRestart"})
        if wait_time_sec:
            track_failed, reset_msg = wait_after_idrac_reset(idrac_restobj, wait_time_sec, interval)
        reset = True
    except Exception:
        reset = False
        reset_msg = RESET_FAIL
    return reset, track_failed, reset_msg


def get_manager_res_id(idrac):
    """Return the resource Id of the first Manager member.

    Falls back to the well-known MANAGER_ID on HTTPError.
    """
    try:
        resp = idrac.invoke_request(MANAGERS_URI, "GET")
        membs = resp.json_data.get("Members")
        res_uri = membs[0].get('@odata.id')
        res_id = res_uri.split("/")[-1]
    except HTTPError:
        res_id = MANAGER_ID
    return res_id


def wait_for_idrac_job_completion(idrac, uri, job_wait=True, wait_timeout=120, sleep_time=10):
    """Poll an iDRAC job URI until it completes, fails a reboot, or times out.

    With job_wait=False a single snapshot is returned immediately.
    Returns (job_response, error_message) — "" on success, the timeout
    message when the job is unfinished or the reboot failed.
    """
    max_sleep_time = wait_timeout
    sleep_interval = sleep_time
    job_msg = "The job is not complete after {0} seconds.".format(wait_timeout)
    if job_wait:
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            job_resp = idrac.invoke_request(uri, "GET")
            if job_resp.json_data.get("PercentComplete") == 100:
                time.sleep(10)
                return job_resp, ""
            if job_resp.json_data.get("JobState") == "RebootFailed":
                time.sleep(10)
                return job_resp, job_msg
    else:
        job_resp = idrac.invoke_request(uri, "GET")
        time.sleep(10)
        return job_resp, ""
    # Reuse the prepared message instead of re-formatting the same string.
    return {}, job_msg


def idrac_system_reset(idrac, res_id, payload=None, job_wait=True, wait_time_sec=300, interval=30):
    """Trigger a ComputerSystem.Reset and track the resulting RebootPending job.

    Returns (reset, track_failed, reset_msg, job_resp): reset is True when a
    reboot job was found and completed within wait_time_sec.
    """
    track_failed, reset, job_resp = True, False, {}
    reset_msg = RESET_UNTRACK
    try:
        idrac.invoke_request(SYSTEM_RESET_URI.format(res_id=res_id), 'POST', data=payload)
        time.sleep(10)
        if wait_time_sec:
            resp = idrac.invoke_request(MANAGER_JOB_URI, "GET")
            # Only a RebootPending job proves the reset actually scheduled one.
            job = list(filter(lambda d: d["JobState"] in ["RebootPending"], resp.json_data["Members"]))
            if job:
                job_resp, msg = wait_for_idrac_job_completion(idrac, MANAGER_JOB_ID_URI.format(job[0]["Id"]),
                                                              job_wait=job_wait, wait_timeout=wait_time_sec)
                if "job is not complete" in msg:
                    reset, reset_msg = False, msg
                if not msg:
                    reset = True
    except Exception:
        reset = False
        reset_msg = RESET_FAIL
    return reset, track_failed, reset_msg, job_resp


def get_system_res_id(idrac):
    """Return (resource Id of the first System member, error_message).

    Falls back to the well-known SYSTEM_ID with an error message on HTTPError.
    """
    res_id = SYSTEM_ID
    error_msg = ""
    try:
        resp = idrac.invoke_request(SYSTEMS_URI, "GET")
    except HTTPError:
        error_msg = "Unable to complete the request because the resource URI " \
                    "does not exist or is not implemented."
    else:
        member = resp.json_data.get("Members")
        res_uri = member[0].get('@odata.id')
        res_id = res_uri.split("/")[-1]
    return res_id, error_msg
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead. + removed_from_collection: dellemc.openmanage +description: + - This module allows to configure the iDRAC eventing related attributes. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for Network Share. + - This option is deprecated and will be removed in the later version. + destination_number: + type: int + description: Destination number for SNMP Trap. + destination: + type: str + description: Destination for SNMP Trap. + snmp_v3_username: + type: str + description: SNMP v3 username for SNMP Trap. + snmp_trap_state: + type: str + description: Whether to Enable or Disable SNMP alert. + choices: [Enabled, Disabled] + email_alert_state: + type: str + description: Whether to Enable or Disable Email alert. + choices: [Enabled, Disabled] + alert_number: + type: int + description: Alert number for Email configuration. + address: + type: str + description: Email address for SNMP Trap. + custom_message: + type: str + description: Custom message for SNMP Trap reference. 
+ enable_alerts: + type: str + description: Whether to Enable or Disable iDRAC alerts. + choices: [Enabled, Disabled] + authentication: + type: str + description: Simple Mail Transfer Protocol Authentication. + choices: [Enabled, Disabled] + smtp_ip_address: + type: str + description: SMTP IP address for communication. + smtp_port: + type: str + description: SMTP Port number for access. + username: + type: str + description: Username for SMTP authentication. + password: + type: str + description: Password for SMTP authentication. +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure the iDRAC eventing attributes + dellemc.openmanage.dellemc_configure_idrac_eventing: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination_number: "2" + destination: "1.1.1.1" + snmp_v3_username: "None" + snmp_trap_state: "Enabled" + email_alert_state: "Disabled" + alert_number: "1" + address: "alert_email@company.com" + custom_message: "Custom Message" + enable_alerts: "Disabled" + authentication: "Enabled" + smtp_ip_address: "192.168.0.1" + smtp_port: "25" + username: "username" + password: "password" +""" + +RETURN = r''' +--- +msg: + description: Successfully configured the iDRAC eventing settings. + returned: always + type: str + sample: Successfully configured the iDRAC eventing settings. +eventing_status: + description: Configures the iDRAC eventing attributes. 
+ returned: success + type: dict + sample: { + "CompletionTime": "2020-04-02T02:43:28", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_12345123456", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import os +import tempfile +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +try: + from omdrivers.enums.iDRAC.iDRAC import (State_SNMPAlertTypes, Enable_EmailAlertTypes, + AlertEnable_IPMILanTypes, + SMTPAuthentication_RemoteHostsTypes) + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +def run_idrac_eventing_config(idrac, module): + """ + Get Lifecycle Controller status + + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + idrac.use_redfish = True + share_path = tempfile.gettempdir() + os.sep + upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True) + if not upd_share.IsValid: + module.fail_json(msg="Unable to access the share. 
Ensure that the share name, " + "share mount, and share credentials provided are correct.") + set_liason = idrac.config_mgr.set_liason_share(upd_share) + if set_liason['Status'] == "Failed": + try: + message = set_liason['Data']['Message'] + except (IndexError, KeyError): + message = set_liason['Message'] + module.fail_json(msg=message) + + if module.params["destination_number"] is not None: + if module.params["destination"] is not None: + idrac.config_mgr.configure_snmp_trap_destination( + destination=module.params["destination"], + destination_number=module.params["destination_number"] + ) + if module.params["snmp_v3_username"] is not None: + idrac.config_mgr.configure_snmp_trap_destination( + snmp_v3_username=module.params["snmp_v3_username"], + destination_number=module.params["destination_number"] + ) + if module.params["snmp_trap_state"] is not None: + idrac.config_mgr.configure_snmp_trap_destination( + state=State_SNMPAlertTypes[module.params["snmp_trap_state"]], + destination_number=module.params["destination_number"] + ) + + if module.params["alert_number"] is not None: + if module.params["email_alert_state"] is not None: + idrac.config_mgr.configure_email_alerts( + state=Enable_EmailAlertTypes[module.params["email_alert_state"]], + alert_number=module.params["alert_number"] + ) + if module.params["address"] is not None: + idrac.config_mgr.configure_email_alerts( + address=module.params["address"], + alert_number=module.params["alert_number"] + ) + if module.params["custom_message"] is not None: + idrac.config_mgr.configure_email_alerts( + custom_message=module.params["custom_message"], + alert_number=module.params["alert_number"] + ) + + if module.params["enable_alerts"] is not None: + idrac.config_mgr.configure_idrac_alerts( + enable_alerts=AlertEnable_IPMILanTypes[module.params["enable_alerts"]], + ) + + if module.params['authentication'] is not None: + idrac.config_mgr.configure_smtp_server_settings( + 
authentication=SMTPAuthentication_RemoteHostsTypes[module.params['authentication']]) + if module.params['smtp_ip_address'] is not None: + idrac.config_mgr.configure_smtp_server_settings( + smtp_ip_address=module.params['smtp_ip_address']) + if module.params['smtp_port'] is not None: + idrac.config_mgr.configure_smtp_server_settings( + smtp_port=module.params['smtp_port']) + if module.params['username'] is not None: + idrac.config_mgr.configure_smtp_server_settings( + username=module.params['username']) + if module.params['password'] is not None: + idrac.config_mgr.configure_smtp_server_settings( + password=module.params['password']) + + if module.check_mode: + status = idrac.config_mgr.is_change_applicable() + if status.get("changes_applicable"): + module.exit_json(msg="Changes found to commit!", changed=True) + else: + module.exit_json(msg="No changes found to commit!") + else: + status = idrac.config_mgr.apply_changes(reboot=False) + + return status + + +def main(): + specs = dict( + share_name=dict(required=False, type='str'), + share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True), + share_user=dict(required=False, type='str'), + share_mnt=dict(required=False, type='str'), + # setup SNMP Trap Destination + destination_number=dict(required=False, type="int"), + destination=dict(required=False, type="str"), + snmp_v3_username=dict(required=False, type="str"), + snmp_trap_state=dict(required=False, choices=["Enabled", "Disabled"], default=None), + # setup Email Alerts + alert_number=dict(required=False, type="int"), + address=dict(required=False, default=None, type="str"), + custom_message=dict(required=False, default=None, type="str"), + email_alert_state=dict(required=False, choices=["Enabled", "Disabled"], default=None), + # setup iDRAC Alerts + enable_alerts=dict(required=False, choices=["Enabled", "Disabled"], default=None), + # setup SMTP + authentication=dict(required=False, choices=['Enabled', 'Disabled'], default=None), + 
smtp_ip_address=dict(required=False, default=None, type='str'), + smtp_port=dict(required=False, type='str'), + username=dict(required=False, type="str"), + password=dict(required=False, type="str", no_log=True), + ) + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + status = run_idrac_eventing_config(idrac, module) + msg, changed = "Successfully configured the iDRAC eventing settings.", True + if status.get('Status') == "Success": + if (status.get('Message') == "No changes found to commit!") or \ + ("No changes were applied" in status.get('Message')): + msg = status.get('Message') + changed = False + elif status.get('Status') == "Failed": + module.fail_json(msg="Failed to configure the iDRAC eventing settings") + module.exit_json(msg=msg, eventing_status=status, changed=changed) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py new file mode 100644 index 00000000..5a0eacf1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py @@ -0,0 +1,394 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: dellemc_configure_idrac_services +short_description: Configures the iDRAC services related attributes +version_added: "1.0.0" +deprecated: + removed_at_date: "2024-07-31" + why: Replaced with M(dellemc.openmanage.idrac_attributes). + alternative: Use M(dellemc.openmanage.idrac_attributes) instead. + removed_from_collection: dellemc.openmanage +description: + - This module allows to configure the iDRAC services related attributes. +options: + idrac_ip: + required: True + type: str + description: iDRAC IP Address. + idrac_user: + required: True + type: str + description: iDRAC username. + idrac_password: + required: True + type: str + description: iDRAC user password. + aliases: ['idrac_pwd'] + idrac_port: + type: int + description: iDRAC port. + default: 443 + validate_certs: + description: + - If C(False), the SSL certificates will not be validated. + - Configure C(False) only on personally controlled sites where self-signed certificates are used. + - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default. + type: bool + default: True + version_added: 5.0.0 + ca_path: + description: + - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation. + type: path + version_added: 5.0.0 + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. 
+ share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for Network Share. + - This option is deprecated and will be removed in the later version. + enable_web_server: + type: str + description: Whether to Enable or Disable webserver configuration for iDRAC. + choices: [Enabled, Disabled] + ssl_encryption: + type: str + description: Secure Socket Layer encryption for webserver. + choices: [Auto_Negotiate, T_128_Bit_or_higher, T_168_Bit_or_higher, T_256_Bit_or_higher] + tls_protocol: + type: str + description: Transport Layer Security for webserver. + choices: [TLS_1_0_and_Higher, TLS_1_1_and_Higher, TLS_1_2_Only] + https_port: + type: int + description: HTTPS access port. + http_port: + type: int + description: HTTP access port. + timeout: + type: str + description: Timeout value. + snmp_enable: + type: str + description: Whether to Enable or Disable SNMP protocol for iDRAC. + choices: [Enabled, Disabled] + snmp_protocol: + type: str + description: Type of the SNMP protocol. + choices: [All, SNMPv3] + community_name: + type: str + description: SNMP community name for iDRAC. It is used by iDRAC to validate SNMP queries + received from remote systems requesting SNMP data access. + alert_port: + type: int + description: The iDRAC port number that must be used for SNMP traps. + The default value is 162, and the acceptable range is between 1 to 65535. + default: 162 + discovery_port: + type: int + description: The SNMP agent port on the iDRAC. The default value is 161, + and the acceptable range is between 1 to 65535. + default: 161 + trap_format: + type: str + description: SNMP trap format for iDRAC. 
+ choices: [SNMPv1, SNMPv2, SNMPv3] + ipmi_lan: + type: dict + description: Community name set on iDRAC for SNMP settings. + suboptions: + community_name: + type: str + description: This option is used by iDRAC when it sends out SNMP and IPMI traps. + The community name is checked by the remote system to which the traps are sent. +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure the iDRAC services attributes + dellemc.openmanage.dellemc_configure_idrac_services: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + enable_web_server: "Enabled" + http_port: 80 + https_port: 443 + ssl_encryption: "Auto_Negotiate" + tls_protocol: "TLS_1_2_Only" + timeout: "1800" + snmp_enable: "Enabled" + snmp_protocol: "SNMPv3" + community_name: "public" + alert_port: 162 + discovery_port: 161 + trap_format: "SNMPv3" + ipmi_lan: + community_name: "public" +""" + +RETURN = r''' +--- +msg: + description: Overall status of iDRAC service attributes configuration. + returned: always + type: str + sample: Successfully configured the iDRAC services settings. +service_status: + description: Details of iDRAC services attributes configuration. 
+ returned: success + type: dict + sample: { + "CompletionTime": "2020-04-02T02:43:28", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_12345123456", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import os +import tempfile +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +try: + from omdrivers.enums.iDRAC.iDRAC import (Enable_WebServerTypes, + SSLEncryptionBitLength_WebServerTypes, + TLSProtocol_WebServerTypes, + AgentEnable_SNMPTypes, + SNMPProtocol_SNMPTypes) + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +def run_idrac_services_config(idrac, module): + """ + Get Lifecycle Controller status + + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + idrac.use_redfish = True + share_path = tempfile.gettempdir() + os.sep + upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True) + if not upd_share.IsValid: + module.fail_json(msg="Unable to access the share. 
Ensure that the share name, " + "share mount, and share credentials provided are correct.") + set_liason = idrac.config_mgr.set_liason_share(upd_share) + if set_liason['Status'] == "Failed": + try: + message = set_liason['Data']['Message'] + except (IndexError, KeyError): + message = set_liason['Message'] + module.fail_json(msg=message) + + if module.params['enable_web_server'] is not None: + idrac.config_mgr.configure_web_server( + enable_web_server=Enable_WebServerTypes[module.params['enable_web_server']] + ) + if module.params['http_port'] is not None: + idrac.config_mgr.configure_web_server( + http_port=module.params['http_port'] + ) + if module.params['https_port'] is not None: + idrac.config_mgr.configure_web_server( + https_port=module.params['https_port'] + ) + if module.params['timeout'] is not None: + idrac.config_mgr.configure_web_server( + timeout=module.params['timeout'] + ) + if module.params['ssl_encryption'] is not None: + idrac.config_mgr.configure_web_server( + ssl_encryption=SSLEncryptionBitLength_WebServerTypes[module.params['ssl_encryption']] + ) + if module.params['tls_protocol'] is not None: + idrac.config_mgr.configure_web_server( + tls_protocol=TLSProtocol_WebServerTypes[module.params['tls_protocol']] + ) + + if module.params['snmp_enable'] is not None: + idrac.config_mgr.configure_snmp( + snmp_enable=AgentEnable_SNMPTypes[module.params['snmp_enable']] + ) + if module.params['community_name'] is not None: + idrac.config_mgr.configure_snmp( + community_name=module.params['community_name'] + ) + if module.params['snmp_protocol'] is not None: + idrac.config_mgr.configure_snmp( + snmp_protocol=SNMPProtocol_SNMPTypes[module.params['snmp_protocol']] + ) + if module.params['alert_port'] is not None: + idrac.config_mgr.configure_snmp( + alert_port=module.params['alert_port'] + ) + if module.params['discovery_port'] is not None: + idrac.config_mgr.configure_snmp( + discovery_port=module.params['discovery_port'] + ) + if module.params['trap_format'] 
is not None: + idrac.config_mgr.configure_snmp( + trap_format=module.params['trap_format'] + ) + if module.params['ipmi_lan'] is not None: + ipmi_option = module.params.get('ipmi_lan') + community_name = ipmi_option.get('community_name') + if community_name is not None: + idrac.config_mgr.configure_snmp(ipmi_community=community_name) + + if module.check_mode: + status = idrac.config_mgr.is_change_applicable() + if status.get('changes_applicable'): + module.exit_json(msg="Changes found to commit!", changed=True) + else: + module.exit_json(msg="No changes found to commit!") + else: + return idrac.config_mgr.apply_changes(reboot=False) + + +# Main +def main(): + module = AnsibleModule( + argument_spec=dict( + + # iDRAC credentials + idrac_ip=dict(required=True, type='str'), + idrac_user=dict(required=True, type='str'), + idrac_password=dict(required=True, type='str', aliases=['idrac_pwd'], no_log=True), + idrac_port=dict(required=False, default=443, type='int'), + validate_certs=dict(type='bool', default=True), + ca_path=dict(type='path'), + # Export Destination + share_name=dict(required=False, type='str'), + share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True), + share_user=dict(required=False, type='str'), + share_mnt=dict(required=False, type='str'), + + # setup Webserver + enable_web_server=dict(required=False, choices=['Enabled', 'Disabled'], default=None), + http_port=dict(required=False, default=None, type='int'), + https_port=dict(required=False, default=None, type='int'), + ssl_encryption=dict(required=False, choices=['Auto_Negotiate', 'T_128_Bit_or_higher', + 'T_168_Bit_or_higher', 'T_256_Bit_or_higher'], + default=None), + tls_protocol=dict(required=False, choices=['TLS_1_0_and_Higher', + 'TLS_1_1_and_Higher', 'TLS_1_2_Only'], default=None), + timeout=dict(required=False, default=None, type="str"), + + # set up SNMP settings + snmp_enable=dict(required=False, choices=['Enabled', 'Disabled'], default=None), + 
community_name=dict(required=False, type='str'), + snmp_protocol=dict(required=False, choices=['All', 'SNMPv3'], default=None), + discovery_port=dict(required=False, type="int", default=161), + + # set up SNMP settings + ipmi_lan=dict(required=False, type='dict', options=dict(community_name=dict(required=False, type='str'))), + alert_port=dict(required=False, type='int', default=162), + trap_format=dict(required=False, choices=['SNMPv1', 'SNMPv2', 'SNMPv3'], default=None), + + ), + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + status = run_idrac_services_config(idrac, module) + if status.get('Status') == "Success": + changed = True + msg = "Successfully configured the iDRAC services settings." + if status.get('Message') and (status.get('Message') == "No changes found to commit!" or + "No changes were applied" in status.get('Message')): + msg = status.get('Message') + changed = False + module.exit_json(msg=msg, service_status=status, changed=changed) + else: + module.fail_json(msg="Failed to configure the iDRAC services.") + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py new file mode 100644 index 00000000..d667c916 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py @@ -0,0 +1,148 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: dellemc_get_firmware_inventory +short_description: Get Firmware Inventory +version_added: "1.0.0" +deprecated: + removed_at_date: "2023-01-15" + why: Replaced with M(dellemc.openmanage.idrac_firmware_info). + alternative: Use M(dellemc.openmanage.idrac_firmware_info) instead. + removed_from_collection: dellemc.openmanage +description: Get Firmware Inventory. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Rajeev Arakkal (@rajeevarakkal)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Get Installed Firmware Inventory + dellemc.openmanage.dellemc_get_firmware_inventory: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" +""" + +RETURNS = """ +ansible_facts: + description: Displays components and their firmware versions. Also, list of the firmware + dictionaries (one dictionary per firmware). 
+ returned: success + type: complex + sample: { + [ + { + "BuildNumber": "0", + "Classifications": "10", + "ComponentID": "101100", + "ComponentType": "FRMW", + "DeviceID": null, + "ElementName": "Power Supply.Slot.1", + "FQDD": "PSU.Slot.1", + "IdentityInfoType": "OrgID:ComponentType:ComponentID", + "IdentityInfoValue": "DCIM:firmware:101100", + "InstallationDate": "2018-01-18T07:25:08Z", + "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1", + "IsEntity": "true", + "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1", + "MajorVersion": "0", + "MinorVersion": "1", + "RevisionNumber": "7", + "RevisionString": null, + "Status": "Installed", + "SubDeviceID": null, + "SubVendorID": null, + "Updateable": "true", + "VendorID": null, + "VersionString": "00.1D.7D", + "impactsTPMmeasurements": "false" + } + ] + } +""" + + +import traceback +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +try: + from omsdk.sdkfile import LocalFile + from omsdk.catalog.sdkupdatemgr import UpdateManager + from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + + +def run_get_firmware_inventory(idrac, module): + """ + Get Firmware Inventory + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + + msg = {} + # msg['changed'] = False + msg['failed'] = False + msg['msg'] = {} + error = False + + try: + # idrac.use_redfish = True + msg['msg'] = idrac.update_mgr.InstalledFirmware + if "Status" in msg['msg']: + if msg['msg']['Status'] != "Success": + msg['failed'] = True + + except Exception as err: + error = True + msg['msg'] = "Error: %s" % str(err) + msg['exception'] = traceback.format_exc() + msg['failed'] = True + + return msg, error + + +# Main +def main(): + module = AnsibleModule( + argument_spec=idrac_auth_params, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) 
as idrac: + msg, err = run_get_firmware_inventory(idrac, module) + except (ImportError, ValueError, RuntimeError) as e: + module.fail_json(msg=str(e)) + + if err: + module.fail_json(**msg) + module.exit_json(ansible_facts={idrac.ipaddr: {'Firmware Inventory': msg['msg']}}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py new file mode 100644 index 00000000..e6a2d9ea --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: dellemc_get_system_inventory +short_description: Get the PowerEdge Server System Inventory +version_added: "1.0.0" +deprecated: + removed_at_date: "2023-01-15" + why: Replaced with M(dellemc.openmanage.idrac_system_info). + alternative: Use M(dellemc.openmanage.idrac_system_info) instead. + removed_from_collection: dellemc.openmanage +description: + - Get the PowerEdge Server System Inventory. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Rajeev Arakkal (@rajeevarakkal)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Get System Inventory + dellemc.openmanage.dellemc_get_system_inventory: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" +""" + +RETURNS = """ +ansible_facts: + description: Displays the Dell EMC PowerEdge Server System Inventory. + returned: success + type: complex + sample: { + "SystemInventory": { + "BIOS": [ + { + "BIOSReleaseDate": "10/19/2017", + "FQDD": "BIOS.Setup.1-1", + "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.00", + "Key": "DCIM:INSTALLED#741__BIOS.Setup.00", + "SMBIOSPresent": "True", + "VersionString": "1.2.11" + } + ], + "CPU": [ + { + "CPUFamily": "Intel(R) Xeon(TM)", + "Characteristics": "64-bit capable", + "CurrentClockSpeed": "2.3 GHz", + "DeviceDescription": "CPU 1", + "ExecuteDisabledCapable": "Yes", + } + ] + } +} +msg: + description: Details of the Error occurred. + returned: on error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +""" + + +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule + + +# Get System Inventory +def run_get_system_inventory(idrac, module): + msg = {} + msg['changed'] = False + msg['failed'] = False + err = False + + try: + # idrac.use_redfish = True + idrac.get_entityjson() + msg['msg'] = idrac.get_json_device() + except Exception as e: + err = True + msg['msg'] = "Error: %s" % str(e) + msg['failed'] = True + return msg, err + + +# Main +def main(): + module = AnsibleModule( + argument_spec=idrac_auth_params, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + msg, err = run_get_system_inventory(idrac, module) + except (ImportError, ValueError, RuntimeError) as e: + module.fail_json(msg=str(e)) + + if err: + module.fail_json(**msg) + module.exit_json(ansible_facts={idrac.ipaddr: {'SystemInventory': msg['msg']}}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py new file mode 100644 index 00000000..eec09c1c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: dellemc_idrac_lc_attributes +short_description: Enable or disable Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs +version_added: "1.0.0" +deprecated: + removed_at_date: "2024-07-31" + why: Replaced with M(dellemc.openmanage.idrac_attributes). + alternative: Use M(dellemc.openmanage.idrac_attributes) instead. + removed_from_collection: dellemc.openmanage +description: + - This module is responsible for enabling or disabling of Collect System Inventory on Restart (CSIOR) + property for all iDRAC/LC jobs. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for Network Share. + - This option is deprecated and will be removed in the later version. + csior: + type: str + description: Whether to Enable or Disable Collect System Inventory on Restart (CSIOR) + property for all iDRAC/LC jobs. 
+ choices: [Enabled, Disabled] + default: Enabled +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Set up iDRAC LC Attributes + dellemc.openmanage.dellemc_idrac_lc_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + csior: "Enabled" +""" + +RETURN = r''' +--- +msg: + description: Overall status of iDRAC LC attributes configuration. + returned: always + type: str + sample: Successfully configured the iDRAC LC attributes. +lc_attribute_status: + description: Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs is configured. + returned: success + type: dict + sample: { + "CompletionTime": "2020-03-30T00:06:53", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_1234512345", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. 
If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import os +import tempfile +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +try: + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +# Get Lifecycle Controller status +def run_setup_idrac_csior(idrac, module): + """ + Get Lifecycle Controller status + + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + idrac.use_redfish = True + share_path = tempfile.gettempdir() + os.sep + upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True) + if not upd_share.IsValid: + module.fail_json(msg="Unable to access the share. 
Ensure that the share name, " + "share mount, and share credentials provided are correct.") + set_liason = idrac.config_mgr.set_liason_share(upd_share) + if set_liason['Status'] == "Failed": + try: + message = set_liason['Data']['Message'] + except (IndexError, KeyError): + message = set_liason['Message'] + module.fail_json(msg=message) + if module.params['csior'] == 'Enabled': + # Enable csior + idrac.config_mgr.enable_csior() + elif module.params['csior'] == 'Disabled': + # Disable csior + idrac.config_mgr.disable_csior() + + if module.check_mode: + status = idrac.config_mgr.is_change_applicable() + if status.get("changes_applicable"): + module.exit_json(msg="Changes found to commit!", changed=True) + else: + module.exit_json(msg="No changes found to commit!") + else: + return idrac.config_mgr.apply_changes(reboot=False) + + +# Main +def main(): + specs = dict( + share_name=dict(required=False, type='str'), + share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True), + share_user=dict(required=False, type='str'), + share_mnt=dict(required=False, type='str'), + csior=dict(required=False, choices=['Enabled', 'Disabled'], default='Enabled') + ) + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + status = run_setup_idrac_csior(idrac, module) + if status.get('Status') == "Success": + changed = True + msg = "Successfully configured the iDRAC LC attributes." + if status.get('Message') and (status.get('Message') == "No changes found to commit!" 
or + "No changes were applied" in status.get('Message')): + msg = status.get('Message') + changed = False + module.exit_json(msg=msg, lc_attribute_status=status, changed=changed) + else: + module.fail_json(msg="Failed to configure the iDRAC LC attributes.") + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py new file mode 100644 index 00000000..01c915ea --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: dellemc_idrac_storage_volume +short_description: Configures the RAID configuration attributes +version_added: "2.0.0" +description: + - This module is responsible for configuring the RAID attributes. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + state: + type: str + description: + - C(create), performs create volume operation. + - C(delete), performs remove volume operation. + - C(view), returns storage view. + choices: ['create', 'delete', 'view'] + default: 'view' + span_depth: + type: int + description: + - Number of spans in the RAID configuration. 
+ - I(span_depth) is required for C(create) and its value depends on I(volume_type). + default: 1 + span_length: + type: int + description: + - Number of disks in a span. + - I(span_length) is required for C(create) and its value depends on I(volume_type). + default: 1 + number_dedicated_hot_spare: + type: int + description: Number of Dedicated Hot Spare. + default: 0 + volume_type: + type: str + description: Provide the the required RAID level. + choices: ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60'] + default: 'RAID 0' + disk_cache_policy: + type: str + description: Disk Cache Policy. + choices: ["Default", "Enabled", "Disabled"] + default: "Default" + write_cache_policy: + type: str + description: Write cache policy. + choices: ["WriteThrough", "WriteBack", "WriteBackForce"] + default: "WriteThrough" + read_cache_policy: + type: str + description: Read cache policy. + choices: ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"] + default: "NoReadAhead" + stripe_size: + type: int + description: Stripe size value to be provided in multiples of 64 * 1024. + default: 65536 + controller_id: + type: str + description: + - >- + Fully Qualified Device Descriptor (FQDD) of the storage controller, for example 'RAID.Integrated.1-1'. + Controller FQDD is required for C(create) RAID configuration. + media_type: + type: str + description: Media type. + choices: ['HDD', 'SSD'] + protocol: + type: str + description: Bus protocol. + choices: ['SAS', 'SATA'] + volume_id: + type: str + description: + - >- + Fully Qualified Device Descriptor (FQDD) of the virtual disk, for example 'Disk.virtual.0:RAID.Slot.1-1'. + This option is used to get the virtual disk information. + volumes: + type: list + elements: dict + description: + - >- + A list of virtual disk specific iDRAC attributes. This is applicable for C(create) and C(delete) operations. + - >- + For C(create) operation, name and drives are applicable options, other volume options can also be specified. 
+ - >- + The drives is a required option for C(create) operation and accepts either location (list of drive slot) + or id (list of drive fqdd). + - >- + For C(delete) operation, only name option is applicable. + - See the examples for more details. + capacity: + type: float + description: Virtual disk size in GB. + raid_reset_config: + type: str + description: + - >- + This option represents whether a reset config operation needs to be performed on the RAID controller. + Reset Config operation deletes all the virtual disks present on the RAID controller. + choices: ['True', 'False'] + default: 'False' + raid_init_operation: + type: str + description: This option represents initialization configuration operation to be performed on the virtual disk. + choices: [None, Fast] + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Create single volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "create" + controller_id: "RAID.Slot.1-1" + volumes: + - drives: + location: [5] + +- name: Create multiple volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + raid_reset_config: "True" + state: "create" + controller_id: "RAID.Slot.1-1" + volume_type: "RAID 1" + span_depth: 1 + span_length: 2 + number_dedicated_hot_spare: 1 + disk_cache_policy: "Enabled" + write_cache_policy: "WriteBackForce" + read_cache_policy: "ReadAhead" + stripe_size: 65536 + capacity: 100 + raid_init_operation: "Fast" + volumes: + - name: "volume_1" + drives: + id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"] + - name: "volume_2" + volume_type: "RAID 5" + span_length: 3 + span_depth: 1 + drives: + location: [7,3,5] + disk_cache_policy: "Disabled" + write_cache_policy: "WriteBack" + read_cache_policy: "NoReadAhead" + stripe_size: 131072 + capacity: "200" + raid_init_operation: "None" + +- name: View all volume details + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "view" + +- name: View specific volume details + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "view" + controller_id: "RAID.Slot.1-1" + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + +- name: Delete single volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + 
state: "delete" + volumes: + - name: "volume_1" + +- name: Delete multiple volume + dellemc.openmanage.dellemc_idrac_storage_volume: + idrac_ip: "192.168.0.1" + idrac_user: "username" + idrac_password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "delete" + volumes: + - name: "volume_1" + - name: "volume_2" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the storage configuration operation. + returned: always + sample: "Successfully completed the view storage volume operation" +storage_status: + type: dict + description: Storage configuration job and progress details from the iDRAC. + returned: success + sample: + { + "Id": "JID_XXXXXXXXX", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageId": "XXX123", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +''' + + +import os +import tempfile +import copy +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +try: + from omdrivers.types.iDRAC.RAID import RAIDactionTypes, RAIDdefaultReadPolicyTypes, RAIDinitOperationTypes, \ + DiskCachePolicyTypes, RAIDresetConfigTypes + from omsdk.sdkfile import file_share_manager +except ImportError: + pass + + +def error_handling_for_negative_num(option, val): + return "{0} cannot be a negative number or zero,got {1}".format(option, val) + + +def set_liason_share(idrac, module): + idrac.use_redfish = True + share_name = tempfile.gettempdir() + os.sep + storage_share = file_share_manager.create_share_obj(share_path=share_name, + isFolder=True) + set_liason = idrac.config_mgr.set_liason_share(storage_share) + if set_liason['Status'] == "Failed": + liason_data = set_liason.get('Data', set_liason) + 
module.fail_json(msg=liason_data.get('Message', "Failed to set Liason share")) + + +def view_storage(idrac, module): + idrac.get_entityjson() + storage_status = idrac.config_mgr.RaidHelper.view_storage(controller=module.params["controller_id"], + virtual_disk=module.params['volume_id']) + if storage_status['Status'] == 'Failed': + module.fail_json(msg="Failed to fetch storage details", storage_status=storage_status) + return storage_status + + +def create_storage(idrac, module): + pd_filter = '((disk.parent.parent is Controller and ' \ + 'disk.parent.parent.FQDD._value == "{0}")' \ + .format(module.params["controller_id"]) + pd_filter += ' or (disk.parent is Controller and ' \ + 'disk.parent.FQDD._value == "{0}"))' \ + .format(module.params["controller_id"]) + + vd_values = [] + if module.params['volumes'] is not None: + for each in module.params['volumes']: + mod_args = copy.deepcopy(module.params) + each_vd = multiple_vd_config(mod_args=mod_args, + each_vd=each, pd_filter=pd_filter) + vd_values.append(each_vd) + else: + each_vd = multiple_vd_config(mod_args=module.params, + pd_filter=pd_filter) + vd_values.append(each_vd) + storage_status = idrac.config_mgr.RaidHelper.new_virtual_disk(multiple_vd=vd_values, + apply_changes=not module.check_mode) + return storage_status + + +def delete_storage(idrac, module): + names = [key.get("name") for key in module.params['volumes']] + storage_status = idrac.config_mgr.RaidHelper.delete_virtual_disk(vd_names=names, + apply_changes=not module.check_mode) + return storage_status + + +def _validate_options(options): + if options['state'] == "create": + if options["controller_id"] is None or options["controller_id"] == "": + raise ValueError('Controller ID is required.') + capacity = options.get("capacity") + if capacity is not None: + size_check = float(capacity) + if size_check <= 0: + raise ValueError(error_handling_for_negative_num("capacity", capacity)) + stripe_size = options.get('stripe_size') + if stripe_size is not None: 
+ stripe_size_check = int(stripe_size) + if stripe_size_check <= 0: + raise ValueError(error_handling_for_negative_num("stripe_size", stripe_size)) + # validating for each vd options + if options['volumes'] is not None: + for each in options['volumes']: + drives = each.get("drives") + if drives: + if "id" in drives and "location" in drives: + raise ValueError("Either {0} or {1} is allowed".format("id", "location")) + elif "id" not in drives and "location" not in drives: + raise ValueError("Either {0} or {1} should be specified".format("id", "location")) + else: + raise ValueError("Drives must be defined for volume creation.") + capacity = each.get("capacity") + if capacity is not None: + size_check = float(capacity) + if size_check <= 0: + raise ValueError(error_handling_for_negative_num("capacity", capacity)) + stripe_size = each.get('stripe_size') + if stripe_size is not None: + stripe_size_check = int(stripe_size) + if stripe_size_check <= 0: + raise ValueError(error_handling_for_negative_num("stripe_size", stripe_size)) + elif options['state'] == "delete": + message = "Virtual disk name is a required parameter for remove virtual disk operations." 
+ if options['volumes'] is None or None in options['volumes']: + raise ValueError(message) + elif options['volumes']: + if not all("name" in each for each in options['volumes']): + raise ValueError(message) + + +def multiple_vd_config(mod_args=None, pd_filter="", each_vd=None): + if mod_args is None: + mod_args = {} + if each_vd is None: + each_vd = {} + if each_vd: + mod_args.update(each_vd) + disk_size = None + location_list = [] + id_list = [] + size = mod_args.get("capacity") + drives = mod_args.get("drives") + if drives: + if "location" in drives: + location_list = drives.get("location") + elif "id" in drives: + id_list = drives.get("id") + if size is not None: + size_check = float(size) + disk_size = "{0}".format(int(size_check * 1073741824)) + + if mod_args['media_type'] is not None: + pd_filter += ' and disk.MediaType == "{0}"'.format(mod_args['media_type']) + if mod_args["protocol"] is not None: + pd_filter += ' and disk.BusProtocol == "{0}"'.format(mod_args["protocol"]) + pd_selection = pd_filter + + if location_list: + slots = "" + for i in location_list: + slots += "\"" + str(i) + "\"," + slots_list = "[" + slots[0:-1] + "]" + pd_selection += " and disk.Slot._value in " + slots_list + elif id_list: + pd_selection += " and disk.FQDD._value in " + str(id_list) + + raid_init_operation, raid_reset_config = "None", "False" + if mod_args['raid_init_operation'] == "None": + raid_init_operation = RAIDinitOperationTypes.T_None + if mod_args['raid_init_operation'] == "Fast": + raid_init_operation = RAIDinitOperationTypes.Fast + + if mod_args['raid_reset_config'] == "False": + raid_reset_config = RAIDresetConfigTypes.T_False + if mod_args['raid_reset_config'] == "True": + raid_reset_config = RAIDresetConfigTypes.T_True + + vd_value = dict( + Name=mod_args.get("name"), + SpanDepth=int(mod_args['span_depth']), + SpanLength=int(mod_args['span_length']), + NumberDedicatedHotSpare=int(mod_args['number_dedicated_hot_spare']), + RAIDTypes=mod_args["volume_type"], + 
DiskCachePolicy=DiskCachePolicyTypes[mod_args['disk_cache_policy']], + RAIDdefaultWritePolicy=mod_args['write_cache_policy'], + RAIDdefaultReadPolicy=RAIDdefaultReadPolicyTypes[mod_args['read_cache_policy']], + StripeSize=int(mod_args['stripe_size']), + RAIDforeignConfig="Clear", + RAIDaction=RAIDactionTypes.Create, + PhysicalDiskFilter=pd_selection, + Size=disk_size, + RAIDresetConfig=raid_reset_config, + RAIDinitOperation=raid_init_operation, + PDSlots=location_list, + ControllerFQDD=mod_args.get("controller_id"), + mediatype=mod_args['media_type'], + busprotocol=mod_args["protocol"], + FQDD=id_list + ) + return vd_value + + +def run_server_raid_config(idrac, module): + if module.params['state'] == "view": + storage_status = view_storage(idrac, module) + if module.params['state'] == "create": + set_liason_share(idrac, module) + storage_status = create_storage(idrac, module) + if module.params['state'] == "delete": + set_liason_share(idrac, module) + storage_status = delete_storage(idrac, module) + return storage_status + + +def main(): + specs = { + "state": {"required": False, "choices": ['create', 'delete', 'view'], "default": 'view'}, + "volume_id": {"required": False, "type": 'str'}, + "volumes": {"required": False, "type": 'list', "elements": 'dict'}, + "span_depth": {"required": False, "type": 'int', "default": 1}, + "span_length": {"required": False, "type": 'int', "default": 1}, + "number_dedicated_hot_spare": {"required": False, "type": 'int', "default": 0}, + "volume_type": {"required": False, + "choices": ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60'], + "default": 'RAID 0'}, + "disk_cache_policy": {"required": False, "choices": ["Default", "Enabled", "Disabled"], + "default": "Default"}, + "write_cache_policy": {"required": False, "choices": ["WriteThrough", "WriteBack", "WriteBackForce"], + "default": "WriteThrough"}, + "read_cache_policy": {"required": False, "choices": ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"], + 
"default": "NoReadAhead"}, + "stripe_size": {"required": False, "type": 'int', "default": 64 * 1024}, + "capacity": {"required": False, "type": 'float'}, + "controller_id": {"required": False, "type": 'str'}, + "media_type": {"required": False, "choices": ['HDD', 'SSD']}, + "protocol": {"required": False, "choices": ['SAS', 'SATA']}, + "raid_reset_config": {"required": False, "choices": ['True', 'False'], "default": 'False'}, + "raid_init_operation": {"required": False, "choices": ['None', 'Fast']} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + _validate_options(module.params) + with iDRACConnection(module.params) as idrac: + storage_status = run_server_raid_config(idrac, module) + changed = False + if 'changes_applicable' in storage_status: + changed = storage_status['changes_applicable'] + elif module.params['state'] != 'view': + if storage_status.get("Status", "") == "Success": + changed = True + if storage_status.get("Message", "") == "No changes found to commit!" 
- This module allows you to enable or disable System Lockdown Mode.
+extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS Network Share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for Network Share. + - This option is deprecated and will be removed in the later version. + lockdown_mode: + required: True + type: str + description: Whether to Enable or Disable system lockdown mode. + choices: [Enabled, Disabled] +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module does not support C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Check System Lockdown Mode + dellemc.openmanage.dellemc_system_lockdown_mode: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lockdown_mode: "Disabled" +""" + +RETURN = r''' +--- +msg: + description: "Lockdown mode of the system is configured." + returned: always + type: str + sample: "Successfully completed the lockdown mode operations." 
+system_lockdown_status: + type: dict + description: Storage configuration job and progress details from the iDRAC. + returned: success + sample: + { + "Data": { + "StatusCode": 200, + "body": { + "@Message.ExtendedInfo": [ + { + "Message": "Successfully Completed Request", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.0.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + } + ] + } + }, + "Message": "none", + "Status": "Success", + "StatusCode": 200, + "retval": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import os +import tempfile +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +try: + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +# Get Lifecycle Controller status +def run_system_lockdown_mode(idrac, module): + """ + Get Lifecycle Controller status + + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + msg = {'changed': False, 'failed': False, 'msg': "Successfully completed the lockdown mode operations."} + idrac.use_redfish = True + share_path = tempfile.gettempdir() + os.sep + upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True) + if not upd_share.IsValid: + module.fail_json(msg="Unable to access the share. 
Ensure that the share name, " + "share mount, and share credentials provided are correct.") + set_liason = idrac.config_mgr.set_liason_share(upd_share) + if set_liason['Status'] == "Failed": + try: + message = set_liason['Data']['Message'] + except (IndexError, KeyError): + message = set_liason['Message'] + module.fail_json(msg=message) + if module.params['lockdown_mode'] == 'Enabled': + msg["system_lockdown_status"] = idrac.config_mgr.enable_system_lockdown() + elif module.params['lockdown_mode'] == 'Disabled': + msg["system_lockdown_status"] = idrac.config_mgr.disable_system_lockdown() + + if msg.get("system_lockdown_status") and "Status" in msg['system_lockdown_status']: + if msg['system_lockdown_status']['Status'] == "Success": + msg['changed'] = True + else: + module.fail_json(msg="Failed to complete the lockdown mode operations.") + return msg + + +# Main +def main(): + specs = dict( + share_name=dict(required=False, type='str'), + share_password=dict(required=False, type='str', + aliases=['share_pwd'], no_log=True), + share_user=dict(required=False, type='str'), + share_mnt=dict(required=False, type='str'), + lockdown_mode=dict(required=True, choices=['Enabled', 'Disabled']) + ) + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=False) + + try: + with iDRACConnection(module.params) as idrac: + msg = run_system_lockdown_mode(idrac, module) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + module.exit_json(msg=msg["msg"], system_lockdown_status=msg["system_lockdown_status"], changed=msg["changed"]) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py 
b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py new file mode 100644 index 00000000..c9c80854 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py @@ -0,0 +1,524 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: idrac_attributes +short_description: Configure the iDRAC attributes. +version_added: "6.0.0" +description: + - This module allows to configure the iDRAC attributes. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + idrac_attributes: + type: dict + description: + - "Dictionary of iDRAC attributes and value. The attributes should be + part of the Integrated Dell Remote Access Controller Attribute Registry. + To view the list of attributes in Attribute Registry for iDRAC9 and above, + see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1) + and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)." + - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. + If the manager attribute name in Server Configuration Profile is .# + (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is + .. (for Example, 'SNMP.1.AgentCommunity')." + system_attributes: + type: dict + description: + - "Dictionary of System attributes and value. The attributes should be + part of the Integrated Dell Remote Access Controller Attribute Registry. 
For iDRAC7 and iDRAC8 based servers, the values provided for the attributes are not validated.
+''' + +EXAMPLES = """ +--- +- name: Configure iDRAC attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: public + +- name: Configure System attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + ThermalSettings.1.ThermalProfile: Sound Cap + +- name: Configure Lifecycle Controller attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Enabled + +- name: Configure the iDRAC attributes for email alert settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + EmailAlert.1.CustomMsg: Display Message + EmailAlert.1.Enable: Enabled + EmailAlert.1.Address: test@test.com + +- name: Configure the iDRAC attributes for SNMP alert settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMPAlert.1.Destination: 192.168.0.2 + SNMPAlert.1.State: Enabled + SNMPAlert.1.SNMPv3Username: username + +- name: Configure the iDRAC attributes for SMTP alert settings. 
+ dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + RemoteHosts.1.SMTPServerIPAddress: 192.168.0.3 + RemoteHosts.1.SMTPAuthentication: Enabled + RemoteHosts.1.SMTPPort: 25 + RemoteHosts.1.SMTPUserName: username + RemoteHosts.1.SMTPPassword: password + +- name: Configure the iDRAC attributes for webserver settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + WebServer.1.SSLEncryptionBitLength: 128-Bit or higher + WebServer.1.TLSProtocol: TLS 1.1 and Higher + +- name: Configure the iDRAC attributes for SNMP settings. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.SNMPProtocol: All + SNMP.1.AgentEnable: Enabled + SNMP.1.TrapFormat: SNMPv1 + SNMP.1.AlertPort: 162 + SNMP.1.AgentCommunity: public + +- name: Configure the iDRAC LC attributes for collecting system inventory. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + lifecycle_controller_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: Enabled + +- name: Configure the iDRAC system attributes for LCD configuration. + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + system_attributes: + LCD.1.Configuration: Service Tag + LCD.1.vConsoleIndication: Enabled + LCD.1.FrontPanelLocking: Full-Access + LCD.1.UserDefinedString: custom string + +- name: Configure the iDRAC attributes for Timezone settings. 
+ dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + Time.1.TimeZone: CST6CDT + NTPConfigGroup.1.NTPEnable: Enabled + NTPConfigGroup.1.NTP1: 192.168.0.5 + NTPConfigGroup.1.NTP2: 192.168.0.6 + NTPConfigGroup.1.NTP3: 192.168.0.7 + +- name: Configure all attributes + dellemc.openmanage.idrac_attributes: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + idrac_attributes: + SNMP.1.AgentCommunity: test + SNMP.1.AgentEnable: Enabled + SNMP.1.DiscoveryPort: 161 + system_attributes: + ServerOS.1.HostName: demohostname + lifecycle_controller_attributes: + LCAttributes.1.AutoUpdate: Disabled +""" + +RETURN = r''' +--- +msg: + type: str + description: Status of the attribute update operation. + returned: always + sample: "Successfully updated the attributes." +invalid_attributes: + type: dict + description: Dict of invalid attributes provided. + returned: on invalid attributes or values. + sample: { + "LCAttributes.1.AutoUpdate": "Invalid value for Enumeration.", + "LCAttributes.1.StorageHealthRollupStatus": "Read only Attribute cannot be modified.", + "SNMP.1.AlertPort": "Not a valid integer.", + "SNMP.1.AlertPorty": "Attribute does not exist.", + "SysLog.1.PowerLogInterval": "Integer out of valid range.", + "ThermalSettings.1.AirExhaustTemp": "Invalid value for Enumeration." + } +error_info: + description: Error information of the operation. + returned: when attribute value is invalid. 
+ type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "The value 'false' for the property LCAttributes.1.BIOSRTDRequested is of a different type than the property can accept.", + "MessageArgs": [ + "false", + "LCAttributes.1.BIOSRTDRequested" + ], + "MessageArgs@odata.count": 2, + "MessageId": "Base.1.12.PropertyValueTypeError", + "RelatedProperties": [ + "#/Attributes/LCAttributes.1.BIOSRTDRequested" + ], + "RelatedProperties@odata.count": 1, + "Resolution": "Correct the value for the property in the request body and resubmit the request if the operation failed.", + "Severity": "Warning" + } + ], + "code": "Base.1.12.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information" + } + } +''' + +import json +import re +from ssl import SSLError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_manager_res_id +from ansible.module_utils.basic import AnsibleModule + + +SUCCESS_MSG = "Successfully updated the attributes." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +ATTR_FAIL_MSG = "Application of some of the attributes failed due to invalid value or enumeration." 
+SYSTEM_ID = "System.Embedded.1" +MANAGER_ID = "iDRAC.Embedded.1" +LC_ID = "LifecycleController.Embedded.1" +MANAGERS_URI = "/redfish/v1/Managers" +ATTR = "Attributes" +JOB_URI = "/redfish/v1/Managers/{manager_id}/Jobs/{job_id}" + + +def xml_data_conversion(attrbite, fqdd=None): + component = """{1}""" + attr = "" + json_data = {} + for k, v in attrbite.items(): + key = re.sub(r"(?<=\d)\.", "#", k) + attr += '{1}'.format(key, v) + json_data[key] = str(v) + root = component.format(fqdd, attr) + return root, json_data + + +def validate_attr_name(attribute, req_data): + invalid_attr = {} + data_dict = {attr["Name"]: attr["Value"] for attr in attribute if attr["Name"] in req_data.keys()} + if not len(data_dict) == len(req_data): + for key in req_data.keys(): + if key not in data_dict: + act_key = key.replace("#", ".") + invalid_attr[act_key] = "Attribute does not exist." + return data_dict, invalid_attr + + +def get_check_mode(module, idrac, idrac_json, sys_json, lc_json): + scp_response = idrac.export_scp(export_format="JSON", export_use="Default", + target="iDRAC,System,LifecycleController", job_wait=True) + comp = scp_response.json_data["SystemConfiguration"]["Components"] + exist_idrac, exist_sys, exist_lc, invalid = {}, {}, {}, {} + for cmp in comp: + if idrac_json and cmp.get("FQDD") == MANAGER_ID: + exist_idrac, invalid_attr = validate_attr_name(cmp["Attributes"], idrac_json) + if invalid_attr: + invalid.update(invalid_attr) + if sys_json and cmp.get("FQDD") == SYSTEM_ID: + exist_sys, invalid_attr = validate_attr_name(cmp["Attributes"], sys_json) + if invalid_attr: + invalid.update(invalid_attr) + if lc_json and cmp.get("FQDD") == LC_ID: + exist_lc, invalid_attr = validate_attr_name(cmp["Attributes"], lc_json) + if invalid_attr: + invalid.update(invalid_attr) + if invalid: + module.fail_json(msg="Attributes have invalid values.", invalid_attributes=invalid) + diff_change = [bool(set(exist_idrac.items()) ^ set(idrac_json.items())) or + bool(set(exist_sys.items()) 
^ set(sys_json.items())) or + bool(set(exist_lc.items()) ^ set(lc_json.items()))] + if module.check_mode and any(diff_change) is True: + module.exit_json(msg=CHANGES_MSG, changed=True) + elif (module.check_mode and all(diff_change) is False) or \ + (not module.check_mode and all(diff_change) is False): + module.exit_json(msg=NO_CHANGES_MSG) + + +def scp_idrac_attributes(module, idrac, res_id): + job_wait = module.params.get("job_wait", True) + idrac_attr = module.params.get("idrac_attributes") + system_attr = module.params.get("system_attributes") + lc_attr = module.params.get("lifecycle_controller_attributes") + root = """{0}""" + component = "" + idrac_json_data, system_json_data, lc_json_data = {}, {}, {} + if idrac_attr is not None: + idrac_xml_payload, idrac_json_data = xml_data_conversion(idrac_attr, fqdd=MANAGER_ID) + component += idrac_xml_payload + if system_attr is not None: + system_xml_payload, system_json_data = xml_data_conversion(system_attr, fqdd=SYSTEM_ID) + component += system_xml_payload + if lc_attr is not None: + lc_xml_payload, lc_json_data = xml_data_conversion(lc_attr, fqdd=LC_ID) + component += lc_xml_payload + get_check_mode(module, idrac, idrac_json_data, system_json_data, lc_json_data,) + payload = root.format(component) + resp = idrac.import_scp(import_buffer=payload, target="ALL", job_wait=False) + job_id = resp.headers["Location"].split("/")[-1] + job_uri = JOB_URI.format(manager_id=res_id, job_id=job_id) + job_resp = idrac.wait_for_job_completion(job_uri, job_wait=job_wait) + return job_resp + + +def get_response_attr(idrac, idrac_id, attr, uri_dict): + response_attr = {} + diff = 0 + response = idrac.invoke_request(uri_dict.get(idrac_id), "GET") + for k in attr.keys(): + if response.json_data[ATTR].get(k) != attr.get(k): + # response_attr[k] = response.json_data[ATTR].get(k) + response_attr[k] = attr.get(k) + diff += 1 + return diff, response_attr + + +def get_attributes_registry(idrac): + reggy = {} + try: + resp = 
idrac.invoke_request("/redfish/v1/Registries/ManagerAttributeRegistry", "GET") + loc_list = resp.json_data.get("Location", []) + if loc_list: + reg_json_uri = loc_list[-1].get("Uri") + reg_resp = idrac.invoke_request(reg_json_uri, "GET") + attr_list = reg_resp.json_data.get("RegistryEntries").get("Attributes") + reggy = dict((x["AttributeName"], x) for x in attr_list) + except Exception: + reggy = {} + return reggy + + +def validate_vs_registry(registry, attr_dict): + invalid = {} + for k, v in attr_dict.items(): + if k in registry: + val_dict = registry.get(k) + if val_dict.get("Readonly"): + invalid[k] = "Read only Attribute cannot be modified." + else: + type = val_dict.get("Type") + if type == "Enumeration": + found = False + for val in val_dict.get("Value", []): + if v == val.get("ValueDisplayName"): + found = True + break + if not found: + invalid[k] = "Invalid value for Enumeration." + if type == "Integer": + try: + i = int(v) + except Exception: + invalid[k] = "Not a valid integer." + else: + if not (val_dict.get("LowerBound") <= i <= val_dict.get("UpperBound")): + invalid[k] = "Integer out of valid range." + else: + invalid[k] = "Attribute does not exist." 
+ return invalid + + +def fetch_idrac_uri_attr(idrac, module, res_id): + diff = 0 + uri_dict = {} + idrac_response_attr = {} + system_response_attr = {} + lc_response_attr = {} + response = idrac.invoke_request("{0}/{1}".format(MANAGERS_URI, res_id), "GET") + dell_attributes = response.json_data.get('Links', {}).get('Oem', {}).get('Dell', {}).get('DellAttributes') + if dell_attributes: + for item in dell_attributes: + uri = item.get('@odata.id') + attr_id = uri.split("/")[-1] + uri_dict[attr_id] = uri + idrac_attr = module.params.get("idrac_attributes") + system_attr = module.params.get("system_attributes") + lc_attr = module.params.get("lifecycle_controller_attributes") + invalid = {} + attr_registry = get_attributes_registry(idrac) + if idrac_attr is not None: + x, idrac_response_attr = get_response_attr(idrac, MANAGER_ID, idrac_attr, uri_dict) + invalid.update(validate_vs_registry(attr_registry, idrac_response_attr)) + diff += x + if system_attr is not None: + x, system_response_attr = get_response_attr(idrac, SYSTEM_ID, system_attr, uri_dict) + invalid.update(validate_vs_registry(attr_registry, system_response_attr)) + diff += x + if lc_attr is not None: + x, lc_response_attr = get_response_attr(idrac, LC_ID, lc_attr, uri_dict) + invalid.update(validate_vs_registry(attr_registry, lc_response_attr)) + diff += x + if invalid: + module.exit_json(failed=True, msg="Attributes have invalid values.", invalid_attributes=invalid) + else: + job_resp = scp_idrac_attributes(module, idrac, res_id) + if job_resp.status_code == 200: + error_msg = ["Unable to complete application of configuration profile values.", + "Import of Server Configuration Profile operation completed with errors."] + message = job_resp.json_data["Message"] + message_id = job_resp.json_data["MessageId"] + if message_id == "SYS069": + module.exit_json(msg=NO_CHANGES_MSG) + elif message_id == "SYS053": + module.exit_json(msg=SUCCESS_MSG, changed=True) + elif message in error_msg: + 
module.fail_json(msg=ATTR_FAIL_MSG) + else: + module.fail_json(msg=message) + return diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr + + +def process_check_mode(module, diff): + if not diff: + module.exit_json(msg=NO_CHANGES_MSG) + elif diff and module.check_mode: + module.exit_json(msg=CHANGES_MSG, changed=True) + + +def update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr): + resp = {} + idrac_payload = module.params.get("idrac_attributes") + system_payload = module.params.get("system_attributes") + lc_payload = module.params.get("lifecycle_controller_attributes") + if idrac_payload is not None and idrac_response_attr is not None: + idrac_response = idrac.invoke_request(uri_dict.get(MANAGER_ID), "PATCH", data={ATTR: idrac_payload}) + resp["iDRAC"] = idrac_response.json_data + if system_payload is not None and system_response_attr is not None: + system_response = idrac.invoke_request(uri_dict.get(SYSTEM_ID), "PATCH", data={ATTR: system_payload}) + resp["System"] = system_response.json_data + if lc_payload is not None and lc_response_attr is not None: + lc_response = idrac.invoke_request(uri_dict.get(LC_ID), "PATCH", data={ATTR: lc_payload}) + resp["Lifecycle Controller"] = lc_response.json_data + return resp + + +def main(): + specs = { + "idrac_attributes": {"required": False, "type": 'dict'}, + "system_attributes": {"required": False, "type": 'dict'}, + "lifecycle_controller_attributes": {"required": False, "type": 'dict'}, + "resource_id": {"required": False, "type": 'str'} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_one_of=[('idrac_attributes', 'system_attributes', 'lifecycle_controller_attributes')], + supports_check_mode=True + ) + try: + with iDRACRedfishAPI(module.params, req_session=True) as idrac: + res_id = module.params.get('resource_id') + if not res_id: + res_id = get_manager_res_id(idrac) + diff, uri_dict, 
idrac_response_attr, system_response_attr, lc_response_attr = fetch_idrac_uri_attr(idrac, module, res_id) + process_check_mode(module, diff) + resp = update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr) + module.exit_json(msg=SUCCESS_MSG, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py new file mode 100644 index 00000000..8cd9c5e7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py @@ -0,0 +1,820 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.2.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_bios +short_description: Modify and clear BIOS attributes, reset BIOS settings and configure boot sources +version_added: "2.1.0" +description: + - This module allows to modify the BIOS attributes. Also clears pending BIOS attributes and resets BIOS to default settings. + - Boot sources can be enabled or disabled. Boot sequence can be configured. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + type: str + description: (deprecated)Network share or a local path. 
+ share_user: + type: str + description: "(deprecated)Network share user name. Use the format 'user@domain' or domain//user if user + is part of a domain. This option is mandatory for CIFS share." + share_password: + type: str + description: (deprecated)Network share user password. This option is mandatory for CIFS share. + aliases: ['share_pwd'] + share_mnt: + type: str + description: "(deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for network shares." + apply_time: + type: str + description: + - Apply time of the I(attributes). + - This is applicable only to I(attributes). + - "C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait) + is applicable." + - C(OnReset) Allows the user to apply the changes on the next reboot of the host server. + - "C(AtMaintenanceWindowStart) Allows the user to apply at the start of a maintenance window as specified + in I(maintenance_window). A reboot job will be scheduled." + - "C(InMaintenanceWindowOnReset) Allows to apply after a manual reset but within the maintenance window as + specified in I(maintenance_window)." + choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset] + default: Immediate + maintenance_window: + type: dict + description: + - Option to schedule the maintenance window. + - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset). + suboptions: + start_time: + type: str + description: + - The start time for the maintenance window to be scheduled. + - "The format is YYYY-MM-DDThh:mm:ss" + - " is the time offset from UTC that the current timezone set in + iDRAC in the format: +05:30 for IST." + required: True + duration: + type: int + description: + - The duration in seconds for the maintenance window. + required: True + attributes: + type: dict + description: + - "Dictionary of BIOS attributes and value pair. 
Attributes should be + part of the Redfish Dell BIOS Attribute Registry. Use + U(https://I(idrac_ip)/redfish/v1/Systems/System.Embedded.1/Bios) to view the Redfish URI." + - This is mutually exclusive with I(boot_sources), I(clear_pending), and I(reset_bios). + boot_sources: + type: list + elements: raw + description: + - (deprecated)List of boot devices to set the boot sources settings. + - I(boot_sources) is mutually exclusive with I(attributes), I(clear_pending), and I(reset_bios). + - I(job_wait) is not applicable. The module waits till the completion of this task. + - This feature is deprecated, please use M(dellemc.openmanage.idrac_boot) for configuring boot sources. + clear_pending: + type: bool + description: + - Allows the user to clear all pending BIOS attributes changes. + - C(true) will discard any pending changes to bios attributes or remove job if in scheduled state. + - This operation will not create any job. + - C(false) will not perform any operation. + - This is mutually exclusive with I(boot_sources), I(attributes), and I(reset_bios). + - C(Note) Any BIOS job scheduled due to boot sources configuration will not be cleared. + reset_bios: + type: bool + description: + - Resets the BIOS to default settings and triggers a reboot of host system. + - This is applied to the host after the restart. + - This operation will not create any job. + - C(false) will not perform any operation. + - This is mutually exclusive with I(boot_sources), I(attributes), and I(clear_pending). + - When C(true), this action will always report as changes found to be applicable. + reset_type: + type: str + description: + - C(force_restart) Forcefully reboot the host system. + - C(graceful_restart) Gracefully reboot the host system. + - This is applicable for I(reset_bios), and I(attributes) when I(apply_time) is C(Immediate). 
+ choices: [graceful_restart, force_restart] + default: graceful_restart + job_wait: + type: bool + description: + - Provides the option to wait for job completion. + - This is applicable for I(attributes) when I(apply_time) is C(Immediate). + default: true + job_wait_timeout: + type: int + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + default: 1200 +requirements: + - "omsdk >= 1.2.490" + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Anooja Vardhineni (@anooja-vardhineni)" + - "Jagadeesh N V (@jagadeeshnv)" +notes: + - omsdk is required to be installed only for I(boot_sources) operation. + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure generic attributes of the BIOS + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + attributes: + BootMode : "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + +- name: Configure PXE generic attributes + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + attributes: + PxeDev1EnDis: "Enabled" + PxeDev1Protocol: "IPV4" + PxeDev1VlanEnDis: "Enabled" + PxeDev1VlanId: 1 + PxeDev1Interface: "NIC.Embedded.1-1-1" + PxeDev1VlanPriority: 2 + +- name: Configure BIOS attributes at Maintenance window + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + apply_time: AtMaintenanceWindowStart + maintenance_window: + start_time: "2022-09-30T05:15:40-05:00" + duration: 600 + attributes: + BootMode : "Bios" + 
OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + +- name: Clear pending BIOS attributes + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + clear_pending: yes + +- name: Reset BIOS attributes to default settings. + dellemc.openmanage.idrac_bios: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_pwd }}" + validate_certs: False + reset_bios: yes + +- name: Configure boot sources + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-2-3" + Enabled : true + Index : 0 + +- name: Configure multiple boot sources + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Enabled : true + Index : 0 + - Name : "NIC.Integrated.2-2-2" + Enabled : true + Index : 1 + - Name : "NIC.Integrated.3-3-3" + Enabled : true + Index : 2 + +- name: Configure boot sources - Enabling + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Enabled : true + +- name: Configure boot sources - Index + dellemc.openmanage.idrac_bios: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_sources: + - Name : "NIC.Integrated.1-1-1" + Index : 0 +""" + +RETURN = """ +--- +status_msg: + description: Overall status of the bios operation. + returned: success + type: str + sample: Successfully cleared pending BIOS attributes. +msg: + description: Status of the job for I(boot_sources) or status of the action performed on bios. 
+ returned: success + type: dict + sample: { + "CompletionTime": "2020-04-20T18:50:20", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_873888162305", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +invalid_attributes: + type: dict + description: Dict of invalid attributes provided. + returned: on invalid attributes or values. + sample: { + "PxeDev1VlanId": "Not a valid integer.", + "AcPwrRcvryUserDelay": "Integer out of valid range.", + "BootSeqRetry": "Invalid value for Enumeration.", + "Proc1Brand": "Read only Attribute cannot be modified.", + "AssetTag": "Attribute does not exist." + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +""" + +SYSTEM_URI = "/redfish/v1/Systems/System.Embedded.1" +MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1" +BIOS_URI = "/redfish/v1/Systems/System.Embedded.1/Bios" +BIOS_REGISTRY = "/redfish/v1/Systems/System.Embedded.1/Bios/BiosRegistry" +CLEAR_PENDING_URI = "/redfish/v1/Systems/System.Embedded.1/Bios/Settings/Actions/Oem/DellManager.ClearPending" +RESET_BIOS_DEFAULT = "/redfish/v1/Systems/System.Embedded.1/Bios/Actions/Bios.ResetBios" +BIOS_SETTINGS = "/redfish/v1/Systems/System.Embedded.1/Bios/Settings" +POWER_HOST_URI = "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" +IDRAC_JOBS_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs" +iDRAC_JOBS_EXP = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)" +iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}" +LOG_SERVICE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog" +iDRAC9_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries" +iDRAC8_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/Logs/Lclog" +LC_LOG_FILTER = "?$filter=MessageId%20eq%20'UEFI0157'" +CPU_RST_FILTER = "?$filter=MessageId%20eq%20'SYS1003'" +BIOS_JOB_RUNNING = "BIOS Config job is running. Wait for the job to complete." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +SUCCESS_CLEAR = "Successfully cleared the pending BIOS attributes." +SUCCESS_COMPLETE = "Successfully applied the BIOS attributes update." +SCHEDULED_SUCCESS = "Successfully scheduled the job for the BIOS attributes update." +COMMITTED_SUCCESS = "Successfully committed changes. The job is in pending state. The changes will be applied {0}" +RESET_TRIGGERRED = "Reset BIOS action triggered successfully." +HOST_RESTART_FAILED = "Unable to restart the host. Check the host status and restart the host manually." +BIOS_RESET_TRIGGERED = "The BIOS reset action has been triggered successfully. The host reboot is complete." 
def run_server_bios_config(idrac, module):
    """Configure boot sources through the OMSDK config manager.

    NOTE(review): the return value of _validate_params() is discarded here,
    so a validation error message does not stop the configuration call —
    confirm whether that is intentional.

    :param idrac: OMSDK iDRAC connection object.
    :param module: AnsibleModule instance carrying 'boot_sources' params.
    :return: result dict from the OMSDK configure_boot_sources call.
    """
    idrac.use_redfish = True
    boot_devices = module.params['boot_sources']
    _validate_params(boot_devices)
    if module.check_mode:
        # Check mode probes applicability first; result is not used here
        # (NOTE(review): presumably handled inside omsdk — verify).
        idrac.config_mgr.is_change_applicable()
    return idrac.config_mgr.configure_boot_sources(input_boot_devices=boot_devices)
+ """ + fields = [ + {"name": "Name", "type": str, "required": True}, + {"name": "Index", "type": int, "required": False, "min": 0}, + {"name": "Enabled", "type": bool, "required": False} + ] + default = ['Name', 'Index', 'Enabled'] + for attr in params: + if not isinstance(attr, dict): + msg = "attribute values must be of type: dict. {0} ({1}) provided.".format(attr, type(attr)) + return msg + elif all(k in default for k in attr.keys()): + msg = check_params(attr, fields) + return msg + else: + msg = "attribute keys must be one of the {0}.".format(default) + return msg + msg = _validate_name_index_duplication(params) + return msg + + +def _validate_name_index_duplication(params): + """ + Validate for duplicate names and indices. + :param params: Ansible list of dict + :return: bool or error. + """ + msg = "" + for i in range(len(params) - 1): + for j in range(i + 1, len(params)): + if params[i]['Name'] == params[j]['Name']: + msg = "duplicate name {0}".format(params[i]['Name']) + return msg + return msg + + +def check_params(each, fields): + """ + Each dictionary parameters validation as per the rule defined in fields. + :param each: validating each dictionary + :param fields: list of dictionary which has the set of rules. + :return: tuple which has err and message + """ + msg = "" + for f in fields: + if f['name'] not in each and f["required"] is False: + continue + if not f["name"] in each and f["required"] is True: + msg = "{0} is required and must be of type: {1}".format(f['name'], f['type']) + elif not isinstance(each[f["name"]], f["type"]): + msg = "{0} must be of type: {1}. 
def get_pending_attributes(redfish_obj):
    """Return the pending BIOS attribute changes staged on the iDRAC.

    Any failure to read or parse the BIOS Settings resource is deliberately
    swallowed so callers can treat "cannot read" the same as "nothing pending".

    :param redfish_obj: iDRAC Redfish API session object.
    :return: the 'Attributes' payload of the Settings resource, or {} on error.
    """
    try:
        pending = redfish_obj.invoke_request(BIOS_SETTINGS, "GET").json_data.get("Attributes")
    except Exception:
        pending = {}
    return pending
def get_current_time(redfish_obj):
    """Fetch the iDRAC's current time and its local UTC offset.

    :param redfish_obj: iDRAC Redfish API session object.
    :return: tuple (DateTime, DateTimeLocalOffset) strings from the manager
        resource, or (None, None) when the manager cannot be read.
    """
    try:
        manager = redfish_obj.invoke_request(MANAGER_URI, "GET").json_data
        return manager.get("DateTime"), manager.get("DateTimeLocalOffset")
    except Exception:
        return None, None
def get_attributes_registry(idrac):
    """Build a mapping of BIOS attribute name to its registry entry.

    :param idrac: iDRAC Redfish API session object.
    :return: dict {AttributeName: registry entry dict}, or {} when the BIOS
        attribute registry cannot be fetched or parsed.
    """
    try:
        entries = idrac.invoke_request(BIOS_REGISTRY, "GET").json_data["RegistryEntries"]["Attributes"]
        return {entry["AttributeName"]: entry for entry in entries}
    except Exception:
        # Validation against the registry is best-effort; callers skip it
        # when this returns empty.
        return {}
def get_redfish_apply_time(module, redfish_obj, aplytm, rf_settings):
    """Translate the requested apply time into a @Redfish.SettingsApplyTime payload.

    :param module: AnsibleModule (used to report unsupported apply times and
        to read the maintenance_window parameters).
    :param redfish_obj: iDRAC Redfish API session object (for time validation).
    :param aplytm: requested apply time string.
    :param rf_settings: SupportedApplyTimes advertised by the BIOS Settings
        resource; when empty, no ApplyTime payload is produced.
    :return: tuple (apply_time_payload, reboot_required). reboot_required is
        True only when Immediate was requested but unsupported, in which case
        OnReset is sent and the caller must reboot the host itself.
    """
    apply_payload = {}
    needs_reboot = False
    if rf_settings:
        if 'Maintenance' in aplytm:
            if aplytm not in rf_settings:
                module.exit_json(failed=True, status_msg=UNSUPPORTED_APPLY_TIME.format(aplytm))
            else:
                apply_payload['ApplyTime'] = aplytm
                window = module.params.get('maintenance_window')
                # The window start must be in the future and carry the
                # iDRAC's local UTC offset.
                validate_time(module, redfish_obj, window.get('start_time'))
                apply_payload['MaintenanceWindowStartTime'] = window.get('start_time')
                apply_payload['MaintenanceWindowDurationInSeconds'] = window.get('duration')
        else:
            if aplytm == "Immediate" and aplytm not in rf_settings:
                # Fall back to OnReset and let the caller trigger the restart.
                needs_reboot = True
                aplytm = 'OnReset'
            apply_payload['ApplyTime'] = aplytm
    return apply_payload, needs_reboot
def apply_attributes(module, redfish_obj, pending, rf_settings):
    """PATCH the staged BIOS attributes and return the resulting config job.

    :param module: AnsibleModule instance (supplies 'apply_time').
    :param redfish_obj: iDRAC Redfish API session object.
    :param pending: merged dict of attribute changes to stage.
    :param rf_settings: SupportedApplyTimes list from the BIOS Settings resource.
    :return: tuple (job_id, reboot_required); reboot_required tells the caller
        to restart the host so the staged job executes.
    """
    requested_apply_time = module.params.get('apply_time')
    apply_time_payload, reboot_required = get_redfish_apply_time(
        module, redfish_obj, requested_apply_time, rf_settings)
    patch_body = {"Attributes": pending}
    if apply_time_payload:
        patch_body["@Redfish.SettingsApplyTime"] = apply_time_payload
    patch_resp = redfish_obj.invoke_request(BIOS_SETTINGS, "PATCH", data=patch_body)
    if apply_time_payload:
        # The Location header points at the created job; read it back
        # (response unused) and take the job id from the URI tail.
        redfish_obj.invoke_request(patch_resp.headers["Location"], "GET")
        job_id = patch_resp.headers["Location"].split("/")[-1]
    else:
        # No ApplyTime payload was sent: create the BIOS config job explicitly.
        if requested_apply_time == "Immediate":
            reboot_required = True
        job_id = trigger_bios_job(redfish_obj)
    return job_id, reboot_required
def main():
    """Module entry point: parse arguments and route to the requested BIOS operation."""
    specs = {
        "share_name": {"type": 'str'},
        "share_user": {"type": 'str'},
        "share_password": {"type": 'str', "aliases": ['share_pwd'], "no_log": True},
        "share_mnt": {"type": 'str'},
        "attributes": {"type": 'dict'},
        "boot_sources": {"type": 'list', 'elements': 'raw'},
        "apply_time": {"type": 'str', "default": 'Immediate',
                       "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
        "maintenance_window": {"type": 'dict',
                               "options": {"start_time": {"type": 'str', "required": True},
                                           "duration": {"type": 'int', "required": True}}},
        "clear_pending": {"type": 'bool'},
        "reset_bios": {"type": 'bool'},
        "reset_type": {"type": 'str', "choices": ['graceful_restart', 'force_restart'], "default": 'graceful_restart'},
        "job_wait": {"type": 'bool', "default": True},
        "job_wait_timeout": {"type": 'int', "default": 1200}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('boot_sources', 'attributes', 'clear_pending', 'reset_bios')],
        required_one_of=[('boot_sources', 'attributes', 'clear_pending', 'reset_bios')],
        required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
                     ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
        supports_check_mode=True)
    try:
        if module.params.get("boot_sources") is not None:
            # Deprecated boot-sources path goes through the OMSDK connection.
            with iDRACConnection(module.params) as idrac:
                result = run_server_bios_config(idrac, module)
                status = result.get('Status')
                # omsdk reports Success even when nothing changed; detect that.
                changed = status == "Success" and result.get('Message') != "No changes found to commit!"
                module.exit_json(msg=result, changed=changed, failed=(status == "Failed"))
        else:
            with iDRACRedfishAPI(module.params, req_session=True) as redfish_obj:
                if module.params.get("clear_pending"):
                    clear_pending_bios(module, redfish_obj)
                if module.params.get("reset_bios"):
                    reset_bios(module, redfish_obj)
                if module.params.get('attributes'):
                    attributes_config(module, redfish_obj)
                module.exit_json(status_msg=NO_CHANGES_MSG)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_boot +short_description: Configure the boot order settings. +version_added: "6.1.0" +description: + - This module allows to configure the boot order settings. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + boot_options: + type: list + elements: dict + description: + - Options to enable or disable the boot devices. + - This is mutually exclusive with I(boot_order), I(boot_source_override_mode), I(boot_source_override_enabled) + I(boot_source_override_target), and I(uefi_target_boot_source_override). + suboptions: + boot_option_reference: + type: str + description: + - FQDD of the boot device. + - This is mutually exclusive with I(display_name). + display_name: + type: str + description: + - Display name of the boot source device. + - This is mutually exclusive with I(boot_option_reference). + enabled: + type: bool + required: true + description: Enable or disable the boot device. + boot_order: + type: list + elements: str + description: + - This option allows to set the boot devices in the required boot order sequences. + - This is mutually exclusive with I(boot_options). + boot_source_override_mode: + type: str + description: + - The BIOS boot mode (either Legacy or UEFI) to be used when I(boot_source_override_target) + boot source is booted from. + - C(legacy) The system boot in non-UEFI(Legacy) boot mode to the I(boot_source_override_target). + - C(uefi) The system boot in UEFI boot mode to the I(boot_source_override_target). + - This is mutually exclusive with I(boot_options). + choices: [legacy, uefi] + boot_source_override_enabled: + type: str + description: + - The state of the Boot Source Override feature. 
+ - C(disabled) The system boots normally. + - C(once) The system boots (one time) to the I(boot_source_override_target). + - C(continuous) The system boots to the target specified in the I(boot_source_override_target) + until this property is set to Disabled. + - The state is set to C(once) for the one-time boot override and C(continuous) for the + remain-active-until—canceled override. If the state is set C(once), the value is reset + to C(disabled) after the I(boot_source_override_target) actions have completed successfully. + - Changes to this options do not alter the BIOS persistent boot order configuration. + - This is mutually exclusive with I(boot_options). + choices: [continuous, disabled, once] + boot_source_override_target: + type: str + description: + - The boot source override target device to use during the next boot instead of the normal boot device. + - C(pxe) performs PXE boot from the primary NIC. + - C(floppy), C(cd), C(hdd), C(sd_card) performs boot from their devices respectively. + - C(bios_setup) performs boot into the native BIOS setup. + - C(utilities) performs boot from the local utilities. + - C(uefi_target) performs boot from the UEFI device path found in I(uefi_target_boot_source_override). + - If the I(boot_source_override_target) is set to a value other than C(none) then the + I(boot_source_override_enabled) is automatically set to C(once). + - Changes to this options do not alter the BIOS persistent boot order configuration. + - This is mutually exclusive with I(boot_options). + choices: [uefi_http, sd_card, uefi_target, utilities, bios_setup, hdd, cd, floppy, pxe, none] + uefi_target_boot_source_override: + type: str + description: + - The UEFI device path of the device from which to boot when I(boot_source_override_target) is C(uefi_target). + - I(boot_source_override_enabled) cannot be set to c(continuous) if I(boot_source_override_target) + set to C(uefi_target) because this settings is defined in UEFI as a one-time-boot setting. 
+ - Changes to this options do not alter the BIOS persistent boot order configuration. + - This is required if I(boot_source_override_target) is C(uefi_target). + - This is mutually exclusive with I(boot_options). + reset_type: + type: str + description: + - C(none) Host system is not rebooted and I(job_wait) is not applicable. + - C(force_reset) Forcefully reboot the Host system. + - C(graceful_reset) Gracefully reboot the Host system. + choices: [graceful_restart, force_restart, none] + default: graceful_restart + job_wait: + type: bool + description: + - Provides the option to wait for job completion. + - This is applicable when I(reset_type) is C(force_reset) or C(graceful_reset). + default: true + job_wait_timeout: + type: int + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + default: 900 + resource_id: + type: str + description: Redfish ID of the resource. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports C(check_mode). +""" + + +EXAMPLES = """ +--- +- name: Configure the system boot options settings. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_options: + - display_name: Hard drive C + enabled: true + - boot_option_reference: NIC.PxeDevice.2-1 + enabled: true + +- name: Configure the boot order settings. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_order: + - Boot0001 + - Boot0002 + - Boot0004 + - Boot0003 + +- name: Configure the boot source override mode. 
+ dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: cd + boot_source_override_enabled: once + +- name: Configure the UEFI target settings. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: uefi + boot_source_override_target: uefi_target + uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)" + +- name: Configure the boot source override mode as pxe. + dellemc.openmanage.idrac_boot: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + boot_source_override_mode: legacy + boot_source_override_target: pxe + boot_source_override_enabled: continuous +""" + + +RETURN = r''' +--- +msg: + description: Successfully updated the boot settings. + returned: success + type: str + sample: Successfully updated the boot settings. +job: + description: Configured job details. + returned: success + type: dict + sample: { + "ActualRunningStartTime": "2019-06-19T00:57:24", + "ActualRunningStopTime": "2019-06-19T01:00:27", + "CompletionTime": "2019-06-19T01:00:27", + "Description": "Job Instance", + "EndTime": "TIME_NA", + "Id": "JID_609237056489", + "JobState": "Completed", + "JobType": "BIOSConfiguration", + "Message": "Job completed successfully.", + "MessageArgs": [], + "MessageId": "PR19", + "Name": "Configure: BIOS.Setup.1-1", + "PercentComplete": 100, + "StartTime": "2019-06-19T00:55:05", + "TargetSettingsURI": null } +boot: + description: Configured boot settings details. 
+ returned: success + type: dict + sample: { + "BootOptions": { + "Description": "Collection of BootOptions", + "Members": [{ + "BootOptionEnabled": false, + "BootOptionReference": "HardDisk.List.1-1", + "Description": "Current settings of the Legacy Boot option", + "DisplayName": "Hard drive C:", + "Id": "HardDisk.List.1-1", + "Name": "Legacy Boot option", + "UefiDevicePath": "VenHw(D6C0639F-C705-4EB9-AA4F-5802D8823DE6)"}], + "Name": "Boot Options Collection" + }, + "BootOrder": [ "HardDisk.List.1-1"], + "BootSourceOverrideEnabled": "Disabled", + "BootSourceOverrideMode": "Legacy", + "BootSourceOverrideTarget": "None", + "UefiTargetBootSourceOverride": null } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + + +import json +import time +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (strip_substr_dict, idrac_system_reset, + get_system_res_id, + wait_for_idrac_job_completion) +from ansible.module_utils.basic import AnsibleModule + +SYSTEM_URI = "/redfish/v1/Systems" +BOOT_OPTIONS_URI = "/redfish/v1/Systems/{0}/BootOptions?$expand=*($levels=1)" +JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)" +JOB_URI_ID = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}" +BOOT_SEQ_URI = "/redfish/v1/Systems/{0}/BootSources" +PATCH_BOOT_SEQ_URI = "/redfish/v1/Systems/{0}/BootSources/Settings" + +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +JOB_EXISTS = "Unable to complete the request because the BIOS configuration job already " \ + "exists. Wait for the pending job to complete." +BOOT_OPT_ERROR_MSG = "{0} boot_options provided." +INVALID_BOOT_OPT = "{0} boot order reference provided." +SUCCESS_MSG = "Successfully updated the boot settings." +FAILED_MSG = "Failed to update the boot settings." +UNSUPPORTED_MSG = "The system does not support the BootOptions feature." +JOB_WAIT_MSG = "The boot settings job is triggered successfully." +AUTH_ERROR_MSG = "Unable to communicate with iDRAC {0}. This may be due to one of the following: " \ + "Incorrect username or password, unreachable iDRAC IP or a failure in TLS/SSL handshake." 
+ +BS_OVERRIDE_MODE = {"legacy": "Legacy", "uefi": "UEFI"} +BS_OVERRIDE_ENABLED = {"continuous": "Continuous", "disabled": "Disabled", "once": "Once"} +BS_OVERRIDE_TARGET = {"none": "None", "pxe": "Pxe", "floppy": "Floppy", "cd": "Cd", + "hdd": "Hdd", "bios_setup": "BiosSetup", "utilities": "Utilities", + "uefi_target": "UefiTarget", "sd_card": "SDCard", "uefi_http": "UefiHttp"} +RESET_TYPE = {"graceful_restart": "GracefulRestart", "force_restart": "ForceRestart", "none": None} + + +def get_response_attributes(module, idrac, res_id): + resp = idrac.invoke_request("{0}/{1}".format(SYSTEM_URI, res_id), "GET") + resp_data = resp.json_data["Boot"] + resp_data.pop("Certificates", None) + resp_data.pop("BootOrder@odata.count", None) + resp_data.pop("BootSourceOverrideTarget@Redfish.AllowableValues", None) + if resp_data.get("BootOptions") is None and module.params.get("boot_options") is not None: + module.fail_json(msg=UNSUPPORTED_MSG) + if resp.json_data.get("Actions") is not None: + type_reset = resp.json_data["Actions"]["#ComputerSystem.Reset"]["ResetType@Redfish.AllowableValues"] + if "GracefulRestart" not in type_reset: + RESET_TYPE["graceful_restart"] = "ForceRestart" + return resp_data + + +def get_existing_boot_options(idrac, res_id): + resp = idrac.invoke_request(BOOT_OPTIONS_URI.format(res_id), "GET") + resp_data = strip_substr_dict(resp.json_data) + strip_members = [] + for each in resp_data["Members"]: + strip_members.append(strip_substr_dict(each)) + resp_data["Members"] = strip_members + return resp_data + + +def system_reset(module, idrac, res_id): + reset_msg, track_failed, reset, reset_type, job_resp = "", False, True, module.params.get("reset_type"), {} + if reset_type is not None and not reset_type == "none": + data = {"ResetType": RESET_TYPE[reset_type]} + reset, track_failed, reset_msg, job_resp = idrac_system_reset(idrac, res_id, payload=data, job_wait=True) + if RESET_TYPE["graceful_restart"] == "ForceRestart": + reset = True + if reset_type == 
"force_restart" and RESET_TYPE["graceful_restart"] == "GracefulRestart": + reset = True + return reset, track_failed, reset_msg, job_resp + + +def get_scheduled_job(idrac, job_state=None): + if job_state is None: + job_state = ["Scheduled", "New", "Running"] + is_job, job_type_name, progress_job = False, "BIOSConfiguration", [] + time.sleep(10) + job_resp = idrac.invoke_request(JOB_URI, "GET") + job_resp_member = job_resp.json_data["Members"] + if job_resp_member: + bios_config_job = list(filter(lambda d: d.get("JobType") in [job_type_name], job_resp_member)) + progress_job = list(filter(lambda d: d.get("JobState") in job_state, bios_config_job)) + if progress_job: + is_job = True + return is_job, progress_job + + +def configure_boot_options(module, idrac, res_id, payload): + is_job, progress_job = get_scheduled_job(idrac) + job_data, job_wait = {}, module.params["job_wait"] + resp_data = get_response_attributes(module, idrac, res_id) + override_mode = resp_data["BootSourceOverrideMode"] + if module.params["reset_type"] == "none": + job_wait = False + if is_job: + module.fail_json(msg=JOB_EXISTS) + boot_seq_resp = idrac.invoke_request(BOOT_SEQ_URI.format(res_id), "GET") + seq_key = "BootSeq" if override_mode == "Legacy" else "UefiBootSeq" + boot_seq_data = boot_seq_resp.json_data["Attributes"][seq_key] + [each.update({"Enabled": payload.get(each["Name"])} + ) for each in boot_seq_data if payload.get(each["Name"]) is not None] + seq_payload = {"Attributes": {seq_key: boot_seq_data}, "@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}} + if seq_key == "UefiBootSeq": + for i in range(len(boot_seq_data)): + if payload.get(resp_data["BootOrder"][i]) is not None: + boot_seq_data[i].update({"Enabled": payload.get(resp_data["BootOrder"][i])}) + seq_payload["Attributes"][seq_key] = boot_seq_data + resp = idrac.invoke_request(PATCH_BOOT_SEQ_URI.format(res_id), "PATCH", data=seq_payload) + if resp.status_code == 202: + location = resp.headers["Location"] + job_id = 
location.split("/")[-1] + reset, track_failed, reset_msg, reset_job_resp = system_reset(module, idrac, res_id) + if reset_job_resp: + job_data = reset_job_resp.json_data + if reset: + job_resp, error_msg = wait_for_idrac_job_completion(idrac, JOB_URI_ID.format(job_id), + job_wait=job_wait, + wait_timeout=module.params["job_wait_timeout"]) + if error_msg: + module.fail_json(msg=error_msg) + job_data = job_resp.json_data + else: + module.fail_json(msg=reset_msg) + return job_data + + +def apply_boot_settings(module, idrac, payload, res_id): + job_data, job_wait = {}, module.params["job_wait"] + if module.params["reset_type"] == "none": + job_wait = False + resp = idrac.invoke_request("{0}/{1}".format(SYSTEM_URI, res_id), "PATCH", data=payload) + if resp.status_code == 200: + reset, track_failed, reset_msg, reset_job_resp = system_reset(module, idrac, res_id) + if reset_job_resp: + job_data = reset_job_resp.json_data + is_job, progress_job = get_scheduled_job(idrac) + if is_job: + if reset: + job_resp, error_msg = wait_for_idrac_job_completion(idrac, JOB_URI_ID.format(progress_job[0]["Id"]), + job_wait=job_wait, + wait_timeout=module.params["job_wait_timeout"]) + if error_msg: + module.fail_json(msg=error_msg) + job_data = job_resp.json_data + else: + module.fail_json(msg=reset_msg) + return job_data + + +def configure_boot_settings(module, idrac, res_id): + job_resp, diff_change, payload = {}, [], {"Boot": {}} + boot_order = module.params.get("boot_order") + override_mode = module.params.get("boot_source_override_mode") + override_enabled = module.params.get("boot_source_override_enabled") + override_target = module.params.get("boot_source_override_target") + response = get_response_attributes(module, idrac, res_id) + if boot_order is not None: + exist_boot_order = response.get("BootOrder") + invalid_boot_order = [bo for bo in boot_order if bo not in exist_boot_order] + if invalid_boot_order: + module.fail_json(msg=INVALID_BOOT_OPT.format("Invalid"), 
invalid_boot_order=invalid_boot_order) + if not len(set(boot_order)) == len(boot_order): + dup_order = boot_order[:] + [dup_order.remove(bo) for bo in exist_boot_order if bo in dup_order] + module.fail_json(msg=INVALID_BOOT_OPT.format("Duplicate"), + duplicate_boot_order=dup_order) + if not len(boot_order) == len(exist_boot_order): + module.fail_json(msg="Unable to complete the operation because all boot devices " + "are required for this operation.") + if not boot_order == exist_boot_order: + payload["Boot"].update({"BootOrder": boot_order}) + if override_mode is not None and \ + (not BS_OVERRIDE_MODE.get(override_mode) == response.get("BootSourceOverrideMode")): + payload["Boot"].update({"BootSourceOverrideMode": BS_OVERRIDE_MODE.get(override_mode)}) + if override_enabled is not None and \ + (not BS_OVERRIDE_ENABLED.get(override_enabled) == response.get("BootSourceOverrideEnabled")): + payload["Boot"].update({"BootSourceOverrideEnabled": BS_OVERRIDE_ENABLED.get(override_enabled)}) + if override_target is not None and \ + (not BS_OVERRIDE_TARGET.get(override_target) == response.get("BootSourceOverrideTarget")): + payload["Boot"].update({"BootSourceOverrideTarget": BS_OVERRIDE_TARGET.get(override_target)}) + uefi_override_target = module.params.get("uefi_target_boot_source_override") + if override_target == "uefi_target" and not uefi_override_target == response.get("UefiTargetBootSourceOverride"): + payload["Boot"].update({"UefiTargetBootSourceOverride": uefi_override_target}) + if module.check_mode and payload["Boot"]: + module.exit_json(msg=CHANGES_MSG, changed=True) + elif (module.check_mode or not module.check_mode) and not payload["Boot"]: + module.exit_json(msg=NO_CHANGES_MSG) + else: + job_resp = apply_boot_settings(module, idrac, payload, res_id) + return job_resp + + +def configure_idrac_boot(module, idrac, res_id): + boot_options = module.params.get("boot_options") + inv_boot_options, diff_change, payload, job_resp, boot_attr = [], [], {}, {}, {} + if 
boot_options is not None: + boot_option_data = get_existing_boot_options(idrac, res_id) + for each in boot_options: + attr_val = each["display_name"] if each.get("display_name") is not None else each.get("boot_option_reference") + attr_key = "DisplayName" if each.get("display_name") is not None else "BootOptionReference" + report = list(filter(lambda d: d[attr_key] in [attr_val], boot_option_data["Members"])) + if not report: + inv_boot_options.append(each) + else: + act_val = {"BootOptionEnabled": each["enabled"]} + ext_val = {"BootOptionEnabled": report[0]["BootOptionEnabled"]} + diff_change.append(bool(set(ext_val.items()) ^ set(act_val.items()))) + payload[report[0]["Id"]] = each["enabled"] + if inv_boot_options: + module.fail_json(msg=BOOT_OPT_ERROR_MSG.format("Invalid"), invalid_boot_options=inv_boot_options) + if not len(payload) == len(boot_options): + module.fail_json(msg=BOOT_OPT_ERROR_MSG.format("Duplicate"), duplicate_boot_options=boot_options) + if module.check_mode and any(diff_change) is True: + module.exit_json(msg=CHANGES_MSG, changed=True) + elif (module.check_mode and all(diff_change) is False) or (not module.check_mode and not any(diff_change)): + module.exit_json(msg=NO_CHANGES_MSG) + else: + job_resp = configure_boot_options(module, idrac, res_id, payload) + else: + job_resp = configure_boot_settings(module, idrac, res_id) + return job_resp + + +def main(): + specs = { + "boot_options": { + "required": False, "type": "list", "elements": "dict", + "options": { + "boot_option_reference": {"required": False, "type": "str"}, + "display_name": {"required": False, "type": "str"}, + "enabled": {"required": True, "type": "bool"}, + }, + "mutually_exclusive": [("boot_option_reference", "display_name")], + "required_one_of": [("boot_option_reference", "display_name")], + }, + "boot_order": {"required": False, "type": "list", "elements": "str"}, + "boot_source_override_mode": {"required": False, "type": "str", "choices": ["legacy", "uefi"]}, + 
"boot_source_override_enabled": {"required": False, "type": "str", + "choices": ["continuous", "disabled", "once"]}, + "boot_source_override_target": {"required": False, "type": "str", + "choices": ["uefi_http", "sd_card", "uefi_target", "utilities", "bios_setup", + "hdd", "cd", "floppy", "pxe", "none"]}, + "uefi_target_boot_source_override": {"required": False, "type": "str"}, + "reset_type": {"required": False, "type": "str", "default": "graceful_restart", + "choices": ["graceful_restart", "force_restart", "none"]}, + "job_wait": {"required": False, "type": "bool", "default": True}, + "job_wait_timeout": {"required": False, "type": "int", "default": 900}, + "resource_id": {"required": False, "type": "str"} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_one_of=[["boot_options", "boot_order", "boot_source_override_mode", + "boot_source_override_enabled", "boot_source_override_target", + "uefi_target_boot_source_override"]], + mutually_exclusive=[ + ("boot_options", "boot_order"), ("boot_options", "boot_source_override_mode"), + ("boot_options", "boot_source_override_enabled"), ("boot_options", "boot_source_override_target"), + ("boot_options", "uefi_target_boot_source_override") + ], + required_if=[ + ["boot_source_override_target", "uefi_target", ("uefi_target_boot_source_override",)], + ], + supports_check_mode=True, + ) + try: + with iDRACRedfishAPI(module.params, req_session=True) as idrac: + res_id = module.params.get("resource_id") + if not res_id: + res_id, error_msg = get_system_res_id(idrac) + if error_msg: + module.fail_json(msg=error_msg) + job_resp = configure_idrac_boot(module, idrac, res_id) + job_resp_data = strip_substr_dict(job_resp) + boot_option_data = get_existing_boot_options(idrac, res_id) + boot_attr = get_response_attributes(module, idrac, res_id) + boot_attr["BootOptions"] = boot_option_data + if job_resp_data and \ + (job_resp_data.get("JobState") in ["Failed", "RebootFailed"] or + 
"failed" in job_resp_data.get("Message").lower()): + module.fail_json(msg=FAILED_MSG, job=job_resp_data) + if (not module.params["job_wait"] or module.params["reset_type"] == "none") and \ + not job_resp_data.get("JobState") == "RebootCompleted": + module.exit_json(msg=JOB_WAIT_MSG, job=job_resp_data, boot=boot_attr) + module.exit_json(msg=SUCCESS_MSG, job=job_resp_data, boot=boot_attr, changed=True) + except HTTPError as err: + if err.code == 401: + module.fail_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"])) + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]), unreachable=True) + except (ImportError, ValueError, RuntimeError, SSLValidationError, + ConnectionError, KeyError, TypeError, IndexError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py new file mode 100644 index 00000000..f5471a3a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py @@ -0,0 +1,521 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.5.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: idrac_certificates +short_description: Configure certificates for iDRAC +version_added: "5.5.0" +description: + - This module allows to generate certificate signing request, import, and export certificates on iDRAC. 
+extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + command: + description: + - "C(generate_csr), generate CSR. This requires I(cert_params) and I(certificate_path). + This is applicable only for C(HTTPS)" + - C(import), import the certificate file. This requires I(certificate_path). + - C(export), export the certificate. This requires I(certificate_path). + - C(reset), reset the certificate to default settings. This is applicable only for C(HTTPS). + type: str + choices: ['import', 'export', 'generate_csr', 'reset'] + default: 'generate_csr' + certificate_type: + description: + - Type of the iDRAC certificate. + - C(HTTPS) The Dell self-signed SSL certificate. + - C(CA) Certificate Authority(CA) signed SSL certificate. + - C(CSC) The custom signed SSL certificate. + - C(CLIENT_TRUST_CERTIFICATE) Client trust certificate. + type: str + choices: ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE'] + default: 'HTTPS' + certificate_path: + description: + - Absolute path of the certificate file if I(command) is C(import). + - Directory path with write permissions if I(command) is C(generate_csr) or C(export). + type: path + passphrase: + description: The passphrase string if the certificate to be imported is passphrase protected. + type: str + cert_params: + description: Certificate parameters to generate signing request. + type: dict + suboptions: + common_name: + description: The common name of the certificate. + type: str + required: True + organization_unit: + description: The name associated with an organizational unit. For example department name. + type: str + required: True + locality_name: + description: The city or other location where the entity applying for certification is located. + type: str + required: True + state_name: + description: The state where the entity applying for certification is located. 
+ type: str + required: True + country_code: + description: The country code of the country where the entity applying for certification is located. + type: str + required: True + email_address: + description: The email associated with the CSR. + type: str + required: True + organization_name: + description: The name associated with an organization. + type: str + required: True + subject_alt_name: + description: The alternative domain names associated with the request. + type: list + elements: str + default: [] + resource_id: + description: Redfish ID of the resource. + type: str + reset: + description: + - To reset the iDRAC after the certificate operation. + - This is applicable when I(command) is C(import) or C(reset). + type: bool + default: True + wait: + description: + - Maximum wait time for iDRAC to start after the reset, in seconds. + - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(True). + type: int + default: 300 +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above. + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Generate HTTPS certificate signing request + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycerts" + cert_params: + common_name: "sample.domain.com" + organization_unit: "OrgUnit" + locality_name: "Bangalore" + state_name: "Karnataka" + country_code: "IN" + email_address: "admin@domain.com" + organization_name: "OrgName" + subject_alt_name: + - 192.198.2.1 + +- name: Import a HTTPS certificate. 
+ dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "HTTPS" + certificate_path: "/path/to/cert.pem" + +- name: Export a HTTPS certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "HTTPS" + certificate_path: "/home/omam/mycert_dir" + +- name: Import a CSC certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + certificate_type: "CSC" + certificate_path: "/path/to/cert.pem" + +- name: Export a Client trust certificate. + dellemc.openmanage.idrac_certificates: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + certificate_type: "CLIENT_TRUST_CERTIFICATE" + certificate_path: "/home/omam/mycert_dir" +''' + +RETURN = r''' +--- +msg: + type: str + description: Status of the certificate configuration operation. + returned: always + sample: "Successfully performed the operation generate_csr." +certificate_path: + type: str + description: The csr or exported certificate file path + returned: when I(command) is C(export) or C(generate_csr) + sample: "/home/ansible/myfiles/cert.pem" +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +import base64 +import os +from datetime import datetime +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import reset_idrac + +NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}." +SUCCESS_MSG = "Successfully performed the '{command}' operation." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +SYSTEM_ID = "System.Embedded.1" +MANAGER_ID = "iDRAC.Embedded.1" +ACTIONS_PFIX = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService." 
+SYSTEMS_URI = "/redfish/v1/Systems" +MANAGERS_URI = "/redfish/v1/Managers" +IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService" +CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR" +IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate" +EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate" +RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg" +IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset" + +idrac_service_actions = { + "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate", + "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate", + "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL, + "#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR": + "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR", + "#DelliDRACCardService.FactoryIdentityExportCertificate": + "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate", + "#DelliDRACCardService.FactoryIdentityImportCertificate": + "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate", + "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR", + "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate", + "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL, + 
"#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg", + "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset" +} + +rfish_cert_coll = {'Server': { + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/NetworkProtocol/HTTPS/Certificates" +}} +out_mapper = {} +out_file_path = {"CSRString": 'certificate_path', + "CertificateFile": 'certificate_path'} +changed_map = {"generate_csr": False, "import": True, "export": False, "reset": True} +# reset_map = {"generate_csr": False, "import": True, "export": False, "reset": True} +csr_transform = {"common_name": "CommonName", + "organization_unit": "OrganizationalUnit", + "locality_name": 'City', + "state_name": 'State', + "country_code": "Country", + "email_address": 'Email', + "organization_name": "Organization", + "subject_alt_name": 'AlternativeNames'} +action_url_map = {"generate_csr": {}, + "import": {'Server': "#DelliDRACCardService.ImportSSLCertificate", + 'CA': "#DelliDRACCardService.ImportSSLCertificate", + 'CSC': "#DelliDRACCardService.ImportSSLCertificate", + 'ClientTrustCertificate': "#DelliDRACCardService.ImportSSLCertificate"}, + "export": {'Server': "#DelliDRACCardService.ExportSSLCertificate", + 'CA': "#DelliDRACCardService.ExportSSLCertificate", + 'CSC': "#DelliDRACCardService.ExportSSLCertificate", + 'ClientTrustCertificate': "#DelliDRACCardService.ExportSSLCertificate"}, + "reset": {'Server': "#DelliDRACCardService.SSLResetCfg"}} + +dflt_url_map = {"generate_csr": {'Server': CSR_SSL}, + "import": {'Server': IMPORT_SSL, + 'CA': IMPORT_SSL, + 'CSC': IMPORT_SSL, + 'ClientTrustCertificate': IMPORT_SSL}, + "export": {'Server': EXPORT_SSL, + 'CA': EXPORT_SSL, + 'CSC': EXPORT_SSL, + 'ClientTrustCertificate': EXPORT_SSL}, + "reset": {'Server': RESET_SSL}} +certype_map = {'HTTPS': "Server", 'CA': "CA", 'CSC': "CSC", + 'CLIENT_TRUST_CERTIFICATE': 
"ClientTrustCertificate"} + + +def get_ssl_payload(module, op, certype): + payload = {} + method = 'POST' + if op == 'import': + payload["CertificateType"] = certype + if module.params.get('passphrase'): + payload['Passphrase'] = module.params.get('passphrase') + fpath = module.params.get('certificate_path') + try: + if str(fpath).lower().endswith('.p12') or str(fpath).lower().endswith( + '.pfx'): # Linux generates .p12 Windows .pfx + with open(fpath, 'rb') as cert: + cert_content = cert.read() + cert_file = base64.encodebytes(cert_content).decode('ascii') + else: + with open(fpath, "r") as cert: + cert_file = cert.read() + except OSError as file_err: + module.exit_json(msg=str(file_err), failed=True) + payload['SSLCertificateFile'] = cert_file + elif op == 'export': + payload['SSLCertType'] = certype + elif op == 'generate_csr': + payload = {} + cert_params = module.params.get("cert_params") + for k, v in csr_transform.items(): + payload[v] = cert_params.get(k) + if rfish_cert_coll.get(certype): + payload["CertificateCollection"] = rfish_cert_coll.get(certype) + elif op == 'reset': + payload = "{}" + return payload, method + + +payload_map = {"Server": get_ssl_payload, + "CA": get_ssl_payload, + "CSC": get_ssl_payload, + "ClientTrustCertificate": get_ssl_payload} + + +def get_res_id(idrac, certype): + cert_map = {"Server": MANAGER_ID} + try: + resp = idrac.invoke_request("GET", cert_map.get(certype, MANAGERS_URI)) + membs = resp.json_data.get("Members") + res_uri = membs[0].get('@odata.id') # Getting the first item + res_id = res_uri.split("/")[-1] + except Exception: + res_id = cert_map.get(certype, MANAGER_ID) + return res_id + + +def get_idrac_service(idrac, res_id): + srvc = IDRAC_SERVICE.format(res_id=res_id) + try: + resp = idrac.invoke_request('GET', "{0}/{1}".format(MANAGERS_URI, res_id)) + srvc_data = resp.json_data + dell_srvc = srvc_data['Links']['Oem']['Dell']['DelliDRACCardService'] + srvc = dell_srvc.get("@odata.id", 
IDRAC_SERVICE.format(res_id=res_id)) + except Exception: + srvc = IDRAC_SERVICE.format(res_id=res_id) + return srvc + + +def get_actions_map(idrac, idrac_service_uri): + actions = idrac_service_actions + try: + resp = idrac.invoke_request(idrac_service_uri, 'GET') + srvc_data = resp.json_data + actions = dict((k, v.get('target')) for k, v in srvc_data.get('Actions').items()) + except Exception as exc: + actions = idrac_service_actions + return actions + + +def get_cert_url(actions, op, certype, res_id): + idrac_key = action_url_map.get(op).get(certype) + dynurl = actions.get(idrac_key) + if not dynurl: + dynurl = dflt_url_map.get(op).get(certype) + if dynurl: + dynurl = dynurl.format(res_id=res_id) + return dynurl + + +def certificate_action(module, idrac, actions, op, certype, res_id): + cert_url = get_cert_url(actions, op, certype, res_id) + if not cert_url: + module.exit_json(msg=NOT_SUPPORTED_ACTION.format(op=op, certype=module.params.get('certificate_type'))) + cert_payload, method = payload_map.get(certype)(module, op, certype) + exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id) + + +def write_to_file(module, cert_data, dkey): + f_ext = {'HTTPS': ".pem", 'CA': ".pem", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"} + path = module.params.get('certificate_path') + if not (os.path.exists(path) or os.path.isdir(path)): + module.exit_json(msg="Provided directory path '{0}' is not valid.".format(path), failed=True) + if not os.access(path, os.W_OK): + module.exit_json(msg="Provided directory path '{0}' is not writable. 
Please check if you " + "have appropriate permissions.".format(path), failed=True) + d = datetime.now() + if module.params.get('command') == 'generate_csr': + ext = '.txt' + else: + ext = f_ext.get(module.params.get('certificate_type')) + cert_file_name = "{0}_{1}{2}{3}_{4}{5}{6}_{7}{8}".format( + module.params["idrac_ip"], d.date().year, d.date().month, d.date().day, + d.time().hour, d.time().minute, d.time().second, module.params.get('certificate_type'), ext) + file_name = os.path.join(path, cert_file_name) + write_data = cert_data.pop(dkey, None) + with open(file_name, "w") as fp: + fp.writelines(write_data) + cert_data[out_file_path.get(dkey)] = file_name + + +def format_output(module, cert_data): + # cert_data = strip_substr_dict(cert_data, chkstr='@odata') + result = {} + cp = cert_data.copy() + klist = cp.keys() + for k in klist: + if "message" in k.lower(): + cert_data.pop(k, None) + if k in out_mapper: + cert_data[out_mapper.get(k)] = cert_data.pop(k, None) + if k in out_file_path: + write_to_file(module, cert_data, k) + if result: + cert_data.update({'result': result}) + cert_data.pop("CertificateCollection", None) + return cert_data + + +def get_export_data(idrac, certype, res_id): + try: + resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": certype}) + cert_data = resp.json_data + except Exception: + cert_data = {"CertificateFile": ""} + return cert_data.get("CertificateFile") + + +def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id): + cmd = module.params.get('command') + changed = changed_map.get(cmd) + reset = changed_map.get(cmd) and module.params.get('reset') + result = {"changed": changed} + reset_msg = "" + if changed: + reset_msg = " Reset iDRAC to apply new certificate." \ + " Until iDRAC is reset, the old certificate will be active." 
+ if module.params.get('command') == 'import': + export_cert = get_export_data(idrac, certype, res_id) + if cert_payload.get('SSLCertificateFile') in export_cert: + module.exit_json(msg=NO_CHANGES_MSG) + if module.check_mode and changed: + module.exit_json(msg=CHANGES_MSG, changed=changed) + if module.params.get('command') == 'reset' and certype == "Server": + resp = idrac.invoke_request(cert_url, method, data=cert_payload, dump=False) + else: + resp = idrac.invoke_request(cert_url, method, data=cert_payload) + cert_data = resp.json_data + cert_output = format_output(module, cert_data) + result.update(cert_output) + if reset: + reset, track_failed, reset_msg = reset_idrac(idrac, module.params.get('wait'), res_id) + result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg) + module.exit_json(**result) + + +def main(): + specs = { + "command": {"type": 'str', "default": 'generate_csr', + "choices": ['generate_csr', 'export', 'import', 'reset']}, + "certificate_type": {"type": 'str', "default": 'HTTPS', + "choices": ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']}, + "certificate_path": {"type": 'path'}, + "passphrase": {"type": 'str', "no_log": True}, + "cert_params": {"type": 'dict', "options": { + "common_name": {"type": 'str', "required": True}, + "organization_unit": {"type": 'str', "required": True}, + "locality_name": {"type": 'str', "required": True}, + "state_name": {"type": 'str', "required": True}, + "country_code": {"type": 'str', "required": True}, + "email_address": {"type": 'str', "required": True}, + "organization_name": {"type": 'str', "required": True}, + "subject_alt_name": {"type": 'list', "elements": 'str', "default": []} + }}, + "resource_id": {"type": 'str'}, + "reset": {"type": 'bool', "default": True}, + "wait": {"type": 'int', "default": 300} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[ + ['command', 'generate_csr', ('cert_params', 'certificate_path',)], + 
['command', 'import', ('certificate_path',)], + ['command', 'export', ('certificate_path',)] + ], + supports_check_mode=True) + + try: + with iDRACRedfishAPI(module.params) as idrac: + certype = certype_map.get(module.params.get('certificate_type')) + op = module.params.get('command') + res_id = module.params.get('resource_id') + if not res_id: + res_id = get_res_id(idrac, certype) + idrac_service_uri = get_idrac_service(idrac, res_id) + actions_map = get_actions_map(idrac, idrac_service_uri) + certificate_action(module, idrac, actions_map, op, certype, res_id) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (ImportError, ValueError, RuntimeError, SSLValidationError, + ConnectionError, KeyError, TypeError, IndexError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py new file mode 100644 index 00000000..e4d96634 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py @@ -0,0 +1,651 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: idrac_firmware +short_description: Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP) +version_added: "2.1.0" +description: + - Update the Firmware by connecting to a network share (CIFS, NFS, HTTP, HTTPS, FTP) that contains a catalog of + available updates. 
+ - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs. + - All applicable updates contained in the repository are applied to the system. + - This feature is available only with iDRAC Enterprise License. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + description: Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported. + type: str + required: True + share_user: + description: Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + type: str + share_password: + description: Network share user password. This option is mandatory for CIFS Network Share. + type: str + aliases: ['share_pwd'] + share_mnt: + description: + - Local mount path of the network share with read-write permission for ansible user. + - This option is not applicable for HTTP, HTTPS, and FTP shares. + type: str + job_wait: + description: Whether to wait for job completion or not. + type: bool + default: True + catalog_file_name: + description: Catalog file name relative to the I(share_name). + type: str + default: 'Catalog.xml' + ignore_cert_warning: + description: Specifies if certificate warnings are ignored when HTTPS share is used. + If C(True) option is set, then the certificate warnings are ignored. + type: bool + default: True + apply_update: + description: + - If I(apply_update) is set to C(True), then the packages are applied. + - If I(apply_update) is set to C(False), no updates are applied, and a catalog report + of packages is generated and returned. + type: bool + default: True + reboot: + description: + - Provides the option to apply the update packages immediately or in the next reboot. + - If I(reboot) is set to C(True), then the packages are applied immediately. 
+ - If I(reboot) is set to C(False), then the packages are staged and applied in the next reboot. + - Packages that do not require a reboot are applied immediately irrespective of I (reboot). + type: bool + default: False + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Rajeev Arakkal (@rajeevarakkal)" + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - Module will report success based on the iDRAC firmware update parent job status if there are no individual + component jobs present. + - For server with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the + module will return success with a proper message. + - This module supports C(check_mode). +''' + +EXAMPLES = """ +--- +- name: Update firmware from repository on a NFS Share + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/share" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + +- name: Update firmware from repository on a CIFS Share + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "full_cifs_path" + share_user: "share_user" + share_password: "share_password" + reboot: True + job_wait: True + apply_update: True + catalog_file_name: "Catalog.xml" + +- name: Update firmware from repository on a HTTP + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://downloads.dell.com" + reboot: True + job_wait: True + apply_update: True + +- name: Update firmware from repository on a HTTPS + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" 
+ idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://downloads.dell.com" + reboot: True + job_wait: True + apply_update: True + +- name: Update firmware from repository on a FTP + dellemc.openmanage.idrac_firmware: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "ftp://ftp.dell.com" + reboot: True + job_wait: True + apply_update: True +""" + +RETURN = """ +--- +msg: + type: str + description: Overall firmware update status. + returned: always + sample: "Successfully updated the firmware." +update_status: + type: dict + description: Firmware Update job and progress details from the iDRAC. + returned: success + sample: { + 'InstanceID': 'JID_XXXXXXXXXXXX', + 'JobState': 'Completed', + 'Message': 'Job completed successfully.', + 'MessageId': 'REDXXX', + 'Name': 'Repository Update', + 'JobStartTime': 'NA', + 'Status': 'Success', + } +""" + + +import os +import json +import time +from ssl import SSLError +from xml.etree import ElementTree as ET +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +try: + from omsdk.sdkcreds import UserCredentials + from omsdk.sdkfile import FileOnShare + from omsdk.http.sdkwsmanbase import WsManProtocolBase + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + +SHARE_TYPE = {'nfs': 'NFS', 'cifs': 'CIFS', 'ftp': 'FTP', + 'http': 'HTTP', 'https': 'HTTPS', 'tftp': 'TFTP'} +CERT_WARN = {True: 'On', False: 'Off'} +IDRAC_PATH = 
"/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService" +PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/Actions/" \ + "DellSoftwareInstallationService.InstallFromRepository" +GET_REPO_BASED_UPDATE_LIST_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/" \ + "Actions/DellSoftwareInstallationService.GetRepoBasedUpdateList" +JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}" +iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}" +MESSAGE = "Firmware versions on server match catalog, applicable updates are not present in the repository." +EXIT_MESSAGE = "The catalog in the repository specified in the operation has the same firmware versions " \ + "as currently present on the server." +IDEM_MSG_ID = "SUP029" +REDFISH_VERSION = "3.30" +INTERVAL = 30 # polling interval +WAIT_COUNT = 240 +JOB_WAIT_MSG = 'Job wait timed out after {0} minutes' + + +def wait_for_job_completion(module, job_uri, job_wait=False, reboot=False, apply_update=False): + track_counter = 0 + response = {} + msg = None + while track_counter < 5: + try: + # For job_wait False return a valid response, try 5 times + with iDRACRedfishAPI(module.params) as redfish: + response = redfish.invoke_request(job_uri, "GET") + track_counter += 5 + msg = None + except Exception as error_message: + msg = str(error_message) + track_counter += 1 + time.sleep(10) + if track_counter < 5: + msg = None + # reset track counter + track_counter = 0 + while job_wait and track_counter <= WAIT_COUNT: + try: + with iDRACRedfishAPI(module.params) as redfish: + response = redfish.invoke_request(job_uri, "GET") + job_state = response.json_data.get("JobState") + msg = None + except Exception as error_message: + msg = str(error_message) + track_counter += 2 + time.sleep(INTERVAL) + else: + if response.json_data.get("PercentComplete") == 100 and job_state == "Completed": # apply now + break + if job_state in ["Starting", "Running", 
"Pending", "New"] and not reboot and apply_update: # apply on + break + track_counter += 1 + time.sleep(INTERVAL) + if track_counter > WAIT_COUNT: + # TIMED OUT + msg = JOB_WAIT_MSG.format((WAIT_COUNT * INTERVAL) / 60) + return response, msg + + +def _validate_catalog_file(catalog_file_name): + normilized_file_name = catalog_file_name.lower() + if not normilized_file_name: + raise ValueError('catalog_file_name should be a non-empty string.') + elif not normilized_file_name.endswith("xml"): + raise ValueError('catalog_file_name should be an XML file.') + + +def get_check_mode_status(status, module): + if status['job_details']["Data"]["GetRepoBasedUpdateList_OUTPUT"].get("Message") == MESSAGE.rstrip(".") and \ + status.get('JobStatus') == "Completed": + if module.check_mode: + module.exit_json(msg="No changes found to commit!") + module.exit_json(msg=EXIT_MESSAGE) + + +def get_job_status(module, each_comp, idrac): + failed, each_comp['JobStatus'], each_comp['Message'] = False, None, None + job_wait = module.params['job_wait'] + reboot = module.params['reboot'] + apply_update = module.params['apply_update'] + if each_comp.get("JobID") is not None: + if idrac: + resp = idrac.job_mgr.job_wait(each_comp.get("JobID")) + while reboot and apply_update: + resp = idrac.job_mgr.job_wait(each_comp.get("JobID")) + if resp.get("JobStatus") is not None and (not resp.get('JobStatus') == "Scheduled"): + break + each_comp['Message'] = resp.get('Message') + each_comp['JobStatus'] = "OK" + fail_words_lower = ['fail', 'invalid', 'unable', 'not', 'cancel'] + if any(x in resp.get('Message').lower() for x in fail_words_lower): + each_comp['JobStatus'] = "Critical" + failed = True + else: + resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=each_comp.get("JobID")), job_wait, reboot, + apply_update) + if not msg: + resp_data = resp.json_data + if resp_data.get('Messages'): + each_comp['Message'] = resp_data.get('Messages')[0]['Message'] + each_comp['JobStatus'] = 
resp_data.get('JobStatus') + if each_comp['JobStatus'] == "Critical": + failed = True + else: + failed = True + return each_comp, failed + + +def _convert_xmltojson(module, job_details, idrac): + """get all the xml data from PackageList and returns as valid json.""" + data, repo_status, failed_status = [], False, False + try: + xmldata = ET.fromstring(job_details['PackageList']) + for iname in xmldata.iter('INSTANCENAME'): + comp_data = dict([(attr.attrib['NAME'], txt.text) for attr in iname.iter("PROPERTY") for txt in attr]) + component, failed = get_job_status(module, comp_data, idrac) + # get the any single component update failure and record the only very first failure on failed_status True + if not failed_status and failed: + failed_status = True + data.append(component) + repo_status = True + except ET.ParseError: + data = job_details['PackageList'] + return data, repo_status, failed_status + + +def get_jobid(module, resp): + """Get the Job ID from the response header.""" + jobid = None + if resp.status_code == 202: + joburi = resp.headers.get('Location') + if joburi is None: + module.fail_json(msg="Failed to update firmware.") + jobid = joburi.split("/")[-1] + else: + module.fail_json(msg="Failed to update firmware.") + return jobid + + +def handle_HTTP_error(module, httperr): + err_message = json.load(httperr) + err_list = err_message.get('error', {}).get('@Message.ExtendedInfo', [{"Message": EXIT_MESSAGE}]) + if err_list: + err_reason = err_list[0].get("Message", EXIT_MESSAGE) + if IDEM_MSG_ID in err_list[0].get('MessageId'): + module.exit_json(msg=err_reason) + if "error" in err_message: + module.fail_json(msg=err_message) + + +def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls): + """Update firmware through HTTP/HTTPS/FTP and return the job details.""" + repo_url = urlparse(share_name) + job_details, status = {}, {} + ipaddr = repo_url.netloc + share_type = repo_url.scheme + sharename = 
repo_url.path.strip('/') + payload['IPAddress'] = ipaddr + if repo_url.path: + payload['ShareName'] = sharename + payload['ShareType'] = SHARE_TYPE[share_type] + install_url = PATH + get_repo_url = GET_REPO_BASED_UPDATE_LIST_PATH + actions = repo_urls.get('Actions') + if actions: + install_url = actions.get("#DellSoftwareInstallationService.InstallFromRepository", {}).get("target", PATH) + get_repo_url = actions.get("#DellSoftwareInstallationService.GetRepoBasedUpdateList", {}).\ + get("target", GET_REPO_BASED_UPDATE_LIST_PATH) + resp = idrac.invoke_request(install_url, method="POST", data=payload) + job_id = get_jobid(module, resp) + resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update) + if not msg: + status = resp.json_data + else: + status['update_msg'] = msg + try: + resp_repo_based_update_list = idrac.invoke_request(get_repo_url, method="POST", data="{}", + dump=False) + job_details = resp_repo_based_update_list.json_data + except HTTPError as err: + handle_HTTP_error(module, err) + raise err + return status, job_details + + +def update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name, apply_update, reboot, + ignore_cert_warning, job_wait, payload): + """Update firmware through HTTP/HTTPS/FTP and return the job details.""" + repo_url = urlparse(share_name) + job_details, status = {}, {} + ipaddr = repo_url.netloc + share_type = repo_url.scheme + sharename = repo_url.path.strip('/') + if ipaddr == "downloads.dell.com": + status = idrac.update_mgr.update_from_dell_repo_url(ipaddress=ipaddr, share_type=share_type, + share_name=sharename, catalog_file=catalog_file_name, + apply_update=apply_update, reboot_needed=reboot, + ignore_cert_warning=ignore_cert_warning, job_wait=job_wait) + get_check_mode_status(status, module) + else: + status = idrac.update_mgr.update_from_repo_url(ipaddress=ipaddr, share_type=share_type, + share_name=sharename, catalog_file=catalog_file_name, + 
apply_update=apply_update, reboot_needed=reboot, + ignore_cert_warning=ignore_cert_warning, job_wait=job_wait) + get_check_mode_status(status, module) + return status, job_details + + +def update_firmware_omsdk(idrac, module): + """Update firmware from a network share and return the job details.""" + msg = {} + msg['changed'], msg['failed'], msg['update_status'] = False, False, {} + msg['update_msg'] = "Successfully triggered the job to update the firmware." + try: + share_name = module.params['share_name'] + catalog_file_name = module.params['catalog_file_name'] + share_user = module.params['share_user'] + share_pwd = module.params['share_password'] + reboot = module.params['reboot'] + job_wait = module.params['job_wait'] + ignore_cert_warning = module.params['ignore_cert_warning'] + apply_update = module.params['apply_update'] + payload = {"RebootNeeded": reboot, "CatalogFile": catalog_file_name, "ApplyUpdate": str(apply_update), + "IgnoreCertWarning": CERT_WARN[ignore_cert_warning]} + if share_user is not None: + payload['UserName'] = share_user + if share_pwd is not None: + payload['Password'] = share_pwd + + if share_name.lower().startswith(('http://', 'https://', 'ftp://')): + msg['update_status'], job_details = update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name, + apply_update, reboot, ignore_cert_warning, + job_wait, payload) + if job_details: + msg['update_status']['job_details'] = job_details + else: + upd_share = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, catalog_file_name), + mount_point=module.params['share_mnt'], isFolder=False, + creds=UserCredentials(share_user, share_pwd)) + msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, apply_update=apply_update, + reboot_needed=reboot, job_wait=job_wait) + get_check_mode_status(msg['update_status'], module) + + json_data, repo_status, failed = msg['update_status']['job_details'], False, False + if "PackageList" not in json_data: + job_data = 
json_data.get('Data') + pkglst = job_data['body'] if 'body' in job_data else job_data.get('GetRepoBasedUpdateList_OUTPUT') + if 'PackageList' in pkglst: # Returns from OMSDK + pkglst['PackageList'], repo_status, failed = _convert_xmltojson(module, pkglst, idrac) + else: # Redfish + json_data['PackageList'], repo_status, failed = _convert_xmltojson(module, json_data, None) + + if not apply_update and not failed: + msg['update_msg'] = "Successfully fetched the applicable firmware update package list." + elif apply_update and not reboot and not job_wait and not failed: + msg['update_msg'] = "Successfully triggered the job to stage the firmware." + elif apply_update and job_wait and not reboot and not failed: + msg['update_msg'] = "Successfully staged the applicable firmware update packages." + msg['changed'] = True + elif apply_update and job_wait and not reboot and failed: + msg['update_msg'] = "Successfully staged the applicable firmware update packages with error(s)." + msg['failed'] = True + + except RuntimeError as e: + module.fail_json(msg=str(e)) + + if module.check_mode and not (json_data.get('PackageList') or json_data.get('Data')) and \ + msg['update_status']['JobStatus'] == 'Completed': + module.exit_json(msg="No changes found to commit!") + elif module.check_mode and (json_data.get('PackageList') or json_data.get('Data')) and \ + msg['update_status']['JobStatus'] == 'Completed': + module.exit_json(msg="Changes found to commit!", changed=True, + update_status=msg['update_status']) + elif module.check_mode and not msg['update_status']['JobStatus'] == 'Completed': + msg['update_status'].pop('job_details') + module.fail_json(msg="Unable to complete the firmware repository download.", + update_status=msg['update_status']) + elif not module.check_mode and "Status" in msg['update_status']: + if msg['update_status']['Status'] in ["Success", "InProgress"]: + if module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and ( + 
'job_details' in msg['update_status'] and repo_status) and not failed: + msg['changed'] = True + msg['update_msg'] = "Successfully updated the firmware." + elif module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and ( + 'job_details' in msg['update_status'] and repo_status) and failed: + msg['failed'], msg['changed'] = True, False + msg['update_msg'] = "Firmware update failed." + else: + failed_msg = "Firmware update failed." + if not apply_update: + failed_msg = "Unable to complete the repository update." + module.fail_json(msg=failed_msg, update_status=msg['update_status']) + return msg + + +def update_firmware_redfish(idrac, module, repo_urls): + """Update firmware from a network share and return the job details.""" + msg = {} + msg['changed'], msg['failed'] = False, False + msg['update_msg'] = "Successfully triggered the job to update the firmware." + try: + share_name = module.params['share_name'] + catalog_file_name = module.params['catalog_file_name'] + share_user = module.params['share_user'] + share_pwd = module.params['share_password'] + reboot = module.params['reboot'] + job_wait = module.params['job_wait'] + ignore_cert_warning = module.params['ignore_cert_warning'] + apply_update = module.params['apply_update'] + payload = {"RebootNeeded": reboot, "CatalogFile": catalog_file_name, "ApplyUpdate": str(apply_update), + "IgnoreCertWarning": CERT_WARN[ignore_cert_warning]} + if share_user is not None: + payload['UserName'] = share_user + if share_pwd is not None: + payload['Password'] = share_pwd + + if share_name.lower().startswith(('http://', 'https://', 'ftp://')): + msg['update_status'], job_details = update_firmware_url_redfish( + module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls) + if job_details: + msg['update_status']['job_details'] = job_details + else: + if share_name.startswith('\\\\'): + cifs = share_name.split('\\') + payload['IPAddress'] = cifs[2] + payload['ShareName'] = 
'\\'.join(cifs[3:]) + payload['ShareType'] = 'CIFS' + else: + nfs = urlparse(share_name) + payload['IPAddress'] = nfs.scheme + payload['ShareName'] = nfs.path.strip('/') + payload['ShareType'] = 'NFS' + resp = idrac.invoke_request(PATH, method="POST", data=payload) + job_id = get_jobid(module, resp) + resp, mesg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update) + if not mesg: + msg['update_status'] = resp.json_data + else: + msg['update_status'] = mesg + try: + repo_based_update_list = idrac.invoke_request(GET_REPO_BASED_UPDATE_LIST_PATH, method="POST", + data="{}", dump=False) + msg['update_status']['job_details'] = repo_based_update_list.json_data + except HTTPError as err: + handle_HTTP_error(module, err) + raise err + json_data, repo_status, failed = msg['update_status']['job_details'], False, False + if "PackageList" not in json_data: + job_data = json_data.get('Data') + pkglst = job_data['body'] if 'body' in job_data else job_data.get('GetRepoBasedUpdateList_OUTPUT') + if 'PackageList' in pkglst: + pkglst['PackageList'], repo_status, failed = _convert_xmltojson(module, pkglst, idrac) + else: + json_data['PackageList'], repo_status, failed = _convert_xmltojson(module, json_data, None) + + if not apply_update and not failed: + msg['update_msg'] = "Successfully fetched the applicable firmware update package list." + elif apply_update and not reboot and not job_wait and not failed: + msg['update_msg'] = "Successfully triggered the job to stage the firmware." + elif apply_update and job_wait and not reboot and not failed: + msg['update_msg'] = "Successfully staged the applicable firmware update packages." + msg['changed'] = True + elif apply_update and job_wait and not reboot and failed: + msg['update_msg'] = "Successfully staged the applicable firmware update packages with error(s)." 
+ msg['failed'] = True + + except RuntimeError as e: + module.fail_json(msg=str(e)) + + if module.check_mode and not (json_data.get('PackageList') or json_data.get('Data')) and \ + msg['update_status']['JobStatus'] == 'OK': + module.exit_json(msg="No changes found to commit!") + elif module.check_mode and (json_data.get('PackageList') or json_data.get('Data')) and \ + msg['update_status']['JobStatus'] == 'OK': + module.exit_json(msg="Changes found to commit!", changed=True, + update_status=msg['update_status']) + elif module.check_mode and not msg['update_status']['JobStatus'] == 'OK': + msg['update_status'].pop('job_details') + module.fail_json(msg="Unable to complete the firmware repository download.", + update_status=msg['update_status']) + elif not module.check_mode and "JobStatus" in msg['update_status']: + if not msg['update_status']['JobStatus'] == "Critical": + if module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \ + ('job_details' in msg['update_status'] and repo_status) and not failed: + msg['changed'] = True + msg['update_msg'] = "Successfully updated the firmware." + elif module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \ + ('job_details' in msg['update_status'] and repo_status) and failed: + msg['failed'], msg['changed'] = True, False + msg['update_msg'] = "Firmware update failed." + else: + failed_msg = "Firmware update failed." + if not apply_update: + failed_msg = "Unable to complete the repository update." 
+ module.fail_json(msg=failed_msg, update_status=msg['update_status']) + return msg + + +def main(): + specs = { + "share_name": {"required": True, "type": 'str'}, + "share_user": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True}, + "share_mnt": {"required": False, "type": 'str'}, + + "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"}, + "reboot": {"required": False, "type": 'bool', "default": False}, + "job_wait": {"required": False, "type": 'bool', "default": True}, + "ignore_cert_warning": {"required": False, "type": 'bool', "default": True}, + "apply_update": {"required": False, "type": 'bool', "default": True}, + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + redfish_check = False + try: + with iDRACRedfishAPI(module.params) as obj: + resp = obj.invoke_request(IDRAC_PATH, method="GET") + software_service_data = resp.json_data + redfish_check = True + except Exception: + software_service_data = {} + redfish_check = False + + try: + # Validate the catalog file + _validate_catalog_file(module.params['catalog_file_name']) + if module.check_mode: + module.params['apply_update'] = False + module.params['reboot'] = False + module.params['job_wait'] = True + # Connect to iDRAC and update firmware + if redfish_check: + with iDRACRedfishAPI(module.params) as redfish_obj: + status = update_firmware_redfish(redfish_obj, module, software_service_data) + else: + with iDRACConnection(module.params) as idrac: + status = update_firmware_omsdk(idrac, module) + except HTTPError as err: + module.fail_json(msg=str(err), update_status=json.load(err)) + except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, SSLError) as e: + module.fail_json(msg=str(e)) + except Exception as exc: + module.fail_json(msg="Unhandled Exception {0}".format(exc)) 
+ + module.exit_json(msg=status['update_msg'], update_status=status['update_status'], + changed=status['changed'], failed=status['failed']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py new file mode 100644 index 00000000..3f644f85 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_firmware_info +short_description: Get Firmware Inventory +version_added: "3.0.0" +description: Get Firmware Inventory. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: "Rajeev Arakkal (@rajeevarakkal)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Get Installed Firmware Inventory + dellemc.openmanage.idrac_firmware_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" +""" + +RETURN = r''' +--- +msg: + description: "Fetching the firmware inventory details." + returned: always + type: str + sample: "Successfully fetched the firmware inventory details." +firmware_info: + type: dict + description: Details of the firmware. 
+ returned: success + sample: { + "Firmware": [{ + "BuildNumber": "0", + "Classifications": "10", + "ComponentID": "102573", + "ComponentType": "FRMW", + "DeviceID": null, + "ElementName": "Power Supply.Slot.1", + "FQDD": "PSU.Slot.1", + "HashValue": null, + "IdentityInfoType": "OrgID:ComponentType:ComponentID", + "IdentityInfoValue": "DCIM:firmware:102573", + "InstallationDate": "2018-11-22T03:58:23Z", + "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1", + "IsEntity": "true", + "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1", + "MajorVersion": "0", + "MinorVersion": "3", + "RevisionNumber": "67", + "RevisionString": null, + "Status": "Installed", + "SubDeviceID": null, + "SubVendorID": null, + "Updateable": "true", + "VendorID": null, + "VersionString": "00.3D.67", + "impactsTPMmeasurements": "false" + }] + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + + +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +try: + from omsdk.sdkfile import LocalFile + from omsdk.catalog.sdkupdatemgr import UpdateManager + from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper + HAS_OMSDK = True +except ImportError: + HAS_OMSDK = False + + +# Main +def main(): + specs = {} + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + msg = idrac.update_mgr.InstalledFirmware + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + + module.exit_json(msg="Successfully fetched the firmware inventory details.", + firmware_info=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py new file mode 100644 index 00000000..2d555f9a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_lifecycle_controller_job_status_info +short_description: Get the status of a Lifecycle Controller job +version_added: "2.1.0" +description: This module shows the status of a specific Lifecycle Controller job using its job ID. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + job_id: + required: True + type: str + description: JOB ID in the format "JID_123456789012". +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Rajeev Arakkal (@rajeevarakkal)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Show status of a Lifecycle Control job + dellemc.openmanage.idrac_lifecycle_controller_job_status_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_1234567890" +""" + +RETURN = r''' +--- +msg: + description: Overall status of the job facts operation. + returned: always + type: str + sample: "Successfully fetched the job info." +job_info: + description: Displays the status of a Lifecycle Controller job. + returned: success + type: dict + sample: { + "ElapsedTimeSinceCompletion": "8742", + "InstanceID": "JID_844222910040", + "JobStartTime": "NA", + "JobStatus": "Completed", + "JobUntilTime": "NA", + "Message": "Job completed successfully.", + "MessageArguments": "NA", + "MessageID": "RED001", + "Name": "update:DCIM:INSTALLED#iDRAC.Embedded.1-1#IDRACinfo", + "PercentComplete": "100", + "Status": "Success" + } +error_info: + description: Details of the HTTP Error. 
+ returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + + +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + specs = { + "job_id": {"required": True, "type": 'str'} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + job_id, msg, failed = module.params.get('job_id'), {}, False + msg = idrac.job_mgr.get_job_status(job_id) + if msg.get('Status') == "Found Fault": + module.fail_json(msg="Job ID is invalid.") + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError) as e: + module.fail_json(msg=str(e)) + module.exit_json(msg="Successfully fetched the job info", job_info=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py new file mode 100644 index 00000000..984f8e3f --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_lifecycle_controller_jobs +short_description: Delete the Lifecycle Controller Jobs +version_added: "2.1.0" +description: + - Delete a Lifecycle Controller job using its job ID or delete all jobs. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + job_id: + type: str + description: + - Job ID of the specific job to be deleted. + - All the jobs in the job queue are deleted if this option is not specified. + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module does not support C(check_mode). +""" +EXAMPLES = """ +--- +- name: Delete Lifecycle Controller job queue + dellemc.openmanage.idrac_lifecycle_controller_jobs: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + +- name: Delete Lifecycle Controller job using a job ID + dellemc.openmanage.idrac_lifecycle_controller_jobs: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + job_id: "JID_801841929470" +""" +RETURN = """ +--- +msg: + type: str + description: Status of the delete operation. + returned: always + sample: 'Successfully deleted the job.' +status: + type: dict + description: Details of the delete operation. 
+ returned: success + sample: { + 'Message': 'The specified job was deleted', + 'MessageID': 'SUP020', + 'ReturnValue': '0' + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + + +def main(): + specs = { + "job_id": {"required": False, "type": 'str'} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=False) + try: + with iDRACConnection(module.params) as idrac: + job_id, resp = module.params.get('job_id'), {} + if job_id is not None: + resp = idrac.job_mgr.delete_job(job_id) + jobstr = "job" + else: + resp = idrac.job_mgr.delete_all_jobs() + jobstr = "job queue" + if resp["Status"] == "Error": + msg = "Failed to delete the Job: {0}.".format(job_id) + module.fail_json(msg=msg, status=resp) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (ImportError, ValueError, RuntimeError, TypeError) as e: + module.fail_json(msg=str(e)) + module.exit_json(msg="Successfully deleted the {0}.".format(jobstr), status=resp, changed=True) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py new file mode 100644 index 00000000..74606260 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_lifecycle_controller_logs +short_description: Export Lifecycle Controller logs to a network share or local path. +version_added: "2.1.0" +description: + - Export Lifecycle Controller logs to a given network share or local path. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + description: + - Network share or local path. + - CIFS, NFS network share types are supported. + type: str + required: True + share_user: + type: str + description: Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + share_password: + type: str + description: Network share user password. This option is mandatory for CIFS Network Share. + aliases: ['share_pwd'] + job_wait: + description: Whether to wait for the running job completion or not. + type: bool + default: True + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Rajeev Arakkal (@rajeevarakkal)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). 
+ - Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later. + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module does not support C(check_mode). +""" + +EXAMPLES = r''' +--- +- name: Export lifecycle controller logs to NFS share. + dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/nfsfileshare" + +- name: Export lifecycle controller logs to CIFS share. + dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: "share_user_name" + share_password: "share_user_pwd" + +- name: Export lifecycle controller logs to LOCAL path. + dellemc.openmanage.idrac_lifecycle_controller_logs: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/example/export_lc" +''' + +RETURN = """ +--- +msg: + type: str + description: Status of the export lifecycle controller logs job. + returned: always + sample: "Successfully exported the lifecycle controller logs." +lc_logs_status: + description: Status of the export operation along with job details and file path. + returned: success + type: dict + sample: { + "ElapsedTimeSinceCompletion": "0", + "InstanceID": "JID_274774785395", + "JobStartTime": "NA", + "JobStatus": "Completed", + "JobUntilTime": "NA", + "Message": "LCL Export was successful", + "MessageArguments": "NA", + "MessageID": "LC022", + "Name": "LC Export", + "PercentComplete": "100", + "Status": "Success", + "file": "192.168.0.0:/nfsfileshare/190.168.0.1_20210728_133437_LC_Log.log", + "retval": true + } +error_info: + description: Details of the HTTP Error. 
+ returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +import json +try: + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +def get_user_credentials(module): + share_username = module.params['share_user'] + share_password = module.params['share_password'] + work_group = None + if share_username is not None and "@" in share_username: + username_domain = share_username.split("@") + share_username = username_domain[0] + work_group = username_domain[1] + elif share_username is not None and "\\" in share_username: + username_domain = share_username.split("\\") + work_group = username_domain[0] + share_username = username_domain[1] + share = file_share_manager.create_share_obj(share_path=module.params['share_name'], + creds=UserCredentials(share_username, share_password, + work_group=work_group), isFolder=True) + return share + + +def run_export_lc_logs(idrac, module): + """ + Export Lifecycle Controller Log to the given file share + + Keyword arguments: + idrac -- iDRAC handle + module -- Ansible module + """ + + lclog_file_name_format = "%ip_%Y%m%d_%H%M%S_LC_Log.log" + share_username = module.params.get('share_user') + if 
(share_username is not None) and ("@" in share_username or "\\" in share_username): + myshare = get_user_credentials(module) + else: + myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'], + creds=UserCredentials(module.params['share_user'], + module.params['share_password']), + isFolder=True) + lc_log_file = myshare.new_file(lclog_file_name_format) + job_wait = module.params['job_wait'] + msg = idrac.log_mgr.lclog_export(lc_log_file, job_wait) + return msg + + +# Main() +def main(): + specs = { + "share_name": {"required": True, "type": 'str'}, + "share_user": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True}, + "job_wait": {"required": False, "type": 'bool', "default": True}, + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=False) + + try: + with iDRACConnection(module.params) as idrac: + msg = run_export_lc_logs(idrac, module) + if msg.get("Status") in ["Failed", "Failure"] or msg.get("JobStatus") in ["Failed", "Failure"]: + msg.pop("file", None) + module.fail_json(msg="Unable to export the lifecycle controller logs.", lc_logs_status=msg) + message = "Successfully exported the lifecycle controller logs." + if module.params['job_wait'] is False: + message = "The export lifecycle controller log job is submitted successfully." 
+ module.exit_json(msg=message, lc_logs_status=msg) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py new file mode 100644 index 00000000..3d3bddc0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_lifecycle_controller_status_info +short_description: Get the status of the Lifecycle Controller +version_added: "2.1.0" +description: + - This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Rajeev Arakkal (@rajeevarakkal)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Show status of the Lifecycle Controller + dellemc.openmanage.idrac_lifecycle_controller_status_info: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" +""" + +RETURN = r''' +--- +msg: + description: Overall status of fetching lifecycle controller status. + returned: always + type: str + sample: "Successfully fetched the lifecycle controller status." +lc_status_info: + description: Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server. + returned: success + type: dict + sample: { + "msg": { + "LCReady": true, + "LCStatus": "Ready" + } + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + + +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.basic import AnsibleModule +import json + + +def main(): + specs = {} + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + lcready = idrac.config_mgr.LCReady + lcstatus = idrac.config_mgr.LCStatus + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError) as e: + module.fail_json(msg=str(e)) + module.exit_json(msg="Successfully fetched the lifecycle controller status.", + lc_status_info={'LCReady': lcready, 'LCStatus': lcstatus}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py new file mode 100644 index 00000000..8f293016 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py @@ -0,0 +1,444 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_network +short_description: Configures the iDRAC network attributes +version_added: "2.1.0" +deprecated: + removed_at_date: "2024-07-31" + why: Replaced with M(dellemc.openmanage.idrac_attributes). + alternative: Use M(dellemc.openmanage.idrac_attributes) instead. + removed_from_collection: dellemc.openmanage +description: + - This module allows to configure iDRAC network settings. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain. + This option is mandatory for CIFS share. + - This option is deprecated and will be removed in the later version. + share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for network shares. + - This option is deprecated and will be removed in the later version. + setup_idrac_nic_vlan: + type: str + description: Allows to configure VLAN on iDRAC. + choices: [Enabled, Disabled] + register_idrac_on_dns: + type: str + description: Registers iDRAC on a Domain Name System (DNS). + choices: [Enabled, Disabled] + dns_idrac_name: + type: str + description: Name of the DNS to register iDRAC. 
+ auto_config: + type: str + description: Allows to enable or disable auto-provisioning to automatically acquire domain name from DHCP. + choices: [Enabled, Disabled] + static_dns: + type: str + description: Enter the static DNS domain name. + vlan_id: + type: int + description: Enter the VLAN ID. The VLAN ID must be a number from 1 through 4094. + vlan_priority: + type: int + description: Enter the priority for the VLAN ID. The priority value must be a number from 0 through 7. + enable_nic: + type: str + description: Allows to enable or disable the Network Interface Controller (NIC) used by iDRAC. + choices: [Enabled, Disabled] + nic_selection: + type: str + description: Select one of the available NICs. + choices: [Dedicated, LOM1, LOM2, LOM3, LOM4] + failover_network: + type: str + description: "Select one of the remaining LOMs. If a network fails, the traffic is routed through the failover + network." + choices: [ALL, LOM1, LOM2, LOM3, LOM4, T_None] + auto_detect: + type: str + description: Allows to auto detect the available NIC types used by iDRAC. + choices: [Enabled, Disabled] + auto_negotiation: + type: str + description: Allows iDRAC to automatically set the duplex mode and network speed. + choices: [Enabled, Disabled] + network_speed: + type: str + description: Select the network speed for the selected NIC. + choices: [T_10, T_100, T_1000] + duplex_mode: + type: str + description: Select the type of data transmission for the NIC. + choices: [Full, Half] + nic_mtu: + type: int + description: Maximum Transmission Unit of the NIC. + ip_address: + type: str + description: Enter a valid iDRAC static IPv4 address. + enable_dhcp: + type: str + description: Allows to enable or disable Dynamic Host Configuration Protocol (DHCP) in iDRAC. + choices: [Enabled, Disabled] + enable_ipv4: + type: str + description: Allows to enable or disable IPv4 configuration. 
+ choices: [Enabled, Disabled] + dns_from_dhcp: + type: str + description: Allows to enable DHCP to obtain DNS server address. + choices: [Enabled, Disabled] + static_dns_1: + type: str + description: Enter the preferred static DNS server IPv4 address. + static_dns_2: + type: str + description: Enter the preferred static DNS server IPv4 address. + static_gateway: + type: str + description: Enter the static IPv4 gateway address to iDRAC. + static_net_mask: + type: str + description: Enter the static IP subnet mask to iDRAC. +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure iDRAC network settings + dellemc.openmanage.idrac_network: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + register_idrac_on_dns: Enabled + dns_idrac_name: None + auto_config: None + static_dns: None + setup_idrac_nic_vlan: Enabled + vlan_id: 0 + vlan_priority: 1 + enable_nic: Enabled + nic_selection: Dedicated + failover_network: T_None + auto_detect: Disabled + auto_negotiation: Enabled + network_speed: T_1000 + duplex_mode: Full + nic_mtu: 1500 + ip_address: "192.168.0.1" + enable_dhcp: Enabled + enable_ipv4: Enabled + static_dns_1: "192.168.0.1" + static_dns_2: "192.168.0.1" + dns_from_dhcp: Enabled + static_gateway: None + static_net_mask: None +""" + +RETURN = r''' +--- +msg: + description: Successfully configured the idrac network settings. + returned: always + type: str + sample: "Successfully configured the idrac network settings." +network_status: + description: Status of the Network settings operation job. 
+ returned: success + type: dict + sample: { + "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob", + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_856418531008", + "@odata.type": "#DellJob.v1_0_2.DellJob", + "CompletionTime": "2020-03-31T03:04:15", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_856418531008", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import os +import tempfile +import json +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +try: + from omdrivers.enums.iDRAC.iDRAC import (DNSRegister_NICTypes, DNSDomainFromDHCP_NICStaticTypes, + Enable_NICTypes, VLanEnable_NICTypes, + Selection_NICTypes, Failover_NICTypes, + AutoDetect_NICTypes, Autoneg_NICTypes, + Speed_NICTypes, Duplex_NICTypes, DHCPEnable_IPv4Types, + DNSFromDHCP_IPv4Types, Enable_IPv4Types, + DNSFromDHCP_IPv4StaticTypes) + from omsdk.sdkfile import file_share_manager + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +def run_idrac_network_config(idrac, module): + idrac.use_redfish = True + share_path = tempfile.gettempdir() + os.sep + upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True) + if not upd_share.IsValid: + module.fail_json(msg="Unable to access the share. 
Ensure that the share name, " + "share mount, and share credentials provided are correct.") + idrac.config_mgr.set_liason_share(upd_share) + if module.params['register_idrac_on_dns'] is not None: + idrac.config_mgr.configure_dns( + register_idrac_on_dns=DNSRegister_NICTypes[module.params['register_idrac_on_dns']] + ) + if module.params['dns_idrac_name'] is not None: + idrac.config_mgr.configure_dns( + dns_idrac_name=module.params['dns_idrac_name'] + ) + if module.params['auto_config'] is not None: + idrac.config_mgr.configure_dns( + auto_config=DNSDomainFromDHCP_NICStaticTypes[module.params['auto_config']] + ) + if module.params['static_dns'] is not None: + idrac.config_mgr.configure_dns( + static_dns=module.params['static_dns'] + ) + + if module.params['setup_idrac_nic_vlan'] is not None: + idrac.config_mgr.configure_nic_vlan( + vlan_enable=VLanEnable_NICTypes[module.params['setup_idrac_nic_vlan']] + ) + if module.params['vlan_id'] is not None: + idrac.config_mgr.configure_nic_vlan( + vlan_id=module.params['vlan_id'] + ) + if module.params['vlan_priority'] is not None: + idrac.config_mgr.configure_nic_vlan( + vlan_priority=module.params['vlan_priority'] + ) + + if module.params['enable_nic'] is not None: + idrac.config_mgr.configure_network_settings( + enable_nic=Enable_NICTypes[module.params['enable_nic']] + ) + if module.params['nic_selection'] is not None: + idrac.config_mgr.configure_network_settings( + nic_selection=Selection_NICTypes[module.params['nic_selection']] + ) + if module.params['failover_network'] is not None: + idrac.config_mgr.configure_network_settings( + failover_network=Failover_NICTypes[module.params['failover_network']] + ) + if module.params['auto_detect'] is not None: + idrac.config_mgr.configure_network_settings( + auto_detect=AutoDetect_NICTypes[module.params['auto_detect']] + ) + if module.params['auto_negotiation'] is not None: + idrac.config_mgr.configure_network_settings( + 
auto_negotiation=Autoneg_NICTypes[module.params['auto_negotiation']] + ) + if module.params['network_speed'] is not None: + idrac.config_mgr.configure_network_settings( + network_speed=Speed_NICTypes[module.params['network_speed']] + ) + if module.params['duplex_mode'] is not None: + idrac.config_mgr.configure_network_settings( + duplex_mode=Duplex_NICTypes[module.params['duplex_mode']] + ) + if module.params['nic_mtu'] is not None: + idrac.config_mgr.configure_network_settings( + nic_mtu=module.params['nic_mtu'] + ) + + if module.params['enable_dhcp'] is not None: + idrac.config_mgr.configure_ipv4( + enable_dhcp=DHCPEnable_IPv4Types[module.params["enable_dhcp"]] + ) + if module.params['ip_address'] is not None: + idrac.config_mgr.configure_ipv4( + ip_address=module.params["ip_address"] + ) + if module.params['enable_ipv4'] is not None: + idrac.config_mgr.configure_ipv4( + enable_ipv4=Enable_IPv4Types[module.params["enable_ipv4"]] + ) + if module.params['dns_from_dhcp'] is not None: + idrac.config_mgr.configure_static_ipv4( + dns_from_dhcp=DNSFromDHCP_IPv4StaticTypes[module.params["dns_from_dhcp"]] + ) + if module.params['static_dns_1'] is not None: + idrac.config_mgr.configure_static_ipv4( + dns_1=module.params["static_dns_1"] + ) + if module.params['static_dns_2'] is not None: + idrac.config_mgr.configure_static_ipv4( + dns_2=module.params["static_dns_2"] + ) + if module.params['static_gateway'] is not None: + idrac.config_mgr.configure_static_ipv4( + gateway=module.params["static_gateway"] + ) + if module.params['static_net_mask'] is not None: + idrac.config_mgr.configure_static_ipv4( + net_mask=module.params["static_net_mask"] + ) + + if module.check_mode: + msg = idrac.config_mgr.is_change_applicable() + else: + msg = idrac.config_mgr.apply_changes(reboot=False) + return msg + + +# Main +def main(): + specs = { + # Export Destination + "share_name": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', "aliases": 
['share_pwd'], "no_log": True}, + "share_user": {"required": False, "type": 'str'}, + "share_mnt": {"required": False, "type": 'str'}, + + # setup DNS + "register_idrac_on_dns": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None}, + "dns_idrac_name": {"required": False, "default": None, "type": 'str'}, + "auto_config": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None, 'type': 'str'}, + "static_dns": {"required": False, "default": None, "type": "str"}, + + # set up idrac vlan + "setup_idrac_nic_vlan": {"required": False, "choices": ['Enabled', 'Disabled']}, + "vlan_id": {"required": False, "type": 'int'}, + "vlan_priority": {"required": False, "type": 'int'}, + + # set up NIC + "enable_nic": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None}, + "nic_selection": {"required": False, "choices": ['Dedicated', 'LOM1', 'LOM2', 'LOM3', 'LOM4'], "default": None}, + "failover_network": {"required": False, "choices": ['ALL', 'LOM1', 'LOM2', 'LOM3', 'LOM4', 'T_None'], + "default": None}, + "auto_detect": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None}, + "auto_negotiation": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None}, + "network_speed": {"required": False, "choices": ['T_10', 'T_100', 'T_1000'], "default": None}, + "duplex_mode": {"required": False, "choices": ['Full', 'Half'], "default": None}, + "nic_mtu": {"required": False, 'type': 'int'}, + + # setup iDRAC IPV4 + "ip_address": {"required": False, "default": None, "type": "str"}, + "enable_dhcp": {"required": False, "choices": ["Enabled", "Disabled"], "default": None}, + "enable_ipv4": {"required": False, "choices": ["Enabled", "Disabled"], "default": None}, + + # setup iDRAC Static IPv4 + "dns_from_dhcp": {"required": False, "choices": ["Enabled", "Disabled"], "default": None}, + "static_dns_1": {"required": False, "default": None, "type": "str"}, + "static_dns_2": {"required": False, "default": None, 
"type": "str"}, + "static_gateway": {"required": False, "type": "str"}, + "static_net_mask": {"required": False, "type": "str"}, + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + + try: + with iDRACConnection(module.params) as idrac: + msg = run_idrac_network_config(idrac, module) + changed, failed = False, False + if msg.get('Status') == "Success": + changed = True + if msg.get('Message') == "No changes found to commit!": + changed = False + if "No changes were applied" in msg.get('Message'): + changed = False + elif msg.get('Status') == "Failed": + failed = True + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except AttributeError as err: + if "NoneType" in str(err): + module.fail_json(msg="Unable to access the share. Ensure that the share name, " + "share mount, and share credentials provided are correct.") + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError) as e: + module.fail_json(msg=str(e)) + module.exit_json(msg="Successfully configured the idrac network settings.", + network_status=msg, changed=changed, failed=failed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py new file mode 100644 index 00000000..797534e3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: idrac_os_deployment +short_description: Boot to a network ISO image +version_added: "2.1.0" +description: Boot to a network ISO image. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + share_name: + required: True + description: CIFS or NFS Network share. + type: str + share_user: + description: Network share user in the format 'user@domain' or 'domain\\user' if user is + part of a domain else 'user'. This option is mandatory for CIFS Network Share. + type: str + share_password: + description: Network share user password. This option is mandatory for CIFS Network Share. + type: str + aliases: ['share_pwd'] + iso_image: + required: True + description: Network ISO name. + type: str + expose_duration: + description: It is the time taken in minutes for the ISO image file to be exposed as a local CD-ROM device to + the host server. When the time expires, the ISO image gets automatically detached. + type: int + default: 1080 +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Jagadeesh N V (@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module does not support C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Boot to Network ISO + dellemc.openmanage.idrac_os_deployment: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.0:/nfsfileshare" + iso_image: "unattended_os_image.iso" + expose_duration: 180 +''' + +RETURN = r''' +--- +msg: + type: str + description: Over all device information status. 
+ returned: on error + sample: "Failed to boot to network iso" +boot_status: + description: Details of the boot to network ISO image operation. + returned: always + type: dict + sample: { + "DeleteOnCompletion": "false", + "InstanceID": "DCIM_OSDConcreteJob:1", + "JobName": "BootToNetworkISO", + "JobStatus": "Success", + "Message": "The command was successful.", + "MessageID": "OSD1", + "Name": "BootToNetworkISO", + "Status": "Success", + "file": "192.168.0.0:/nfsfileshare/unattended_os_image.iso", + "retval": true + } +''' + + +import os +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +try: + from omsdk.sdkfile import FileOnShare + from omsdk.sdkcreds import UserCredentials +except ImportError: + pass + + +def minutes_to_cim_format(module, dur_minutes): + try: + if dur_minutes < 0: + module.fail_json(msg="Invalid value for ExposeDuration.") + MIN_PER_HOUR = 60 + MIN_PER_DAY = 1440 + days = dur_minutes // MIN_PER_DAY + minutes = dur_minutes % MIN_PER_DAY + hours = minutes // MIN_PER_HOUR + minutes = minutes % MIN_PER_HOUR + if days > 0: + hours = 23 + cim_format = "{:08d}{:02d}{:02d}00.000000:000" + cim_time = cim_format.format(days, hours, minutes) + except Exception: + module.fail_json(msg="Invalid value for ExposeDuration.") + return cim_time + + +def run_boot_to_network_iso(idrac, module): + """Boot to a network ISO image""" + try: + share_name = module.params['share_name'] + if share_name is None: + share_name = '' + share_obj = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, module.params['iso_image']), + isFolder=False, creds=UserCredentials(module.params['share_user'], + module.params['share_password']) + ) + cim_exp_duration = minutes_to_cim_format(module, module.params['expose_duration']) + boot_status = idrac.config_mgr.boot_to_network_iso(share_obj, "", expose_duration=cim_exp_duration) + if not 
boot_status.get("Status", False) == "Success": + module.fail_json(msg=boot_status) + except Exception as e: + module.fail_json(msg=str(e)) + return boot_status + + +def main(): + specs = { + "share_name": {"required": True, "type": 'str'}, + "share_user": {"required": False, "type": 'str'}, + "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True}, + "iso_image": {"required": True, "type": 'str'}, + "expose_duration": {"required": False, "type": 'int', "default": 1080} + } + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=False) + + try: + with iDRACConnection(module.params) as idrac: + boot_status = run_boot_to_network_iso(idrac, module) + module.exit_json(changed=True, boot_status=boot_status) + except (ImportError, ValueError, RuntimeError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py new file mode 100644 index 00000000..a506e5ce --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py @@ -0,0 +1,773 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.3.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: idrac_redfish_storage_controller +short_description: Configures the physical disk, virtual disk, and storage controller settings +version_added: "2.1.0" +description: + - This module allows the users to configure the settings of the physical disk, virtual disk, + and storage controller. 
+extends_documentation_fragment: + - dellemc.openmanage.redfish_auth_options +options: + command: + description: + - These actions may require a system reset, depending on the capabilities of the controller. + - C(ResetConfig) - Deletes all the virtual disks and unassigns all hot spares on physical disks. + I(controller_id) is required for this operation. + - C(AssignSpare) - Assigns a physical disk as a dedicated or global hot spare for a virtual disk. + I(target) is required for this operation. + - C(SetControllerKey) - Sets the key on controllers, which is used to encrypt the drives in Local + Key Management(LKM). I(controller_id), I(key), and I(key_id) are required for this operation. + - C(RemoveControllerKey) - Deletes the encryption key on the controller. + I(controller_id) is required for this operation. + - C(ReKey) - Resets the key on the controller and it always reports as changes found when check mode is enabled. + I(controller_id), I(old_key), I(key_id), and I(key) are required for this operation. + - C(UnassignSpare) - To unassign the Global or Dedicated hot spare. I(target) is required for this operation. + - C(EnableControllerEncryption) - To enable Local Key Management (LKM) or Secure Enterprise Key Manager (SEKM) + on controllers that support encryption of the drives. I(controller_id), I(key), and I(key_id) are required + for this operation. + - C(BlinkTarget) - Blinks the target virtual drive or physical disk and it always reports as changes found + when check mode is enabled. I(target) or I(volume_id) is required for this operation. + - C(UnBlinkTarget) - Unblink the target virtual drive or physical disk and it always reports as changes + found when check mode is enabled. I(target) or I(volume_id) is required for this operation. + - C(ConvertToRAID) - Converts the disk from non-Raid to Raid. I(target) is required for this operation. + - C(ConvertToNonRAID) - Converts the disk from Raid to non-Raid. I(target) is required for this operation. 
+ - C(ChangePDStateToOnline) - To set the disk status to online. I(target) is required for this operation. + - C(ChangePDStateToOffline) - To set the disk status to offline. I(target) is required for this operation. + - C(LockVirtualDisk) - To encrypt the virtual disk. I(volume_id) is required for this operation. + choices: [ResetConfig, AssignSpare, SetControllerKey, RemoveControllerKey, ReKey, UnassignSpare, + EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID, + ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk] + default: AssignSpare + type: str + target: + description: + - Fully Qualified Device Descriptor (FQDD) of the target physical drive. + - This is mandatory when I(command) is C(AssignSpare), C(UnassignSpare), + C(ChangePDStateToOnline), C(ChangePDStateToOffline), C(ConvertToRAID), or C(ConvertToNonRAID). + - If I(volume_id) is not specified or empty, this physical drive will be + assigned as a global hot spare when I(command) is C(AssignSpare). + - "Notes: Global or Dedicated hot spare can be assigned only once for a physical disk, + Re-assign cannot be done when I(command) is C(AssignSpare)." + type: list + elements: str + aliases: [drive_id] + volume_id: + description: + - Fully Qualified Device Descriptor (FQDD) of the volume. + - Applicable if I(command) is C(AssignSpare), C(BlinkTarget), C(UnBlinkTarget) or C(LockVirtualDisk). + - I(volume_id) or I(target) is required when the I(command) is C(BlinkTarget) or C(UnBlinkTarget), + if both are specified I(target) is considered. + - To know the number of volumes to which a hot spare can be assigned, refer iDRAC Redfish API documentation. + type: list + elements: str + controller_id: + description: + - Fully Qualified Device Descriptor (FQDD) of the storage controller. For example-'RAID.Slot.1-1'. + - This option is mandatory when I(command) is C(ResetConfig), C(SetControllerKey), + C(RemoveControllerKey), C(ReKey), or C(EnableControllerEncryption). 
+ type: str + key: + description: + - A new security key passphrase that the encryption-capable controller uses to create the + encryption key. The controller uses the encryption key to lock or unlock access to the + Self-Encrypting Drive (SED). Only one encryption key can be created for each controller. + - This is mandatory when I(command) is C(SetControllerKey), C(ReKey), or C(EnableControllerEncryption) + and when I(mode) is C(LKM). + - The length of the key can be a maximum of 32 characters in length, where the expanded form of + the special character is counted as a single character. + - "The key must contain at least one character from each of the character classes: uppercase, + lowercase, number, and special character." + type: str + key_id: + description: + - This is a user supplied text label associated with the passphrase. + - This is mandatory when I(command) is C(SetControllerKey), C(ReKey), or C(EnableControllerEncryption) + and when I(mode) is C(LKM). + - The length of I(key_id) can be a maximum of 32 characters in length and should not have any spaces. + type: str + old_key: + description: + - Security key passphrase used by the encryption-capable controller. + - This option is mandatory when I(command) is C(ReKey) and I(mode) is C(LKM). + type: str + mode: + description: + - Encryption mode of the encryption capable controller. + - This option is applicable only when I(command) is C(ReKey) or C(EnableControllerEncryption). + - C(SEKM) requires secure enterprise key manager license on the iDRAC. + - C(LKM) to choose mode as local key mode. + choices: [LKM, SEKM] + default: LKM + type: str + job_wait: + description: + - Provides the option if the module has to wait for the job to be completed. + type: bool + default: False + job_wait_timeout: + description: + - The maximum wait time of job completion in seconds before the job tracking is stopped. + - This option is applicable when I(job_wait) is C(True). 
+ type: int + default: 120 +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V (@jagadeeshnv)" + - "Felix Stephen (@felixs88)" + - "Husniya Hameed (@husniya_hameed)" +notes: + - Run this module from a system that has direct access to Dell iDRAC. + - This module always reports as changes found when C(ReKey), C(BlinkTarget), and C(UnBlinkTarget). + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Assign dedicated hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + volume_id: + - "Disk.Virtual.0:RAID.Slot.1-1" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_dedicated_hot_spare + +- name: Assign global hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - assign_global_hot_spare + +- name: Unassign hot spare + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + command: UnassignSpare + tags: + - un-assign-hot-spare + +- name: Set controller encryption key + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "SetControllerKey" + controller_id: "RAID.Slot.1-1" + key: "PassPhrase@123" + key_id: "mykeyid123" + tags: + - set_controller_key + +- name: Rekey in LKM mode + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: 
"RAID.Slot.1-1" + key: "NewPassPhrase@123" + key_id: "newkeyid123" + old_key: "OldPassPhrase@123" + tags: + - rekey_lkm + +- name: Rekey in SEKM mode + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ReKey" + controller_id: "RAID.Slot.1-1" + mode: "SEKM" + tags: + - rekey_sekm + +- name: Remove controller key + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "RemoveControllerKey" + controller_id: "RAID.Slot.1-1" + tags: + - remove_controller_key + +- name: Reset controller configuration + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ResetConfig" + controller_id: "RAID.Slot.1-1" + tags: + - reset_config + +- name: Enable controller encryption + idrac_redfish_storage_controller: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + ca_path: "/path/to/ca_cert.pem" + command: "EnableControllerEncryption" + controller_id: "RAID.Slot.1-1" + mode: "LKM" + key: "your_Key@123" + key_id: "your_Keyid@123" + tags: + - enable-encrypt + +- name: Blink physical disk. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: BlinkTarget + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - blink-target + +- name: Blink virtual drive. 
+ dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: BlinkTarget + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - blink-volume + +- name: Unblink physical disk. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: UnBlinkTarget + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - unblink-target + +- name: Unblink virtual drive. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: UnBlinkTarget + volume_id: "Disk.Virtual.0:RAID.Slot.1-1" + tags: + - unblink-drive + +- name: Convert physical disk to RAID + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-raid + +- name: Convert physical disk to non-RAID + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ConvertToNonRAID" + target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - convert-non-raid + +- name: Change physical disk state to online. + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOnline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-online + +- name: Change physical disk state to offline. 
+ dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "ChangePDStateToOffline" + target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1" + tags: + - pd-state-offline + +- name: Lock virtual drive + dellemc.openmanage.idrac_redfish_storage_controller: + baseuri: "192.168.0.1:443" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: "LockVirtualDisk" + volume_id: "Disk.Virtual.0:RAID.SL.3-1" + tags: + - lock +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the storage controller configuration operation. + returned: always + sample: "Successfully submitted the job that performs the AssignSpare operation" +task: + type: dict + description: ID and URI resource of the job created. + returned: success + sample: { + "id": "JID_XXXXXXXXXXXXX", + "uri": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX" + } +status: + type: dict + description: status of the submitted job. + returned: always + sample: { + "ActualRunningStartTime": "2022-02-09T04:42:41", + "ActualRunningStopTime": "2022-02-09T04:44:00", + "CompletionTime": "2022-02-09T04:44:00", + "Description": "Job Instance", + "EndTime": "TIME_NA", + "Id": "JID_444033604418", + "JobState": "Completed", + "JobType": "RealTimeNoRebootConfiguration", + "Message": "Job completed successfully.", + "MessageArgs":[], + "MessageId": "PR19", + "Name": "Configure: RAID.Integrated.1-1", + "PercentComplete": 100, + "StartTime": "2022-02-09T04:42:40", + "TargetSettingsURI": null + } +error_info: + type: dict + description: Details of a http error. 
+ returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to run the method because the requested HTTP method is not allowed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "iDRAC.1.6.SYS402", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "Enter a valid HTTP method and retry the operation. For information about + valid methods, see the Redfish Users Guide available on the support site.", + "Severity": "Informational" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information" + } + } +''' + + +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import wait_for_job_completion, strip_substr_dict +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +SYSTEM_ID = "System.Embedded.1" +MANAGER_ID = "iDRAC.Embedded.1" +RAID_ACTION_URI = "/redfish/v1/Systems/{system_id}/Oem/Dell/DellRaidService/Actions/DellRaidService.{action}" +CONTROLLER_URI = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{controller_id}" +VOLUME_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Volumes" +PD_URI = "/redfish/v1/Systems/System.Embedded.1/Storage/{controller_id}/Drives/{drive_id}" +JOB_URI_OEM = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}" + +JOB_SUBMISSION = "Successfully submitted the job that performs the '{0}' operation." +JOB_COMPLETION = "Successfully performed the '{0}' operation." +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." 
TARGET_ERR_MSG = "The Fully Qualified Device Descriptor (FQDD) of the target {0} must be only one."
PD_ERROR_MSG = "Unable to locate the physical disk with the ID: {0}"
ENCRYPT_ERR_MSG = "The storage controller '{0}' does not support encryption."
PHYSICAL_DISK_ERR = "Volume is not encryption capable."


def check_id_exists(module, redfish_obj, key, item_id, uri):
    """Fail the module run if the resource identified by ``item_id`` does not exist.

    Issues a GET on ``uri`` (formatted with the global SYSTEM_ID and ``item_id``)
    and calls ``module.fail_json`` when the request is unsuccessful or raises
    ``HTTPError``.
    """
    msg = "{0} with id '{1}' not found in system".format(key, item_id)
    try:
        resp = redfish_obj.invoke_request("GET", uri.format(system_id=SYSTEM_ID, controller_id=item_id))
        if not resp.success:
            module.fail_json(msg=msg)
    except HTTPError as err:
        module.fail_json(msg=msg, error_info=json.load(err))


def ctrl_key(module, redfish_obj):
    """Handle the controller security-key commands.

    Covers SetControllerKey, ReKey, RemoveControllerKey and
    EnableControllerEncryption.  Exits early (via ``module.exit_json``) for
    check mode and for idempotent no-op situations; otherwise POSTs the RAID
    action and returns ``(resp, job_uri, job_id)`` for job tracking.
    """
    resp, job_uri, job_id, payload = None, None, None, {}
    controller_id = module.params.get("controller_id")
    command, mode = module.params["command"], module.params["mode"]
    key, key_id = module.params.get("key"), module.params.get("key_id")
    check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
    ctrl_resp = redfish_obj.invoke_request("GET", CONTROLLER_URI.format(system_id=SYSTEM_ID,
                                                                        controller_id=controller_id))
    security_status = ctrl_resp.json_data.get("SecurityStatus")
    if security_status == "EncryptionNotCapable":
        module.fail_json(msg=ENCRYPT_ERR_MSG.format(controller_id))
    ctrl_key_id = ctrl_resp.json_data.get("KeyID")
    if command == "SetControllerKey":
        if module.check_mode and ctrl_key_id is None:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif ctrl_key_id is not None:
            # A key is already assigned, so setting one is a no-op regardless of check mode.
            module.exit_json(msg=NO_CHANGES_FOUND)
        payload = {"TargetFQDD": controller_id, "Key": key, "Keyid": key_id}
    elif command == "ReKey":
        # Re-keying always changes state, so check mode always reports a change.
        if module.check_mode:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        if mode == "LKM":
            payload = {"TargetFQDD": controller_id, "Mode": mode, "NewKey": key,
                       "Keyid": key_id, "OldKey": module.params.get("old_key")}
        else:
            payload = {"TargetFQDD": controller_id, "Mode": mode}
    elif command == "RemoveControllerKey":
        if module.check_mode and ctrl_key_id is not None:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif ctrl_key_id is None:
            # No key assigned, nothing to remove.
            module.exit_json(msg=NO_CHANGES_FOUND)
        payload = {"TargetFQDD": controller_id}
    elif command == "EnableControllerEncryption":
        if module.check_mode and security_status != "SecurityKeyAssigned":
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif security_status == "SecurityKeyAssigned":
            # Encryption already enabled on the controller.
            module.exit_json(msg=NO_CHANGES_FOUND)
        payload = {"TargetFQDD": controller_id, "Mode": mode}
        if mode == "LKM":
            payload["Key"] = key
            payload["Keyid"] = key_id
    resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID, action=command),
                                      data=payload)
    job_uri = resp.headers.get("Location")
    job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def ctrl_reset_config(module, redfish_obj):
    """Reset the controller configuration (ResetConfig command).

    Reports a change only when the controller still holds volumes; otherwise
    exits with NO_CHANGES_FOUND.  Returns ``(resp, job_uri, job_id)``.
    """
    resp, job_uri, job_id = None, None, None
    controller_id = module.params.get("controller_id")
    check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
    member_resp = redfish_obj.invoke_request("GET", VOLUME_URI.format(system_id=SYSTEM_ID, controller_id=controller_id))
    members = member_resp.json_data.get("Members")
    if module.check_mode and members:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    elif not members:
        # No volumes on the controller: resetting its config would change nothing.
        module.exit_json(msg=NO_CHANGES_FOUND)
    else:
        resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                         action=module.params["command"]),
                                          data={"TargetFQDD": controller_id})
        job_uri = resp.headers.get("Location")
        job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def hot_spare_config(module, redfish_obj):
    """Assign or unassign a physical disk as hot spare (AssignSpare/UnassignSpare).

    The controller FQDD is taken from the last ``:``-separated segment of the
    drive FQDD.  Returns ``(resp, job_uri, job_id)``.
    """
    target, command = module.params.get("target"), module.params["command"]
    resp, job_uri, job_id = None, None, None
    volume = module.params.get("volume_id")
    controller_id = target[0].split(":")[-1]
    drive_id = target[0]
    try:
        pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=drive_id))
    except HTTPError:
        module.fail_json(msg=PD_ERROR_MSG.format(drive_id))
    else:
        hot_spare = pd_resp.json_data.get("HotspareType")
        if module.check_mode and ((hot_spare == "None" and command == "AssignSpare") or
                                  (hot_spare != "None" and command == "UnassignSpare")):
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif (hot_spare in ["Dedicated", "Global"] and command == "AssignSpare") or \
                (hot_spare == "None" and command == "UnassignSpare"):
            # Already in the requested hot-spare state.
            module.exit_json(msg=NO_CHANGES_FOUND)
        else:
            payload = {"TargetFQDD": drive_id}
            if volume is not None and command == "AssignSpare":
                payload["VirtualDiskArray"] = volume
            resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                             action=command),
                                              data=payload)
            job_uri = resp.headers.get("Location")
            job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def change_pd_status(module, redfish_obj):
    """Move a physical disk Online or Offline (ChangePDStateToOnline/Offline).

    Returns ``(resp, job_uri, job_id)``.
    """
    resp, job_uri, job_id = None, None, None
    command, target = module.params["command"], module.params.get("target")
    controller_id = target[0].split(":")[-1]
    drive_id = target[0]
    state = "Online" if command == "ChangePDStateToOnline" else "Offline"
    try:
        pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=drive_id))
        raid_status = pd_resp.json_data["Oem"]["Dell"]["DellPhysicalDisk"]["RaidStatus"]
    except HTTPError:
        module.fail_json(msg=PD_ERROR_MSG.format(drive_id))
    else:
        if module.check_mode and state != raid_status:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif state == raid_status:
            # Disk is already in the requested RAID state.
            module.exit_json(msg=NO_CHANGES_FOUND)
        else:
            resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                             action="ChangePDState"),
                                              data={"TargetFQDD": drive_id, "State": state})
            job_uri = resp.headers.get("Location")
            job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def convert_raid_status(module, redfish_obj):
    """Convert physical disks to RAID or non-RAID mode (ConvertToRAID/ConvertToNonRAID).

    Gathers each target disk's RaidStatus first; reports NO_CHANGES_FOUND only
    when every disk is already in the requested mode.  Returns
    ``(resp, job_uri, job_id)``.
    """
    resp, job_uri, job_id = None, None, None
    command, target = module.params["command"], module.params.get("target")
    ctrl, pd_ready_state = None, []
    try:
        for ctrl in target:
            controller_id = ctrl.split(":")[-1]
            pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=ctrl))
            raid_status = pd_resp.json_data["Oem"]["Dell"]["DellPhysicalDisk"]["RaidStatus"]
            pd_ready_state.append(raid_status)
    except HTTPError:
        # ``ctrl`` holds the disk FQDD that failed the lookup.
        module.fail_json(msg=PD_ERROR_MSG.format(ctrl))
    else:
        if module.check_mode and ((command == "ConvertToRAID" and pd_ready_state.count("NonRAID") > 0) or
                                  (command == "ConvertToNonRAID" and pd_ready_state.count("Ready") > 0)):
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif (command == "ConvertToRAID" and len(pd_ready_state) == pd_ready_state.count("Ready")) or \
                (command == "ConvertToNonRAID" and len(pd_ready_state) == pd_ready_state.count("NonRAID")):
            module.exit_json(msg=NO_CHANGES_FOUND)
        else:
            resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                             action=command),
                                              data={"PDArray": target})
            job_uri = resp.headers.get("Location")
            job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def target_identify_pattern(module, redfish_obj):
    """Blink or unblink a target drive/volume LED (BlinkTarget/UnBlinkTarget).

    When both a target disk and a volume are supplied, the disk takes
    precedence.  Returns the POST response.

    NOTE(review): check mode always reports a change here — the LED state is
    not queried first; confirm this is intentional.
    """
    target, volume = module.params.get("target"), module.params.get("volume_id")
    command = module.params.get("command")
    payload = {"TargetFQDD": None}

    if target is not None:
        payload = {"TargetFQDD": target[0]}
    elif volume is not None:
        payload = {"TargetFQDD": volume[0]}

    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                     action=command),
                                      data=payload)
    return resp


def lock_virtual_disk(module, redfish_obj):
    """Lock a virtual disk (LockVirtualDisk).

    Fails if any member drive is not a self-encrypting drive; no-op when the
    volume is already locked.  Returns ``(resp, job_uri, job_id)``.
    """
    volume, command = module.params.get("volume_id"), module.params["command"]
    resp, job_uri, job_id = None, None, None
    controller_id = volume[0].split(":")[-1]
    check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
    volume_uri = VOLUME_URI + "/{volume_id}"
    try:
        volume_resp = redfish_obj.invoke_request("GET", volume_uri.format(system_id=SYSTEM_ID,
                                                                          controller_id=controller_id,
                                                                          volume_id=volume[0]))
        links = volume_resp.json_data.get("Links")
        if links:
            for disk in volume_resp.json_data.get("Links").get("Drives"):
                drive_link = disk["@odata.id"]
                drive_resp = redfish_obj.invoke_request("GET", drive_link)
                encryption_ability = drive_resp.json_data.get("EncryptionAbility")
                if encryption_ability != "SelfEncryptingDrive":
                    module.fail_json(msg=PHYSICAL_DISK_ERR)
        lock_status = volume_resp.json_data.get("Oem").get("Dell").get("DellVolume").get("LockStatus")
    except HTTPError:
        module.fail_json(msg=PD_ERROR_MSG.format(controller_id))
    else:
        if lock_status == "Unlocked" and module.check_mode:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif lock_status == "Locked":
            module.exit_json(msg=NO_CHANGES_FOUND)
        else:
            resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
                                                                             action="LockVirtualDisk"),
                                              data={"TargetFQDD": volume[0]})
            job_uri = resp.headers.get("Location")
            job_id = job_uri.split("/")[-1]
    return resp, job_uri, job_id


def validate_inputs(module):
    """Validate cross-parameter constraints that ``required_if`` cannot express.

    - ReKey in LKM mode needs key, key_id and old_key.
    - EnableControllerEncryption in LKM mode needs key and key_id.
    - Commands that address a single device accept at most one target/volume.
    Fails the module on violation; returns None.
    """
    module_params = module.params
    command = module_params.get("command")
    mode = module_params.get("mode")
    if command == "ReKey" and mode == "LKM":
        key = module_params.get("key")
        key_id = module_params.get("key_id")
        old_key = module_params.get("old_key")
        if not all([key, key_id, old_key]):
            module.fail_json(msg="All of the following: key, key_id and old_key are "
                                 "required for '{0}' operation.".format(command))
    elif command == "EnableControllerEncryption" and mode == "LKM":
        key = module_params.get("key")
        key_id = module_params.get("key_id")
        if not all([key, key_id]):
            module.fail_json(msg="All of the following: key, key_id are "
                                 "required for '{0}' operation.".format(command))
    elif command in ["AssignSpare", "UnassignSpare", "BlinkTarget", "UnBlinkTarget", "LockVirtualDisk"]:
        target, volume = module_params.get("target"), module_params.get("volume_id")
        if target is not None and len(target) > 1:
            module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))
        if volume is not None and len(volume) > 1:
            module.fail_json(msg=TARGET_ERR_MSG.format("virtual drive"))
    elif command in ["ChangePDStateToOnline", "ChangePDStateToOffline"]:
        target = module.params.get("target")
        if target is not None and len(target) > 1:
            module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))


def main():
    """Module entry point: dispatch the requested RAID command and track its job."""
    specs = {
        "command": {"required": False, "default": "AssignSpare",
                    "choices": ["ResetConfig", "AssignSpare", "SetControllerKey", "RemoveControllerKey",
                                "ReKey", "UnassignSpare", "EnableControllerEncryption", "BlinkTarget",
                                "UnBlinkTarget", "ConvertToRAID", "ConvertToNonRAID", "ChangePDStateToOnline",
                                "ChangePDStateToOffline", "LockVirtualDisk"]},
        "controller_id": {"required": False, "type": "str"},
        "volume_id": {"required": False, "type": "list", "elements": "str"},
        "target": {"required": False, "type": "list", "elements": "str", "aliases": ["drive_id"]},
        "key": {"required": False, "type": "str", "no_log": True},
        "key_id": {"required": False, "type": "str"},
        "old_key": {"required": False, "type": "str", "no_log": True},
        "mode": {"required": False, "choices": ["LKM", "SEKM"], "default": "LKM"},
        "job_wait": {"required": False, "type": "bool", "default": False},
        "job_wait_timeout": {"required": False, "type": "int", "default": 120}
    }
    specs.update(redfish_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ["command", "SetControllerKey", ["controller_id", "key", "key_id"]],
            ["command", "ReKey", ["controller_id", "mode"]], ["command", "ResetConfig", ["controller_id"]],
            ["command", "RemoveControllerKey", ["controller_id"]], ["command", "AssignSpare", ["target"]],
            ["command", "UnassignSpare", ["target"]], ["command", "EnableControllerEncryption", ["controller_id"]],
            ["command", "BlinkTarget", ["target", "volume_id"], True],
            ["command", "UnBlinkTarget", ["target", "volume_id"], True], ["command", "ConvertToRAID", ["target"]],
            ["command", "ConvertToNonRAID", ["target"]], ["command", "ChangePDStateToOnline", ["target"]],
            ["command", "ChangePDStateToOffline", ["target"]],
            ["command", "LockVirtualDisk", ["volume_id"]]
        ],
        supports_check_mode=True)
    validate_inputs(module)
    try:
        command = module.params["command"]
        resp, job_uri, job_id = None, None, None
        with Redfish(module.params, req_session=True) as redfish_obj:
            if command == "ResetConfig":
                resp, job_uri, job_id = ctrl_reset_config(module, redfish_obj)
            elif command in ("SetControllerKey", "ReKey", "RemoveControllerKey", "EnableControllerEncryption"):
                resp, job_uri, job_id = ctrl_key(module, redfish_obj)
            elif command in ("AssignSpare", "UnassignSpare"):
                resp, job_uri, job_id = hot_spare_config(module, redfish_obj)
            elif command in ("BlinkTarget", "UnBlinkTarget"):
                resp = target_identify_pattern(module, redfish_obj)
                if resp.success and resp.status_code == 200:
                    module.exit_json(msg=JOB_COMPLETION.format(command), changed=True)
            elif command in ("ConvertToRAID", "ConvertToNonRAID"):
                resp, job_uri, job_id = convert_raid_status(module, redfish_obj)
            elif command in ("ChangePDStateToOnline", "ChangePDStateToOffline"):
                resp, job_uri, job_id = change_pd_status(module, redfish_obj)
            elif command == "LockVirtualDisk":
                resp, job_uri, job_id = lock_virtual_disk(module, redfish_obj)
            if job_id is None:
                # Bug fix: a BlinkTarget/UnBlinkTarget request that does not return
                # HTTP 200 previously fell through and raised UnboundLocalError on
                # job_id, surfacing an opaque Python error; fail explicitly instead.
                module.fail_json(msg="Unable to track the job for the '{0}' operation.".format(command))
            oem_job_url = JOB_URI_OEM.format(job_id=job_id)
            job_wait = module.params["job_wait"]
            if job_wait:
                resp, msg = wait_for_job_completion(redfish_obj, oem_job_url, job_wait=job_wait,
                                                    wait_timeout=module.params["job_wait_timeout"])
                job_data = strip_substr_dict(resp.json_data)
                if job_data["JobState"] == "Failed":
                    changed, failed = False, True
                else:
                    changed, failed = True, False
                module.exit_json(msg=JOB_COMPLETION.format(command), task={"id": job_id, "uri": oem_job_url},
                                 status=job_data, changed=changed, failed=failed)
            else:
                resp, msg = wait_for_job_completion(redfish_obj, oem_job_url, job_wait=job_wait,
                                                    wait_timeout=module.params["job_wait_timeout"])
                job_data = strip_substr_dict(resp.json_data)
                module.exit_json(msg=JOB_SUBMISSION.format(command), task={"id": job_id, "uri": oem_job_url},
                                 status=job_data)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError, AttributeError) as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
import json
from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError


def run_idrac_reset(idrac, module):
    """Trigger an iDRAC reset through the omsdk config manager.

    In check mode no reset is issued; a synthetic success status is returned
    instead.  Otherwise the Redfish transport is enabled and the reset status
    dictionary from ``config_mgr.reset_idrac()`` is returned.
    """
    if module.check_mode:
        # Check mode: report that a reset would happen without performing it.
        return {'Status': 'Success', 'Message': 'Changes found to commit!', 'changes_applicable': True}
    idrac.use_redfish = True
    return idrac.config_mgr.reset_idrac()


def main():
    """Module entry point: authenticate, reset the iDRAC and report the outcome."""
    argument_spec = dict(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    try:
        with iDRACConnection(module.params) as idrac:
            reset_status = run_idrac_reset(idrac, module)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable hosts are reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg="Successfully performed iDRAC reset.", reset_status=reset_status)


if __name__ == '__main__':
    main()
This option is mandatory for CIFS Network Share. + type: str + aliases: ['share_pwd'] + scp_file: + description: + - Name of the server configuration profile (SCP) file. + - This option is mandatory if I(command) is C(import). + - The default format _YYmmdd_HHMMSS_scp is used if this option is not specified for C(import). + - I(export_format) is used if the valid extension file is not provided for C(import). + type: str + scp_components: + description: + - If C(ALL), this module exports or imports all components configurations from SCP file. + - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file. + - If C(BIOS), this module exports or imports BIOS configuration from SCP file. + - If C(NIC), this module exports or imports NIC configuration from SCP file. + - If C(RAID), this module exports or imports RAID configuration from SCP file. + type: str + choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'] + default: 'ALL' + shutdown_type: + description: + - This option is applicable for C(import) command. + - If C(Graceful), the job gracefully shuts down the operating system and turns off the server. + - If C(Forced), it forcefully shuts down the server. + - If C(NoReboot), the job that applies the SCP will pause until you manually reboot the server. + type: str + choices: ['Graceful', 'Forced', 'NoReboot'] + default: 'Graceful' + end_host_power_state: + description: + - This option is applicable for C(import) command. + - If C(On), End host power state is on. + - If C(Off), End host power state is off. + type: str + choices: ['On' ,'Off'] + default: 'On' + export_format: + description: Specify the output file format. This option is applicable for C(export) command. + type: str + choices: ['JSON', 'XML'] + default: 'XML' + export_use: + description: Specify the type of server configuration profile (SCP) to be exported. + This option is applicable for C(export) command. 
+ type: str + choices: ['Default', 'Clone', 'Replace'] + default: 'Default' +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" + - "Felix Stephen (@felixs88)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). + - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers, + the servers must have iDRAC Enterprise license or later. +''' + +EXAMPLES = r''' +--- +- name: Export SCP with IDRAC components in JSON format to a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + scp_components: IDRAC + scp_file: example_file + export_format: JSON + export_use: Clone + job_wait: True + +- name: Import SCP with IDRAC components in JSON format from a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: import + scp_components: "IDRAC" + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + +- name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + scp_components: "BIOS" + export_format: XML + export_use: Default + job_wait: True + +- name: Import SCP with BIOS components in XML format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: 
"/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: import + scp_components: "BIOS" + scp_file: 192.168.0.1_20210618_162856.xml + shutdown_type: NoReboot + end_host_power_state: "Off" + job_wait: False + +- name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username@domain + share_password: share_password + share_mnt: /mnt/cifs + scp_file: example_file.xml + scp_components: "RAID" + export_format: XML + export_use: Default + job_wait: True + +- name: Import SCP with RAID components in XML format from a CIFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + share_mnt: /mnt/cifs + command: import + scp_components: "RAID" + scp_file: example_file.xml + shutdown_type: Forced + end_host_power_state: "On" + job_wait: True + +- name: Export SCP with ALL components in JSON format to a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + scp_components: ALL + export_format: JSON + job_wait: False + +- name: Import SCP with ALL components in JSON format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: 
"http://192.168.0.3/share" + share_user: share_username + share_password: share_password + scp_file: example_file.json + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: True + +- name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_components: ALL + export_format: XML + export_use: Replace + job_wait: True + +- name: Import SCP with ALL components in XML format from a HTTPS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + command: import + share_name: "https://192.168.0.4/share" + share_user: share_username + share_password: share_password + scp_file: 192.168.0.1_20160618_164647.xml + shutdown_type: Graceful + end_host_power_state: "On" + job_wait: False + +- name: Preview SCP with ALL components in XML format from a CIFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "\\\\192.168.0.2\\share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + +- name: Preview SCP with ALL components in JSON format from a NFS share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + command: preview + scp_components: "IDRAC" + scp_file: example_file.xml + job_wait: True + +- name: 
Preview SCP with ALL components in XML format from a HTTP share path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "http://192.168.0.1/http-share" + share_user: share_username + share_password: share_password + command: preview + scp_components: "ALL" + scp_file: example_file.xml + job_wait: True + +- name: Preview SCP with ALL components in XML format from a local path + dellemc.openmanage.idrac_server_config_profile: + idrac_ip: "{{ idrac_ip }}" + idrac_user: "{{ idrac_user }}" + idrac_password: "{{ idrac_password }}" + ca_path: "/path/to/ca_cert.pem" + share_name: "/scp_folder" + command: preview + scp_components: "IDRAC" + scp_file: example_file.json + job_wait: False +''' + +RETURN = r''' +--- +msg: + type: str + description: Status of the import or export SCP job. + returned: always + sample: "Successfully imported the Server Configuration Profile" +scp_status: + type: dict + description: SCP operation job and progress details from the iDRAC. + returned: success + sample: + { + "Id": "JID_XXXXXXXXX", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "XXX123", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import os +import json +import re +import copy +from datetime import datetime +from os.path import exists +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.parse import urlparse + + +REDFISH_SCP_BASE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1" +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." +INVALID_FILE = "Invalid file path provided." 
JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"


def get_scp_file_format(module):
    """Return the SCP file name for the export/import operation.

    If ``scp_file`` is given, append the lower-cased ``export_format`` when the
    name lacks a ``.xml``/``.json`` extension.  Otherwise generate
    ``<idrac_ip>_<YYYYmmdd>_<HHMMSS>_scp.<ext>``.
    """
    scp_file = module.params['scp_file']
    if scp_file:
        scp_file_name_format = scp_file
        if not str(scp_file.lower()).endswith(('.xml', '.json')):
            scp_file_name_format = "{0}.{1}".format(scp_file, module.params['export_format'].lower())
    else:
        # Bug fix: the previous implementation concatenated unpadded
        # year/month/day/hour/minute/second values, producing names like
        # "..._2024428_91517_scp.xml" instead of the documented
        # "<ip>_YYYYmmdd_HHMMSS" pattern (see the module examples, e.g.
        # "192.168.0.1_20210618_162856.xml").  strftime zero-pads each field.
        scp_file_name_format = "{0}_{1}_scp.{2}".format(
            module.params["idrac_ip"], datetime.now().strftime("%Y%m%d_%H%M%S"),
            module.params['export_format'].lower())
    return scp_file_name_format


def response_format_change(response, params, file_name):
    """Normalise the SCP API response into the module's documented return shape.

    When ``job_wait`` is set, the completed job payload is flattened (Dell OEM
    data promoted, noisy fields dropped).  Otherwise a job-submission summary is
    built from the Location header.  The share path and file name are joined
    with whichever separator the share path itself uses.
    """
    resp = {}
    if params["job_wait"]:
        response = response.json_data
        response.pop("Description", None)
        response.pop("Name", None)
        response.pop("EndTime", None)
        response.pop("StartTime", None)
        response.pop("TaskState", None)
        response.pop("Messages", None)
        if response.get("Oem") is not None:
            # Promote the Dell OEM job attributes to the top level.
            response.update(response["Oem"]["Dell"])
            response.pop("Oem", None)
        sep = "/" if "/" in params["share_name"] else "\\"
        response["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
        response["retval"] = True
    else:
        location = response.headers.get("Location")
        job_id = location.split("/")[-1]
        job_uri = JOB_URI.format(job_id=job_id)
        resp["Data"] = {"StatusCode": response.status_code, "jobid": job_id, "next_uri": job_uri}
        resp["Job"] = {"JobId": job_id, "ResourceURI": job_uri}
        resp["Return"] = "JobCreated"
        resp["Status"] = "Success"
        resp["Message"] = "none"
        resp["StatusCode"] = response.status_code
        sep = "/" if "/" in params["share_name"] else "\\"
        resp["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
        resp["retval"] = True
        response = resp
    return response


def run_export_import_scp_http(idrac, module):
    """Run an SCP export or import against an HTTP/HTTPS share.

    The share URL is split into ip/path/scheme for the iDRAC API.  Fails the
    module when the returned task status is Critical; otherwise returns the
    normalised response.
    """
    share_url = urlparse(module.params["share_name"])
    share = {}
    scp_file = module.params.get("scp_file")
    share["share_ip"] = share_url.netloc
    share["share_name"] = share_url.path.strip('/')
    share["share_type"] = share_url.scheme.upper()
    share["file_name"] = scp_file
    scp_file_name_format = scp_file
    share["username"] = module.params.get("share_user")
    share["password"] = module.params.get("share_password")
    command = module.params["command"]
    if command == "import":
        scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
                                              host_powerstate=module.params["end_host_power_state"],
                                              job_wait=module.params["job_wait"],
                                              target=module.params["scp_components"], share=share, )
    elif command == "export":
        scp_file_name_format = get_scp_file_format(module)
        share["file_name"] = scp_file_name_format
        scp_response = idrac.export_scp(export_format=module.params["export_format"],
                                        export_use=module.params["export_use"],
                                        target=module.params["scp_components"],
                                        job_wait=module.params["job_wait"], share=share, )
    scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response


def get_scp_share_details(module):
    """Classify ``share_name`` as NFS (``host:/path``), CIFS (``\\\\ip\\path``) or LOCAL.

    Returns ``(share_dict, scp_file_name)``; for exports the generated file
    name is also placed in the share dictionary.
    """
    share_name = module.params.get("share_name")
    command = module.params["command"]
    scp_file_name_format = get_scp_file_format(module)
    if ":" in share_name:
        nfs_split = share_name.split(":")
        share = {"share_ip": nfs_split[0], "share_name": nfs_split[1], "share_type": "NFS"}
        if command == "export":
            share["file_name"] = scp_file_name_format
    elif "\\" in share_name:
        ip_pattern = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
        share_path = re.split(ip_pattern, share_name)
        share_ip = re.findall(ip_pattern, share_name)
        share_path_name = "\\".join(list(filter(None, share_path[-1].split("\\"))))
        share = {"share_ip": share_ip[0], "share_name": share_path_name, "share_type": "CIFS",
                 "username": module.params.get("share_user"), "password": module.params.get("share_password")}
        if command == "export":
            share["file_name"] = scp_file_name_format
    else:
        share = {"share_type": "LOCAL", "share_name": share_name}
        if command == "export":
            share["file_name"] = scp_file_name_format
    return share, scp_file_name_format


def export_scp_redfish(module, idrac):
    """Export the SCP to a network share or, for LOCAL shares, to a local file.

    LOCAL exports always submit the job without waiting and then poll/download
    via ``wait_for_response``.  Fails the module on a Critical task status.
    """
    command = module.params["command"]
    share, scp_file_name_format = get_scp_share_details(module)
    if share["share_type"] == "LOCAL":
        scp_response = idrac.export_scp(export_format=module.params["export_format"],
                                        export_use=module.params["export_use"],
                                        target=module.params["scp_components"],
                                        job_wait=False, share=share, )
        scp_response = wait_for_response(scp_response, module, share, idrac)
    else:
        scp_response = idrac.export_scp(export_format=module.params["export_format"],
                                        export_use=module.params["export_use"],
                                        target=module.params["scp_components"],
                                        job_wait=module.params["job_wait"], share=share, )
    scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response


def wait_for_response(scp_resp, module, share, idrac):
    """Wait for a LOCAL export task and write the exported profile to disk.

    JSON exports are serialised from the parsed job payload; XML exports are
    written from the decoded response body.  When ``job_wait`` is set, the job
    resource is fetched once more and returned in place of the task response.
    """
    task_uri = scp_resp.headers["Location"]
    job_id = task_uri.split("/")[-1]
    job_uri = JOB_URI.format(job_id=job_id)
    wait_resp = idrac.wait_for_job_complete(task_uri, job_wait=True)
    with open("{0}/{1}".format(share["share_name"], share["file_name"]), "w") as file_obj:
        if module.params["export_format"] == "JSON":
            json.dump(wait_resp.json_data, file_obj, indent=4)
        else:
            # NOTE(review): relies on the response object exposing decode();
            # confirm against iDRACRedfishAPI's response wrapper.
            wait_resp_value = wait_resp.decode("utf-8")
            file_obj.write(wait_resp_value)
    if module.params["job_wait"]:
        scp_resp = idrac.invoke_request(job_uri, "GET")
    return scp_resp
def import_scp_redfish(module, idrac, http_share):
    """Import a Server Configuration Profile over Redfish.

    In check mode a preview import is executed instead and the module exits
    immediately with the would-change result.  LOCAL files are read into an
    import buffer; network shares are passed through to the iDRAC.
    """
    command = module.params["command"]
    scp_target = module.params["scp_components"]
    job_wait = copy.copy(module.params["job_wait"])
    if module.check_mode:
        module.params["job_wait"] = True
        scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
        # SYS081/SYS082 indicate the preview found changes to apply.
        if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        module.fail_json(msg=scp_resp)
    if http_share:
        parsed_url = urlparse(module.params["share_name"])
        share = {"share_ip": parsed_url.netloc,
                 "share_name": parsed_url.path.strip('/'),
                 "share_type": parsed_url.scheme.upper(),
                 "file_name": module.params.get("scp_file"),
                 "username": module.params.get("share_user"),
                 "password": module.params.get("share_password")}
    else:
        share, _generated_name = get_scp_share_details(module)
        share["file_name"] = module.params.get("scp_file")
    buffer_text = None
    share_dict = share
    if share["share_type"] == "LOCAL":
        # Local imports send the file contents as a buffer, not a share.
        scp_target = "ALL"
        file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
        if not exists(file_path):
            module.fail_json(msg=INVALID_FILE)
        with open(file_path, "r") as file_obj:
            buffer_text = file_obj.read()
        share_dict = {}
    module.params["job_wait"] = job_wait
    scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
                                          host_powerstate=module.params["end_host_power_state"],
                                          job_wait=module.params["job_wait"],
                                          target=scp_target,
                                          import_buffer=buffer_text, share=share_dict, )
    scp_response = response_format_change(scp_response, module.params, share["file_name"])
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response


def main():
    """Entry point: parse arguments and dispatch export/import/preview of the SCP."""
    specs = {
        "command": {"required": False, "type": 'str',
                    "choices": ['export', 'import', 'preview'], "default": 'export'},
        "job_wait": {"required": True, "type": 'bool'},
        "share_name": {"required": True, "type": 'str'},
        "share_user": {"required": False, "type": 'str'},
        "share_password": {"required": False, "type": 'str',
                           "aliases": ['share_pwd'], "no_log": True},
        "scp_components": {"required": False,
                           "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
                           "default": 'ALL'},
        "scp_file": {"required": False, "type": 'str'},
        "shutdown_type": {"required": False,
                          "choices": ['Graceful', 'Forced', 'NoReboot'],
                          "default": 'Graceful'},
        "end_host_power_state": {"required": False,
                                 "choices": ['On', 'Off'],
                                 "default": 'On'},
        "export_format": {"required": False, "type": 'str',
                          "choices": ['JSON', 'XML'], "default": 'XML'},
        "export_use": {"required": False, "type": 'str',
                       "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ["command", "import", ["scp_file"]],
            ["command", "preview", ["scp_file"]],
        ],
        supports_check_mode=True)

    try:
        changed = False
        # HTTP(S) shares take a different code path than NFS/CIFS/LOCAL ones.
        http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
        with iDRACRedfishAPI(module.params) as idrac:
            command = module.params['command']
            if command == 'import':
                if http_share:
                    scp_status = run_export_import_scp_http(idrac, module)
                    # SYS069: no changes applied; SYS053: import succeeded.
                    if "SYS069" in scp_status.get("MessageId", ""):
                        changed = False
                    elif "SYS053" in scp_status.get("MessageId", ""):
                        changed = True
                else:
                    scp_status = import_scp_redfish(module, idrac, http_share)
                    if "No changes were applied" not in scp_status.get('Message', ""):
                        changed = True
                    elif "SYS043" in scp_status.get("MessageId", ""):
                        changed = True
                    elif "SYS069" in scp_status.get("MessageId", ""):
                        changed = False
            elif command == "export":
                if http_share:
                    scp_status = run_export_import_scp_http(idrac, module)
                else:
                    scp_status = export_scp_redfish(module, idrac)
            else:
                scp_status = preview_scp_redfish(module, idrac, http_share, import_job_wait=False)
            if module.params.get('job_wait'):
                scp_status = strip_substr_dict(scp_status)
                msg = "Successfully {0}ed the Server Configuration Profile."
                module.exit_json(changed=changed, msg=msg.format(command), scp_status=scp_status)
            else:
                msg = "Successfully triggered the job to {0} the Server Configuration Profile."
                module.exit_json(msg=msg.format(command), scp_status=scp_status)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (ImportError, ValueError, RuntimeError, SSLValidationError,
            ConnectionError, KeyError, TypeError, IndexError) as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
+""" + +EXAMPLES = """ +--- +- name: Enable iDRAC syslog + dellemc.openmanage.idrac_syslog: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + share_password: "share_user_pwd" + share_user: "share_user_name" + share_mnt: "/mnt/share" + syslog: "Enabled" + +- name: Disable iDRAC syslog + dellemc.openmanage.idrac_syslog: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + share_name: "192.168.0.2:/share" + share_password: "share_user_pwd" + share_user: "share_user_name" + share_mnt: "/mnt/share" + syslog: "Disabled" +""" + +RETURN = r''' +--- +msg: + description: Overall status of the syslog export operation. + returned: always + type: str + sample: "Successfully fetch the syslogs." +syslog_status: + description: Job details of the syslog operation. + returned: success + type: dict + sample: { + "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob", + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485", + "@odata.type": "#DellJob.v1_0_2.DellJob", + "CompletionTime": "2020-03-27T02:27:45", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_852940632485", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
def run_setup_idrac_syslog(idrac, module):
    """Enable or disable the iDRAC syslog through the OMSDK config manager.

    :param idrac: iDRAC connection handle.
    :param module: Ansible module instance.
    :returns: change-applicability result in check mode, otherwise the
        result of applying the requested syslog state.
    """
    idrac.use_redfish = True
    upd_share = file_share_manager.create_share_obj(share_path=module.params['share_name'],
                                                    mount_point=module.params['share_mnt'],
                                                    isFolder=True,
                                                    creds=UserCredentials(
                                                        module.params['share_user'],
                                                        module.params['share_password']))
    if not upd_share.IsValid:
        module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                             "share mount, and share credentials provided are correct.")
    idrac.config_mgr.set_liason_share(upd_share)
    if module.check_mode:
        # Stage the change only, then report whether it would alter anything.
        if module.params['syslog'] == 'Enabled':
            idrac.config_mgr.enable_syslog(apply_changes=False)
        elif module.params['syslog'] == 'Disabled':
            idrac.config_mgr.disable_syslog(apply_changes=False)
        msg = idrac.config_mgr.is_change_applicable()
    else:
        if module.params['syslog'] == 'Enabled':
            msg = idrac.config_mgr.enable_syslog()
        elif module.params['syslog'] == 'Disabled':
            msg = idrac.config_mgr.disable_syslog()
    return msg


def main():
    """Entry point for the idrac_syslog module."""
    specs = {
        "share_name": {"required": True, "type": 'str'},
        "share_user": {"required": False, "type": 'str'},
        "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
        "share_mnt": {"required": False, "type": 'str'},
        "syslog": {"required": False, "choices": ['Enabled', 'Disabled'], "default": 'Enabled'}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        with iDRACConnection(module.params) as idrac:
            msg = run_setup_idrac_syslog(idrac, module)
            changed = False
            if msg.get('Status') == "Success":
                changed = True
                if msg.get('Message') == "No changes found to commit!":
                    changed = False
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except AttributeError as err:
        if "NoneType" in str(err):
            module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                                 "share mount, and share credentials provided are correct.")
        # FIX: previously any other AttributeError fell through to the final
        # exit_json with ``msg``/``changed`` unbound, raising UnboundLocalError.
        module.fail_json(msg=str(err))
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg="Successfully fetch the syslogs.",
                     syslog_status=msg, changed=changed)


if __name__ == '__main__':
    main()
+system_info: + type: dict + description: Details of the PowerEdge Server System Inventory. + returned: success + sample: { + "BIOS": [ + { + "BIOSReleaseDate": "11/26/2019", + "FQDD": "BIOS.Setup.1-1", + "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.1-1", + "Key": "DCIM:INSTALLED#741__BIOS.Setup.1-1", + "SMBIOSPresent": "True", + "VersionString": "2.4.8" + } + ] + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +# Main +def main(): + specs = {} + specs.update(idrac_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + try: + with iDRACConnection(module.params) as idrac: + idrac.get_entityjson() + msg = idrac.get_json_device() + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e: + module.fail_json(msg=str(e)) + + module.exit_json(msg="Successfully fetched the system inventory details.", + system_info=msg) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py new file mode 100644 index 00000000..6227571c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_timezone_ntp +short_description: Configures time zone and NTP on iDRAC +version_added: "2.1.0" +deprecated: + removed_at_date: "2024-07-31" + why: Replaced with M(dellemc.openmanage.idrac_attributes). + alternative: Use M(dellemc.openmanage.idrac_attributes) instead. + removed_from_collection: dellemc.openmanage +description: + - This module allows to configure time zone and NTP on iDRAC. +extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + setup_idrac_timezone: + type: str + description: Allows to configure time zone on iDRAC. + enable_ntp: + type: str + description: Allows to enable or disable NTP on iDRAC. + choices: [Enabled, Disabled] + ntp_server_1: + type: str + description: The IP address of the NTP server 1. + ntp_server_2: + type: str + description: The IP address of the NTP server 2. + ntp_server_3: + type: str + description: The IP address of the NTP server 3. + share_name: + type: str + description: + - (deprecated)Network share or a local path. + - This option is deprecated and will be removed in the later version. + share_user: + type: str + description: + - (deprecated)Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain. 
+ This option is mandatory for CIFS share. + - This option is deprecated and will be removed in the later version. + share_password: + type: str + description: + - (deprecated)Network share user password. This option is mandatory for CIFS share. + - This option is deprecated and will be removed in the later version. + aliases: ['share_pwd'] + share_mnt: + type: str + description: + - (deprecated)Local mount path of the network share with read-write permission for ansible user. + This option is mandatory for network shares. + - This option is deprecated and will be removed in the later version. + +requirements: + - "omsdk >= 1.2.488" + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Anooja Vardhineni (@anooja-vardhineni)" +notes: + - This module requires 'Administrator' privilege for I(idrac_user). + - Run this module from a system that has direct access to Dell EMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure time zone and NTP on iDRAC + dellemc.openmanage.idrac_timezone_ntp: + idrac_ip: "190.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + setup_idrac_timezone: "UTC" + enable_ntp: Enabled + ntp_server_1: "190.168.0.1" + ntp_server_2: "190.168.0.2" + ntp_server_3: "190.168.0.3" +""" + +RETURN = r''' +--- +msg: + description: Overall status of the timezone and ntp configuration. + returned: always + type: str + sample: "Successfully configured the iDRAC time settings." +timezone_ntp_status: + description: Job details of the time zone setting operation. 
+ returned: success + type: dict + sample: { + "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob", + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_861801613971", + "@odata.type": "#DellJob.v1_0_0.DellJob", + "CompletionTime": "2020-04-06T19:06:01", + "Description": "Job Instance", + "EndTime": null, + "Id": "JID_861801613971", + "JobState": "Completed", + "JobType": "ImportConfiguration", + "Message": "Successfully imported and applied Server Configuration Profile.", + "MessageArgs": [], + "MessageId": "SYS053", + "Name": "Import Configuration", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": null, + "retval": true +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
def run_idrac_timezone_config(idrac, module):
    """Configure iDRAC time zone and NTP settings via the OMSDK config manager.

    Keyword arguments:
    idrac -- iDRAC handle
    module -- Ansible module

    Returns the change-applicability result in check mode, otherwise the
    result of applying the staged changes (no reboot).
    """
    idrac.use_redfish = True
    # The OMSDK config manager requires a liaison share; a temp dir suffices.
    share_path = tempfile.gettempdir() + os.sep
    upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
    if not upd_share.IsValid:
        module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                             "share mount, and share credentials provided are correct.")
    idrac.config_mgr.set_liason_share(upd_share)

    if module.params['setup_idrac_timezone'] is not None:
        idrac.config_mgr.configure_timezone(module.params['setup_idrac_timezone'])

    if module.params['enable_ntp'] is not None:
        idrac.config_mgr.configure_ntp(
            enable_ntp=NTPEnable_NTPConfigGroupTypes[module.params['enable_ntp']]
        )
    # Collapse the three copy-pasted per-server branches into one loop.
    for ntp_option in ('ntp_server_1', 'ntp_server_2', 'ntp_server_3'):
        if module.params[ntp_option] is not None:
            idrac.config_mgr.configure_ntp(**{ntp_option: module.params[ntp_option]})

    if module.check_mode:
        msg = idrac.config_mgr.is_change_applicable()
    else:
        msg = idrac.config_mgr.apply_changes(reboot=False)
    return msg


# Main
def main():
    """Entry point for the idrac_timezone_ntp module."""
    specs = {
        # Export Destination (deprecated share options)
        "share_name": {"required": False, "type": 'str'},
        "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
        "share_user": {"required": False, "type": 'str'},
        "share_mnt": {"required": False, "type": 'str'},

        # setup NTP
        "enable_ntp": {"required": False, "choices": ['Enabled', 'Disabled']},
        "ntp_server_1": {"required": False},
        "ntp_server_2": {"required": False},
        "ntp_server_3": {"required": False},

        # set up timezone
        "setup_idrac_timezone": {"required": False, "type": 'str'},

    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        with iDRACConnection(module.params) as idrac:
            changed = False
            msg = run_idrac_timezone_config(idrac, module)
            if msg.get('Status') == "Success":
                changed = True
                if msg.get('Message') == "No changes found to commit!":
                    changed = False
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except AttributeError as err:
        if "NoneType" in str(err):
            module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                                 "share mount, and share credentials provided are correct.")
        # FIX: previously any other AttributeError fell through to the final
        # exit_json with ``msg``/``changed`` unbound, raising UnboundLocalError.
        module.fail_json(msg=str(err))
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg="Successfully configured the iDRAC time settings.",
                     timezone_ntp_status=msg, changed=changed)


if __name__ == '__main__':
    main()
+ choices: [present, absent] + default: present + user_name: + type: str + required: True + description: Provide the I(user_name) of the account to be created, deleted or modified. + user_password: + type: str + description: + - Provide the password for the user account. The password can be changed when the user account is modified. + - To ensure security, the I(user_password) must be at least eight characters long and must contain + lowercase and upper-case characters, numbers, and special characters. + new_user_name: + type: str + description: Provide the I(user_name) for the account to be modified. + privilege: + type: str + description: + - Following are the role-based privileges. + - A user with C(Administrator) privilege can log in to iDRAC, and then configure iDRAC, configure users, + clear logs, control and configure system, access virtual console, access virtual media, test alerts, + and execute debug commands. + - A user with C(Operator) privilege can log in to iDRAC, and then configure iDRAC, control and configure system, + access virtual console, access virtual media, and execute debug commands. + - A user with C(ReadOnly) privilege can only log in to iDRAC. + - A user with C(None), no privileges assigned. + choices: [Administrator, ReadOnly, Operator, None] + ipmi_lan_privilege: + type: str + description: The Intelligent Platform Management Interface LAN privilege level assigned to the user. + choices: [Administrator, Operator, User, No Access] + ipmi_serial_privilege: + type: str + description: + - The Intelligent Platform Management Interface Serial Port privilege level assigned to the user. + - This option is only applicable for rack and tower servers. + choices: [Administrator, Operator, User, No Access] + enable: + type: bool + description: Provide the option to enable or disable a user from logging in to iDRAC. + sol_enable: + type: bool + description: Enables Serial Over Lan (SOL) for an iDRAC user. 
+ protocol_enable: + type: bool + description: Enables protocol for the iDRAC user. + authentication_protocol: + type: str + description: + - This option allows to configure one of the following authentication protocol + types to authenticate the iDRAC user. + - Secure Hash Algorithm C(SHA). + - Message Digest 5 C(MD5). + - An authentication protocol is not configured if C(None) is selected. + choices: [None, SHA, MD5] + privacy_protocol: + type: str + description: + - This option allows to configure one of the following privacy encryption protocols for the iDRAC user. + - Data Encryption Standard C(DES). + - Advanced Encryption Standard C(AES). + - A privacy protocol is not configured if C(None) is selected. + choices: [None, DES, AES] +requirements: + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC iDRAC. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure a new iDRAC user + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: present + user_name: user_name + user_password: user_password + privilege: Administrator + ipmi_lan_privilege: Administrator + ipmi_serial_privilege: Administrator + enable: true + sol_enable: true + protocol_enable: true + authentication_protocol: SHA + privacy_protocol: AES + +- name: Modify existing iDRAC user username and password + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: present + user_name: user_name + new_user_name: new_user_name + user_password: user_password + +- name: Delete existing iDRAC user account + dellemc.openmanage.idrac_user: + idrac_ip: 198.162.0.1 + idrac_user: idrac_user + idrac_password: idrac_password + ca_path: "/path/to/ca_cert.pem" + state: absent + user_name: user_name +""" + 
+RETURN = r''' +--- +msg: + description: Status of the iDRAC user configuration. + returned: always + type: str + sample: "Successfully created user account details." +status: + description: Configures the iDRAC users attributes. + returned: success + type: dict + sample: { + "@Message.ExtendedInfo": [{ + "Message": "Successfully Completed Request", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.5.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + }, { + "Message": "The operation successfully completed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.1.SYS413", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + "Severity": "Informational"} + ]} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + + +import json +import re +import time +from ssl import SSLError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule + + +ACCOUNT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/" +ATTRIBUTE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Attributes/" +PRIVILEGE = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0} +ACCESS = {0: "Disabled", 1: "Enabled"} + + +def compare_payload(json_payload, idrac_attr): + """ + :param json_payload: json payload created for update operation + :param idrac_attr: idrac user attributes + case1: always skip password for difference + case2: as idrac_attr returns privilege in the format of string so + convert payload to string only for comparision + :return: bool + """ + copy_json = json_payload.copy() + for key, val in dict(copy_json).items(): + split_key = key.split("#")[1] + if split_key == "Password": + is_change_required = True + break + if split_key == "Privilege": + copy_json[key] = str(val) + else: + is_change_required = bool(list(set(copy_json.items()) - set(idrac_attr.items()))) + return is_change_required + + +def get_user_account(module, idrac): + """ + This function gets the slot id and slot uri for create and modify. 
+ :param module: ansible module arguments + :param idrac: idrac objects + :return: user_attr, slot_uri, slot_id, empty_slot, empty_slot_uri + """ + slot_uri, slot_id, empty_slot, empty_slot_uri = None, None, None, None + if not module.params["user_name"]: + module.fail_json(msg="User name is not valid.") + response = idrac.export_scp(export_format="JSON", export_use="Default", target="IDRAC", job_wait=True) + user_attributes = idrac.get_idrac_local_account_attr(response.json_data, fqdd="iDRAC.Embedded.1") + slot_num = tuple(range(2, 17)) + for num in slot_num: + user_name = "Users.{0}#UserName".format(num) + if user_attributes.get(user_name) == module.params["user_name"]: + slot_id = num + slot_uri = ACCOUNT_URI + str(num) + break + if not user_attributes.get(user_name) and (empty_slot_uri and empty_slot) is None: + empty_slot = num + empty_slot_uri = ACCOUNT_URI + str(num) + return user_attributes, slot_uri, slot_id, empty_slot, empty_slot_uri + + +def get_payload(module, slot_id, action=None): + """ + This function creates the payload with slot id. + :param module: ansible module arguments + :param action: new user name is only applicable in case of update user name. 
+ :param slot_id: slot id for user slot + :return: json data with slot id + """ + slot_payload = {"Users.{0}.UserName": module.params["user_name"], + "Users.{0}.Password": module.params["user_password"], + "Users.{0}.Enable": ACCESS.get(module.params["enable"]), + "Users.{0}.Privilege": PRIVILEGE.get(module.params["privilege"]), + "Users.{0}.IpmiLanPrivilege": module.params["ipmi_lan_privilege"], + "Users.{0}.IpmiSerialPrivilege": module.params["ipmi_serial_privilege"], + "Users.{0}.SolEnable": ACCESS.get(module.params["sol_enable"]), + "Users.{0}.ProtocolEnable": ACCESS.get(module.params["protocol_enable"]), + "Users.{0}.AuthenticationProtocol": module.params["authentication_protocol"], + "Users.{0}.PrivacyProtocol": module.params["privacy_protocol"], } + if module.params["new_user_name"] is not None and action == "update": + user_name = "Users.{0}.UserName".format(slot_id) + slot_payload[user_name] = module.params["new_user_name"] + elif module.params["state"] == "absent": + slot_payload = {"Users.{0}.UserName": "", "Users.{0}.Enable": "Disabled", "Users.{0}.Privilege": 0, + "Users.{0}.IpmiLanPrivilege": "No Access", "Users.{0}.IpmiSerialPrivilege": "No Access", + "Users.{0}.SolEnable": "Disabled", "Users.{0}.ProtocolEnable": "Disabled", + "Users.{0}.AuthenticationProtocol": "SHA", "Users.{0}.PrivacyProtocol": "AES"} + payload = dict([(k.format(slot_id), v) for k, v in slot_payload.items() if v is not None]) + return payload + + +def convert_payload_xml(payload): + """ + this function converts payload to xml and json data. 
+ :param payload: user input for payload + :return: returns xml and json data + """ + root = """{0}""" + attr = "" + json_payload = {} + for k, v in payload.items(): + key = re.sub(r"(?<=\d)\.", "#", k) + attr += '{1}'.format(key, v) + json_payload[key] = v + root = root.format(attr) + return root, json_payload + + +def create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id, empty_slot_uri, user_attr): + """ + This function create user account in case not exists else update it. + :param module: user account module arguments + :param idrac: idrac object + :param slot_uri: slot uri for update + :param slot_id: slot id for update + :param empty_slot_id: empty slot id for create + :param empty_slot_uri: empty slot uri for create + :return: json + """ + generation, firmware_version = idrac.get_server_generation + msg, response = "Unable to retrieve the user details.", {} + if (slot_id and slot_uri) is None and (empty_slot_id and empty_slot_uri) is not None: + msg = "Successfully created user account." + payload = get_payload(module, empty_slot_id, action="create") + if module.check_mode: + module.exit_json(msg="Changes found to commit!", changed=True) + if generation >= 14: + response = idrac.invoke_request(ATTRIBUTE_URI, "PATCH", data={"Attributes": payload}) + elif generation < 14: + xml_payload, json_payload = convert_payload_xml(payload) + time.sleep(10) + response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True) + elif (slot_id and slot_uri) is not None: + msg = "Successfully updated user account." 
        payload = get_payload(module, slot_id, action="update")
        xml_payload, json_payload = convert_payload_xml(payload)
        # True when the requested attributes differ from what iDRAC reports.
        value = compare_payload(json_payload, user_attr)
        if module.check_mode:
            if value:
                module.exit_json(msg="Changes found to commit!", changed=True)
            module.exit_json(msg="No changes found to commit!")
        if not value:
            module.exit_json(msg="Requested changes are already present in the user slot.")
        if generation >= 14:
            # 14G and later accept a direct attribute PATCH.
            response = idrac.invoke_request(ATTRIBUTE_URI, "PATCH", data={"Attributes": payload})
        elif generation < 14:
            # Older generations only support SCP import.
            time.sleep(10)
            response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True)
    elif (slot_id and slot_uri and empty_slot_id and empty_slot_uri) is None:
        # Neither a matching slot nor a free slot was found.
        module.fail_json(msg="Maximum number of users reached. Delete a user account and retry the operation.")
    return response, msg


def remove_user_account(module, idrac, slot_uri, slot_id):
    """
    Remove a user account by passing empty payload details.
    :param module: user account module arguments.
    :param idrac: idrac object.
    :param slot_uri: user slot uri.
    :param slot_id: user slot id.
    :return: json.
    """
    response, msg = {}, "Successfully deleted user account."
    payload = get_payload(module, slot_id, action="delete")
    xml_payload, json_payload = convert_payload_xml(payload)
    if module.check_mode and (slot_id and slot_uri) is not None:
        module.exit_json(msg="Changes found to commit!", changed=True)
    elif module.check_mode and (slot_uri and slot_id) is None:
        module.exit_json(msg="No changes found to commit!")
    elif not module.check_mode and (slot_uri and slot_id) is not None:
        time.sleep(10)
        # Deletion is always performed through SCP import of the reset payload.
        response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True)
    else:
        module.exit_json(msg="The user account is absent.")
    return response, msg


def main():
    # Module argument specification; authentication options are merged in
    # from the shared idrac_auth_params below.
    specs = {
        "state": {"required": False, "choices": ['present', 'absent'], "default": "present"},
        "new_user_name": {"required": False},
        "user_name": {"required": True},
        "user_password": {"required": False, "no_log": True},
        "privilege": {"required": False, "choices": ['Administrator', 'ReadOnly', 'Operator', 'None']},
        "ipmi_lan_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
        "ipmi_serial_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
        "enable": {"required": False, "type": "bool"},
        "sol_enable": {"required": False, "type": "bool"},
        "protocol_enable": {"required": False, "type": "bool"},
        "authentication_protocol": {"required": False, "choices": ['SHA', 'MD5', 'None']},
        "privacy_protocol": {"required": False, "choices": ['AES', 'DES', 'None']},
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)
    try:
        with iDRACRedfishAPI(module.params, req_session=True) as idrac:
            user_attr, slot_uri, slot_id, empty_slot_id, empty_slot_uri = get_user_account(module, idrac)
            if module.params["state"] == "present":
                response, message = create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id,
                                                             empty_slot_uri, user_attr)
            elif module.params["state"] == "absent":
+ response, message = remove_user_account(module, idrac, slot_uri, slot_id) + error = response.json_data.get("error") + oem = response.json_data.get("Oem") + if oem: + oem_msg = oem.get("Dell").get("Message") + error_msg = ["Unable to complete application of configuration profile values.", + "Import of Server Configuration Profile operation completed with errors."] + if oem_msg in error_msg: + module.fail_json(msg=oem_msg, error_info=response.json_data) + if error: + module.fail_json(msg=error.get("message"), error_info=response.json_data) + module.exit_json(msg=message, status=response.json_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, SSLError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py new file mode 100644 index 00000000..ac22541e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py @@ -0,0 +1,468 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.3.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: idrac_virtual_media +short_description: Configure the Remote File Share settings. +version_added: "6.3.0" +description: + - This module allows to configure Remote File Share settings. 
+extends_documentation_fragment: + - dellemc.openmanage.idrac_auth_options +options: + virtual_media: + required: true + type: list + elements: dict + description: Details of the Remote File Share. + suboptions: + insert: + required: true + type: bool + description: + - C(True) connects the remote image file. + - C(False) ejects the remote image file if connected. + image: + type: path + description: + - The path of the image file. The supported file types are .img and .iso. + - The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is + redirected as a virtual CDROM. + - This option is required when I(insert) is C(True). + - "The following are the examples of the share location: + CIFS share: //192.168.0.1/file_path/image_name.iso, + NFS share: 192.168.0.2:/file_path/image_name.img, + HTTP share: http://192.168.0.3/file_path/image_name.iso, + HTTPS share: https://192.168.0.4/file_path/image_name.img" + - CIFS share is not supported by iDRAC7 and iDRAC8. + - HTTPS share with credentials is not supported by iDRAC7 and iDRAC8. + index: + type: int + description: + - Index of the Remote File Share. For example, to specify the Remote File Share 1, the value of I(index) + should be 1. If I(index) is not specified, the order of I(virtual_media) list will be considered. + domain: + type: str + description: Domain name of network share. This option is applicable for CIFS and HTTPS share. + username: + type: str + description: Network share username. This option is applicable for CIFS and HTTPS share. + password: + type: str + description: + - Network share password. This option is applicable for CIFS and HTTPS share. + - This module always reports as the changes found when I(password) is provided. + media_type: + type: str + description: Type of the image file. This is applicable when I(insert) is C(True). 
+ choices: [CD, DVD, USBStick] + force: + type: bool + description: C(True) ejects the image file if already connected and inserts the file provided in I(image). + This is applicable when I(insert) is C(True). + default: false + resource_id: + type: str + description: Resource id of the iDRAC, if not specified manager collection id will be used. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Dell iDRAC. + - This module supports C(check_mode). +""" + + +EXAMPLES = """ +--- +- name: Insert image file to Remote File Share 1 using CIFS share. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - insert: true + image: "//192.168.0.2/file_path/file.iso" + username: "username" + password: "password" + +- name: Insert image file to Remote File Share 2 using NFS share. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + virtual_media: + - index: 2 + insert: true + image: "192.168.0.4:/file_path/file.iso" + +- name: Insert image file to Remote File Share 1 and 2 using HTTP. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "http://192.168.0.4/file_path/file.img" + - index: 2 + insert: true + image: "http://192.168.0.4/file_path/file.img" + +- name: Insert image file using HTTPS. 
+ dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: "https://192.168.0.5/file_path/file.img" + username: username + password: password + +- name: Eject multiple virtual media. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: false + - index: 2 + insert: false + +- name: Ejection of image file from Remote File Share 1. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + insert: false + +- name: Insertion and ejection of image file in single task. + dellemc.openmanage.idrac_virtual_media: + idrac_ip: "192.168.0.1" + idrac_user: "user_name" + idrac_password: "user_password" + ca_path: "/path/to/ca_cert.pem" + force: true + virtual_media: + - index: 1 + insert: true + image: https://192.168.0.5/file/file.iso + username: username + password: password + - index: 2 + insert: false +""" + + +RETURN = r''' +--- +msg: + description: Successfully performed the virtual media operation. + returned: success + type: str + sample: Successfully performed the virtual media operation. +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. 
If the issue persists, contact your system administrator." + } + ] + } + } +''' + + +import json +import copy +import time +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params +from ansible.module_utils.basic import AnsibleModule + +MANAGER_BASE = "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia" +SYSTEM_BASE = "/redfish/v1/Systems/System.Embedded.1/VirtualMedia" + +EXCEEDED_ERROR = "Unable to complete the operation because the virtual media settings " \ + "provided exceeded the maximum limit." +NO_CHANGES_FOUND = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +INVALID_INDEX = "Unable to compete the virtual media operation because the index provided is incorrect or invalid." +FAIL_MSG = "Unable to complete the virtual media operation." +SUCCESS_MSG = "Successfully performed the virtual media operation." +UNSUPPORTED_IMAGE = "Unable to complete the virtual media operation because unsupported image " \ + "provided. The supported file types are .img and .iso." +UNSUPPORTED_MEDIA = "Unable to complete the virtual media operation because unsupported media type " \ + "provided for index {0}" +UNSUPPORTED_MSG = "The system does not support the CIFS network share feature." +UNSUPPORTED_MSG_HTTPS = "The system does not support the HTTPS network share feature with credentials." 
+ + +def get_virtual_media_info(idrac): + resp = idrac.invoke_request("/redfish/v1/", "GET") + redfish_version = resp.json_data["RedfishVersion"] + rd_version = redfish_version.replace(".", "") + if 1131 <= int(rd_version): + vr_id = "system" + member_resp = idrac.invoke_request("{0}?$expand=*($levels=1)".format(SYSTEM_BASE), "GET") + else: + vr_id = "manager" + member_resp = idrac.invoke_request("{0}?$expand=*($levels=1)".format(MANAGER_BASE), "GET") + response = member_resp.json_data["Members"] + return response, vr_id, rd_version + + +def get_payload_data(each, vr_members, vr_id): + is_change, unsup_media, input_vr_mem = False, None, {} + vr_mem = vr_members[each["index"] - 1] + + if each["insert"]: + exist_vr_mem = dict((k, vr_mem[k]) for k in ["Inserted", "Image", "UserName", "Password"] if vr_mem.get(k) is not None) + input_vr_mem = {"Inserted": each["insert"], "Image": each["image"]} + if each["image"].startswith("//") or each["image"].lower().startswith("https://"): + username, password, domain = each.get("username"), each.get("password"), each.get("domain") + if username is not None: + if domain is not None: + username = "{0}\\{1}".format(domain, username) + input_vr_mem["UserName"] = username + if password is not None: + input_vr_mem["Password"] = password + else: + exist_vr_mem.pop("UserName", None) + exist_vr_mem.pop("Password", None) + + inp_mt = each.get("media_type") + if inp_mt is not None and inp_mt == "CD" and input_vr_mem["Image"][-4:].lower() != ".iso": + unsup_media = each["index"] + if inp_mt is not None and inp_mt == "DVD" and input_vr_mem["Image"][-4:].lower() != ".iso": + unsup_media = each["index"] + if inp_mt is not None and inp_mt == "USBStick" and input_vr_mem["Image"][-4:].lower() != ".img": + unsup_media = each["index"] + + is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items())) + else: + if vr_id == "manager": + for vr_v in vr_members: + exist_vr_mem = dict((k, vr_v[k]) for k in ["Inserted"]) + input_vr_mem = 
{"Inserted": each.get("insert")} + is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items())) + if is_change: + vr_mem = vr_v + break + else: + exist_vr_mem = dict((k, vr_mem[k]) for k in ["Inserted"]) + input_vr_mem = {"Inserted": each.get("insert")} + is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items())) + + return is_change, input_vr_mem, vr_mem, unsup_media + + +def _validate_params(module, vr_members, rd_version): + image = vr_members.get("image") + if image is not None and (image.startswith("//") or image.startswith("\\\\")): + if vr_members.get("username") is None or vr_members.get("password") is None: + module.fail_json(msg="CIFS share required username and password.") + if image is not None and image.startswith("\\\\"): + vr_members["image"] = image.replace("\\", "/") + if 140 >= int(rd_version) and image is not None: + if (vr_members.get("username") is not None or vr_members.get("password") is not None) and \ + image.startswith("https://"): + module.fail_json(msg=UNSUPPORTED_MSG_HTTPS) + elif image.startswith("\\\\") or image.startswith("//"): + module.fail_json(msg=UNSUPPORTED_MSG) + + +def virtual_media_operation(idrac, module, payload, vr_id): + err_payload, inserted = [], [] + force = module.params["force"] + + for i in payload: + try: + if force and i["vr_mem"]["Inserted"] and i["payload"]["Inserted"]: + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"], + "POST", data="{}", dump=False) + time.sleep(5) + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"], + "POST", data=i["payload"]) + elif not force and i["vr_mem"]["Inserted"] and i["payload"]["Inserted"]: + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"], + "POST", data="{}", dump=False) + time.sleep(5) + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"], + "POST", data=i["payload"]) + elif not i["vr_mem"]["Inserted"] and 
i["payload"]["Inserted"]: + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"], + "POST", data=i["payload"]) + elif i["vr_mem"]["Inserted"] and not i["payload"]["Inserted"]: + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"], + "POST", data="{}", dump=False) + time.sleep(5) + except Exception as err: + error = json.load(err).get("error") + if vr_id == "manager": + msg_id = error["@Message.ExtendedInfo"][0]["MessageId"] + if "VRM0021" in msg_id or "VRM0012" in msg_id: + uri = i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"] + if "RemovableDisk" in uri: + uri = uri.replace("RemovableDisk", "CD") + elif "CD" in uri: + uri = uri.replace("CD", "RemovableDisk") + idrac.invoke_request(uri, "POST", data="{}", dump=False) + time.sleep(5) + idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"], + "POST", data=i["payload"]) + else: + err_payload.append(error) + else: + err_payload.append(error) + return err_payload + + +def virtual_media(idrac, module, vr_members, vr_id, rd_version): + vr_input = module.params["virtual_media"] + vr_input_copy = copy.deepcopy(vr_input) + vr_index, invalid_idx, manager_idx = [], [], 0 + + for idx, value in enumerate(vr_input_copy, start=1): + if vr_id == "manager": + if value.get("index") is not None: + manager_idx = value["index"] + if value.get("image") is not None and value.get("image")[-4:] == ".img": + value["index"] = 1 + elif value.get("image") is not None and value.get("image")[-4:] == ".iso": + value["index"] = 2 + elif not value["insert"] and value["index"] is None: + value["index"] = idx + else: + if value.get("index") is None: + value["index"] = idx + if value["index"] == 0: + invalid_idx.append(value["index"]) + vr_index.append(value["index"]) + + _validate_params(module, value, rd_version) + + if ((len(set(vr_index)) != len(vr_index)) or (len(vr_members) < max(vr_index)) or invalid_idx) and vr_id == "system": + 
module.fail_json(msg=INVALID_INDEX) + if (vr_id == "manager") and (1 < manager_idx): + module.fail_json(msg=INVALID_INDEX) + payload, unsupported_media = [], [] + for each in vr_input_copy: + + is_change, ret_payload, action, unsup_media = get_payload_data(each, vr_members, vr_id) + if unsup_media is not None: + unsupported_media.append(unsup_media) + if module.params["force"] and not is_change and each["insert"]: + is_change = True + if is_change: + payload.append({"payload": ret_payload, "vr_mem": action, "input": each}) + + if unsupported_media: + if vr_id == "manager": + module.fail_json(msg=UNSUPPORTED_MEDIA.format("1")) + module.fail_json(msg=UNSUPPORTED_MEDIA.format(", ".join(list(map(str, unsupported_media))))) + + if module.check_mode and payload: + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif module.check_mode and not payload: + module.exit_json(msg=NO_CHANGES_FOUND) + elif not module.check_mode and not payload: + module.exit_json(msg=NO_CHANGES_FOUND) + + status = virtual_media_operation(idrac, module, payload, vr_id) + + return status + + +def _validate_image_format(module): + unsup_image = False + for each in module.params["virtual_media"]: + if each["insert"] and each.get("image") is not None and each.get("image")[-4:].lower() not in [".iso", ".img"]: + unsup_image = True + if unsup_image: + module.fail_json(msg=UNSUPPORTED_IMAGE) + + +def main(): + specs = { + "virtual_media": { + "required": True, "type": "list", "elements": "dict", + "options": { + "insert": {"required": True, "type": "bool"}, + "image": {"required": False, "type": "path"}, + "index": {"required": False, "type": "int"}, + "domain": {"required": False, "type": "str"}, + "username": {"required": False, "type": "str"}, + "password": {"required": False, "type": "str", "no_log": True}, + "media_type": {"required": False, "type": "str", "choices": ["CD", "DVD", "USBStick"]}, + }, + "required_if": [["insert", True, ("image", )]], + }, + "force": {"required": False, "type": 
"bool", "default": False}, + "resource_id": {"required": False, "type": 'str'}, + } + specs.update(idrac_auth_params) + module = AnsibleModule(argument_spec=specs, supports_check_mode=True) + try: + with iDRACRedfishAPI(module.params, req_session=True) as idrac: + vr_media = module.params["virtual_media"] + vr_members, vr_id, rd_version = get_virtual_media_info(idrac) + if (len(vr_media) > len(vr_members) and vr_id == "system") or \ + (len(vr_media) > 1 and vr_id == "manager"): + module.fail_json(msg=EXCEEDED_ERROR) + _validate_image_format(module) + resp = virtual_media(idrac, module, vr_members, vr_id, rd_version) + if resp: + module.fail_json(msg=FAIL_MSG, error_info=resp) + module.exit_json(msg=SUCCESS_MSG, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py new file mode 100644 index 00000000..98235b9d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py @@ -0,0 +1,457 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_active_directory
+short_description: Configure Active Directory groups to be used with Directory Services
+description: "This module allows to add, modify, and delete OpenManage Enterprise connection with Active Directory
+Service."
+version_added: "4.0.0"
+author:
+  - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+  - dellemc.openmanage.ome_auth_options
+options:
+  domain_server:
+    type: list
+    elements: str
+    description:
+      - Enter the domain name or FQDN or IP address of the domain controller.
+      - If I(domain_controller_lookup) is C(DNS), enter the domain name to query DNS for the domain controllers.
+      - "If I(domain_controller_lookup) is C(MANUAL), enter the FQDN or the IP address of the domain controller.
+        The maximum number of Active Directory servers that can be added is three."
+  domain_controller_lookup:
+    type: str
+    description:
+      - Select the Domain Controller Lookup method.
+    choices:
+      - DNS
+      - MANUAL
+    default: DNS
+  domain_controller_port:
+    type: int
+    description:
+      - Domain controller port.
+      - By default, Global Catalog Address port number 3269 is populated.
+      - For the Domain Controller Access, enter 636 as the port number.
+      - C(NOTE), Only LDAPS ports are supported.
+    default: 3269
+  group_domain:
+    type: str
+    description:
+      - Provide the group domain in the format C(example.com) or C(ou=org, dc=example, dc=com).
+  id:
+    type: int
+    description:
+      - Provide the ID of the existing Active Directory service connection.
+      - This is applicable for modification and deletion.
+      - This is mutually exclusive with I(name).
+  name:
+    type: str
+    description:
+      - Provide a name for the Active Directory connection.
+      - This is applicable for creation and deletion.
+      - This is mutually exclusive with I(id).
+ network_timeout: + type: int + description: + - Enter the network timeout duration in seconds. + - The supported timeout duration range is 15 to 300 seconds. + default: 120 + search_timeout: + type: int + description: + - Enter the search timeout duration in seconds. + - The supported timeout duration range is 15 to 300 seconds. + default: 120 + state: + type: str + description: + - C(present) allows to create or modify an Active Directory service. + - C(absent) allows to delete a Active Directory service. + choices: + - present + - absent + default: present + test_connection: + type: bool + description: + - Enables testing the connection to the domain controller. + - The connection to the domain controller is tested with the provided Active Directory service details. + - If test fails, module will error out. + - If C(yes), I(domain_username) and I(domain_password) has to be provided. + default: no + domain_password: + type: str + description: + - Provide the domain password. + - This is applicable when I(test_connection) is C(yes). + domain_username: + type: str + description: + - Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\\\username) format. + - This is applicable when I(test_connection) is C(yes). + validate_certificate: + type: bool + description: + - Enables validation of SSL certificate of the domain controller. + - The module will always report change when this is C(yes). + default: no + certificate_file: + type: path + description: + - Provide the full path of the SSL certificate. + - The certificate should be a Root CA Certificate encoded in Base64 format. + - This is applicable when I(validate_certificate) is C(yes). +requirements: + - "python >= 3.8.6" +notes: + - The module will always report change when I(validate_certificate) is C(yes). + - Run this module from a system that has direct access to OpenManage Enterprise. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Add Active Directory service using DNS lookup along with the test connection + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad1 + domain_server: + - domainname.com + group_domain: domainname.com + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + +- name: Add Active Directory service using IP address of the domain controller with certificate validation + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.181 + group_domain: domainname.com + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" + +- name: Modify domain controller IP address, network_timeout and group_domain + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + domain_controller_lookup: MANUAL + domain_server: + - 192.68.20.189 + group_domain: newdomain.in + network_timeout: 150 + +- name: Delete Active Directory service + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + state: absent + +- name: Test connection to existing Active Directory service with certificate validation + dellemc.openmanage.ome_active_directory: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: my_ad2 + test_connection: yes + domain_username: user@domainname + domain_password: domain_password + validate_certificate: yes + certificate_file: "/path/to/certificate/file.cer" +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the Active 
Directory operation. + returned: always + sample: "Successfully renamed the slot(s)." +active_directory: + type: dict + description: The Active Directory that was added, modified or deleted by this module. + returned: on change + sample: { + "Name": "ad_test", + "Id": 21789, + "ServerType": "MANUAL", + "ServerName": ["192.168.20.181"], + "DnsServer": [], + "GroupDomain": "dellemcdomain.com", + "NetworkTimeOut": 120, + "Password": null, + "SearchTimeOut": 120, + "ServerPort": 3269, + "CertificateValidation": false + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error_info": { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to connect to the LDAP or AD server because the entered credentials are invalid.", + "MessageArgs": [], + "MessageId": "CSEC5002", + "RelatedProperties": [], + "Resolution": "Make sure the server input configuration are valid and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } + } +""" + +import json +import os +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.common.dict_transformations import recursive_diff + +AD_URI = "AccountService/ExternalAccountProvider/ADAccountProvider" +TEST_CONNECTION = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.TestADConnection" +DELETE_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.DeleteExternalAccountProvider" +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+MAX_AD_MSG = "Unable to add the account provider because the maximum number of configurations allowed for an" \ + " Active Directory service is {0}." +CREATE_SUCCESS = "Successfully added the Active Directory service." +MODIFY_SUCCESS = "Successfully modified the Active Directory service." +DELETE_SUCCESS = "Successfully deleted the Active Directory service." +DOM_SERVER_MSG = "Specify the domain server. Domain server is required to create an Active Directory service." +GRP_DOM_MSG = "Specify the group domain. Group domain is required to create an Active Directory service." +CERT_INVALID = "The provided certificate file path is invalid or not readable." +DOMAIN_ALLOWED_COUNT = "Maximum entries allowed for {0} lookup type is {1}." +TEST_CONNECTION_SUCCESS = "Test Connection is successful. " +TEST_CONNECTION_FAIL = "Test Connection has failed. " +ERR_READ_FAIL = "Unable to retrieve the error details." +INVALID_ID = "The provided Active Directory ID is invalid." +TIMEOUT_RANGE = "The {0} value is not in the range of {1} to {2}." 
+MAX_AD = 2 +MIN_TIMEOUT = 15 +MAX_TIMEOUT = 300 + + +def get_ad(module, rest_obj): + ad = {} + prm = module.params + resp = rest_obj.invoke_request('GET', AD_URI) + ad_list = resp.json_data.get('value') + ad_cnt = len(ad_list) + ky = 'Name' + vl = 'name' + if prm.get('id'): + ky = 'Id' + vl = 'id' + for adx in ad_list: + if str(adx.get(ky)).lower() == str(prm.get(vl)).lower(): + ad = adx + break + return ad, ad_cnt + + +def test_http_error_fail(module, err): + try: + error_info = json.load(err) + err_list = error_info.get('error', {}).get('@Message.ExtendedInfo', [ERR_READ_FAIL]) + if err_list: + err_rsn = err_list[0].get("Message") + except Exception: + err_rsn = ERR_READ_FAIL + module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, err_rsn), error_info=error_info) + + +def test_connection(module, rest_obj, create_payload): + try: + create_payload['UserName'] = module.params.get('domain_username') + create_payload['Password'] = module.params.get('domain_password') + rest_obj.invoke_request('POST', TEST_CONNECTION, data=create_payload, + api_timeout=create_payload['NetworkTimeOut']) + create_payload.pop('UserName', None) + create_payload.pop('Password', None) + except HTTPError as err: + test_http_error_fail(module, err) + except SSLError as err: + module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, str(err))) + except Exception as err: + module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, str(err))) + + +def make_payload(prm): + dc_type = {'DNS': 'DnsServer', 'MANUAL': 'ServerName'} + tmplt_ad = {'name': 'Name', 'domain_controller_port': 'ServerPort', 'domain_controller_lookup': 'ServerType', + 'domain_server': dc_type[prm.get('domain_controller_lookup')], 'group_domain': 'GroupDomain', + 'network_timeout': 'NetworkTimeOut', 'search_timeout': 'SearchTimeOut', + 'validate_certificate': 'CertificateValidation'} + payload = dict([(v, prm.get(k)) for k, v in tmplt_ad.items() if prm.get(k) is not None]) + return payload + + +def 
validate_n_testconnection(module, rest_obj, payload): + dc_cnt = {'DNS': 1, 'MANUAL': 3} + dc_type = {'DNS': 'DnsServer', 'MANUAL': 'ServerName'} + dc_lookup = payload.get('ServerType') + if len(payload.get(dc_type[dc_lookup])) > dc_cnt[dc_lookup]: + module.fail_json(msg=DOMAIN_ALLOWED_COUNT.format(dc_lookup, dc_cnt[dc_lookup])) + t_list = ['NetworkTimeOut', 'SearchTimeOut'] + for tx in t_list: + if payload.get(tx) not in range(MIN_TIMEOUT, MAX_TIMEOUT + 1): + module.fail_json(msg=TIMEOUT_RANGE.format(tx, MIN_TIMEOUT, MAX_TIMEOUT)) + payload['CertificateFile'] = "" + if payload.get('CertificateValidation'): + cert_path = module.params.get('certificate_file') + if os.path.exists(cert_path): + with open(cert_path, 'r') as certfile: + cert_data = certfile.read() + payload['CertificateFile'] = cert_data + else: + module.fail_json(msg=CERT_INVALID) + msg = "" + if module.params.get('test_connection'): + test_connection(module, rest_obj, payload) + msg = TEST_CONNECTION_SUCCESS + return msg + + +def create_ad(module, rest_obj): + prm = module.params + if not prm.get('domain_server'): + module.fail_json(msg=DOM_SERVER_MSG) + if not prm.get('group_domain'): + module.fail_json(msg=GRP_DOM_MSG) + create_payload = make_payload(prm) + msg = validate_n_testconnection(module, rest_obj, create_payload) + if module.check_mode: + module.exit_json(msg="{0}{1}".format(msg, CHANGES_FOUND), changed=True) + resp = rest_obj.invoke_request('POST', AD_URI, data=create_payload) + ad = resp.json_data + ad.pop('CertificateFile', "") + module.exit_json(msg="{0}{1}".format(msg, CREATE_SUCCESS), active_directory=ad, changed=True) + + +def modify_ad(module, rest_obj, ad): + prm = module.params + modify_payload = make_payload(prm) + ad = rest_obj.strip_substr_dict(ad) + if ad.get('ServerName'): + (ad.get('ServerName')).sort() + if modify_payload.get('ServerName'): + (modify_payload.get('ServerName')).sort() + diff = recursive_diff(modify_payload, ad) + is_change = False + if diff: + if diff[0]: + 
is_change = True + ad.update(modify_payload) + msg = validate_n_testconnection(module, rest_obj, ad) + if not is_change and not ad.get('CertificateValidation'): + module.exit_json(msg="{0}{1}".format(msg, NO_CHANGES_MSG), active_directory=ad) + if module.check_mode: + module.exit_json(msg="{0}{1}".format(msg, CHANGES_FOUND), changed=True) + resp = rest_obj.invoke_request('PUT', "{0}({1})".format(AD_URI, ad['Id']), data=ad) + ad = resp.json_data + ad.pop('CertificateFile', "") + module.exit_json(msg="{0}{1}".format(msg, MODIFY_SUCCESS), active_directory=ad, changed=True) + + +def delete_ad(module, rest_obj, ad): + ad = rest_obj.strip_substr_dict(ad) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, active_directory=ad, changed=True) + resp = rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]}) + module.exit_json(msg=DELETE_SUCCESS, active_directory=ad, changed=True) + + +def main(): + specs = { + "state": {"type": 'str', "choices": ["present", "absent"], "default": 'present'}, + "name": {"type": 'str'}, + "id": {"type": 'int'}, + "domain_controller_lookup": {"type": 'str', "choices": ['MANUAL', 'DNS'], "default": 'DNS'}, + "domain_server": {"type": 'list', "elements": 'str'}, + "group_domain": {"type": 'str'}, + "domain_controller_port": {"type": 'int', "default": 3269}, + "network_timeout": {"type": 'int', "default": 120}, + "search_timeout": {"type": 'int', "default": 120}, + "validate_certificate": {"type": 'bool', "default": False}, + "certificate_file": {"type": 'path'}, + "test_connection": {"type": 'bool', "default": False}, + "domain_username": {"type": 'str'}, + "domain_password": {"type": 'str', "no_log": True} + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_one_of=[('name', 'id')], + required_if=[ + ('test_connection', True, ('domain_username', 'domain_password',)), + ('validate_certificate', True, ('certificate_file',))], + mutually_exclusive=[('name', 'id')], 
+ supports_check_mode=True) + try: + with RestOME(module.params, req_session=True) as rest_obj: + ad, ad_cnt = get_ad(module, rest_obj) + if module.params.get('state') == 'present': + if ad: + modify_ad(module, rest_obj, ad) + else: + if module.params.get('id'): + module.fail_json(msg=INVALID_ID) + if ad_cnt < MAX_AD: + create_ad(module, rest_obj) + module.fail_json(msg=MAX_AD_MSG.format(MAX_AD)) + else: + if ad: + delete_ad(module, rest_obj, ad) + module.exit_json(msg=NO_CHANGES_MSG) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except ( + IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, + OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py new file mode 100644 index 00000000..66a8b26c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_alerts_smtp +short_description: This module allows to configure SMTP or email configurations +version_added: "4.3.0" +description: + - This module allows to configure SMTP or email configurations on OpenManage Enterprise + and OpenManage Enterprise Modular. 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + destination_address: + description: The IP address or FQDN of the SMTP destination server. + type: str + required: true + port_number: + description: The port number of the SMTP destination server. + type: int + use_ssl: + description: Use SSL to connect with the SMTP server. + type: bool + enable_authentication: + description: + - Enable or disable authentication to access the SMTP server. + - The I(credentials) are mandatory if I(enable_authentication) is C(True). + - The module will always report change when this is C(True). + type: bool + required: true + credentials: + description: The credentials for the SMTP server + type: dict + suboptions: + username: + description: + - The username to access the SMTP server. + type: str + required: true + password: + description: + - The password to access the SMTP server. + type: str + required: true +requirements: + - "python >= 3.8.6" +notes: + - The module will always report change when I(enable_authentication) is C(True). + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise + or OpenManage Enterprise Modular. + - This module support C(check_mode). 
+author: + - Sachin Apagundi(@sachin-apa) +''' + +EXAMPLES = """ +--- +- name: Update SMTP destination server configuration with authentication + dellemc.openmanage.ome_application_alerts_smtp: + hostname: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination_address: "localhost" + port_number: 25 + use_ssl: true + enable_authentication: true + credentials: + username: "username" + password: "password" +- name: Update SMTP destination server configuration without authentication + dellemc.openmanage.ome_application_alerts_smtp: + hostname: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination_address: "localhost" + port_number: 25 + use_ssl: false + enable_authentication: false +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the SMTP settings update. + returned: always + sample: "Successfully updated the SMTP settings." +smtp_details: + type: dict + description: returned when SMTP settings are updated successfully. + returned: success + sample: { + "DestinationAddress": "localhost", + "PortNumber": 25, + "UseCredentials": true, + "UseSSL": false, + "Credential": { + "User": "admin", + "Password": null + } + } + +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [{ + "MessageId": "CAPP1106", + "RelatedProperties": [], + "Message": "Unable to update the SMTP settings because the entered credential is invalid or empty.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Either enter valid credentials or disable the Use Credentials option and retry the operation." 
+ } + ] + } + } +""" + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.common.dict_transformations import recursive_diff + +SUCCESS_MSG = "Successfully updated the SMTP settings." +SMTP_URL = "AlertService/AlertDestinations/SMTPConfiguration" +NO_CHANGES = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." + + +def fetch_smtp_settings(rest_obj): + final_resp = rest_obj.invoke_request("GET", SMTP_URL) + ret_data = final_resp.json_data.get('value')[0] + ret_data.pop("@odata.type") + return ret_data + + +def update_smtp_settings(rest_obj, payload): + final_resp = rest_obj.invoke_request("POST", SMTP_URL, data=payload) + return final_resp + + +def update_payload(module, curr_payload): + smtp_data_payload = { + "DestinationAddress": get_value(module, curr_payload, "destination_address", "DestinationAddress"), + "UseCredentials": get_value(module, curr_payload, "enable_authentication", "UseCredentials"), + "PortNumber": get_value(module, curr_payload, "port_number", "PortNumber"), + "UseSSL": get_value(module, curr_payload, "use_ssl", "UseSSL") + } + if module.params.get("credentials") and smtp_data_payload.get("UseCredentials"): + cred_payload = { + "Credential": { + "User": module.params.get("credentials").get("username"), + "Password": module.params.get("credentials").get("password") + } + } + smtp_data_payload.update(cred_payload) + return smtp_data_payload + + +def get_value(module, resp, mod_key, attr_key): + ret_value = module.params.get(mod_key) + if module.params.get(mod_key) is None: + ret_value = resp.get(attr_key) + return ret_value + + +def _diff_payload(curr_resp, update_resp): + is_change = False + if update_resp: + diff = 
recursive_diff(update_resp, curr_resp) + if diff and diff[0]: + is_change = True + return is_change + + +def password_no_log(attributes): + if isinstance(attributes, dict) and 'password' in attributes: + attributes['password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + + +def fail_module(module, **failmsg): + password_no_log(module.params.get("credentials")) + module.fail_json(**failmsg) + + +def exit_module(module, **existmsg): + password_no_log(module.params.get("credentials")) + module.exit_json(**existmsg) + + +def process_check_mode(module, diff): + if not diff and not module.check_mode: + exit_module(module, msg=NO_CHANGES) + elif not diff and module.check_mode: + exit_module(module, msg=NO_CHANGES) + elif diff and module.check_mode: + exit_module(module, msg=CHANGES_FOUND, changed=True) + + +def main(): + credentials_options = {"username": {"type": "str", "required": True}, + "password": {"type": "str", "required": True, "no_log": True}} + + specs = { + "destination_address": {"required": True, "type": "str"}, + "port_number": {"required": False, "type": "int"}, + "use_ssl": {"required": False, "type": "bool"}, + "enable_authentication": {"required": True, "type": "bool"}, + "credentials": + {"required": False, "type": "dict", + "options": credentials_options, + }, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[['enable_authentication', True, ['credentials']], ], + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + curr_resp = fetch_smtp_settings(rest_obj) + payload = update_payload(module, curr_resp) + diff = _diff_payload(curr_resp, payload) + process_check_mode(module, diff) + resp = update_smtp_settings(rest_obj, payload) + exit_module(module, msg=SUCCESS_MSG, + smtp_details=resp.json_data, changed=True) + + except HTTPError as err: + fail_module(module, msg=str(err), error_info=json.load(err)) + except URLError as err: + exit_module(module, msg=str(err), 
unreachable=True) + except ( + IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, + OSError) as err: + fail_module(module, msg=str(err), error_info=json.load(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py new file mode 100644 index 00000000..12c21245 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_application_alerts_syslog +short_description: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular +description: This module allows to configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular. +version_added: 4.3.0 +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + syslog_servers: + description: List of servers to forward syslog. + type: list + elements: dict + suboptions: + id: + description: The ID of the syslog server. + type: int + choices: [1, 2, 3, 4] + required: True + enabled: + description: Enable or disable syslog forwarding. + type: bool + destination_address: + description: + - The IP address, FQDN or hostname of the syslog server. + - This is required if I(enabled) is C(True). + type: str + port_number: + description: The UDP port number of the syslog server. 
+ type: int +requirements: + - "python >= 3.8.6" +author: + - Jagadeesh N V(@jagadeeshnv) +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Configure single server to forward syslog + dellemc.openmanage.ome_application_alerts_syslog: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + enabled: true + destination_address: 192.168.0.2 + port_number: 514 + +- name: Configure multiple server to forward syslog + dellemc.openmanage.ome_application_alerts_syslog: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + syslog_servers: + - id: 1 + port_number: 523 + - id: 2 + enabled: true + destination_address: sysloghost1.lab.com + - id: 3 + enabled: false + - id: 4 + enabled: true + destination_address: 192.168.0.4 + port_number: 514 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the syslog forwarding operation. + returned: always + sample: Successfully updated the syslog forwarding settings. +syslog_details: + type: list + description: Syslog forwarding settings list applied. + returned: on success + sample: [ + { + "DestinationAddress": "192.168.10.43", + "Enabled": false, + "Id": 1, + "PortNumber": 514 + }, + { + "DestinationAddress": "192.168.10.46", + "Enabled": true, + "Id": 2, + "PortNumber": 514 + }, + { + "DestinationAddress": "192.168.10.44", + "Enabled": true, + "Id": 3, + "PortNumber": 514 + }, + { + "DestinationAddress": "192.168.10.42", + "Enabled": true, + "Id": 4, + "PortNumber": 515 + } + ] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CAPP1108", + "RelatedProperties": [], + "Message": "Unable to update the Syslog settings because the request contains an invalid number of + configurations. The request must contain no more than 4 configurations but contains 5.", + "MessageArgs": [ + "4", + "5" + ], + "Severity": "Warning", + "Resolution": "Enter only the required number of configurations as identified in the message and + retry the operation." + } + ] + } +} +""" + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.common.dict_transformations import recursive_diff +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +SYSLOG_GET = "AlertService/AlertDestinations/SyslogConfiguration" +SYSLOG_SET = "AlertService/AlertDestinations/Actions/AlertDestinations.ApplySyslogConfig" +SUCCESS_MSG = "Successfully updated the syslog forwarding settings." +DUP_ID_MSG = "Duplicate server IDs are provided." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+SYSLOG_UDP = 514 + + +def validate_input(module): + mparams = module.params + syslog_list = mparams.get("syslog_servers") + if not syslog_list: + module.exit_json(msg=NO_CHANGES_MSG) + syslog_dict = {} + for sys in syslog_list: + trim_sys = dict((k, v) for k, v in sys.items() if v is not None) + syslog_dict[sys.get('id')] = snake_dict_to_camel_dict(trim_sys, capitalize_first=True) + if len(syslog_dict) < len(syslog_list): + module.exit_json(msg=DUP_ID_MSG, failed=True) + return syslog_dict + + +def strip_substr_dict(odata_dict, chkstr='@odata.'): + cp = odata_dict.copy() + klist = cp.keys() + for k in klist: + if chkstr in str(k).lower(): + odata_dict.pop(k) + if not odata_dict.get('PortNumber'): + odata_dict['PortNumber'] = SYSLOG_UDP + return odata_dict + + +def get_current_syslog(rest_obj): + resp = rest_obj.invoke_request("GET", SYSLOG_GET) + syslog_list = resp.json_data.get('value') + return syslog_list + + +def compare_get_payload(module, current_list, input_config): + payload_list = [strip_substr_dict(sys) for sys in current_list] # preserving list order + current_config = dict([(sys.get('Id'), sys) for sys in payload_list]) + diff = 0 + for k, v in current_config.items(): + i_dict = input_config.get(k) + if i_dict: + d = recursive_diff(i_dict, v) + if d and d[0]: + v.update(d[0]) + diff = diff + 1 + v.pop("Id", None) # not mandatory + payload_list[int(k) - 1] = v # The order in list needs to be maintained + if not diff: + module.exit_json(msg=NO_CHANGES_MSG) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + return payload_list + + +def main(): + specs = { + "syslog_servers": + {"type": 'list', "elements": 'dict', "options": + {"id": {"type": 'int', "choices": [1, 2, 3, 4], "required": True}, + "enabled": {"type": 'bool'}, + "destination_address": {"type": 'str'}, + "port_number": {"type": 'int'} + }, + "required_one_of": [("enabled", "destination_address", "port_number")], + "required_if": [("enabled", True, 
("destination_address",))] + } + } + specs.update(ome_auth_params) + + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + input_config = validate_input(module) + current_list = get_current_syslog(rest_obj) + payload = compare_get_payload(module, current_list, input_config) + resp = rest_obj.invoke_request("POST", SYSLOG_SET, data=payload, api_timeout=120) + # POST Call taking average 50-60 seconds so api_timeout=120 + module.exit_json(msg=SUCCESS_MSG, syslog_details=resp.json_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except ( + IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, + OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py new file mode 100644 index 00000000..3c9b2699 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_certificate +short_description: This module allows to generate a CSR and upload the certificate +version_added: "2.1.0" +description: + - This module allows the generation a new certificate signing request (CSR) and to upload the certificate + on OpenManage Enterprise. +notes: + - If a certificate is uploaded, which is identical to an already existing certificate, it is accepted by the module. + - This module does not support C(check_mode). +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + command: + description: C(generate_csr) allows the generation of a CSR and C(upload) uploads the certificate. + type: str + default: generate_csr + choices: [generate_csr, upload] + distinguished_name: + description: Name of the certificate issuer. This option is applicable for C(generate_csr). + type: str + department_name: + description: Name of the department that issued the certificate. This option is applicable for C(generate_csr). + type: str + business_name: + description: Name of the business that issued the certificate. This option is applicable for C(generate_csr). + type: str + locality: + description: Local address of the issuer of the certificate. This option is applicable for C(generate_csr). + type: str + country_state: + description: State in which the issuer resides. This option is applicable for C(generate_csr). + type: str + country: + description: Country in which the issuer resides. This option is applicable for C(generate_csr). + type: str + email: + description: Email associated with the issuer. This option is applicable for C(generate_csr). + type: str + upload_file: + type: str + description: Local path of the certificate file to be uploaded. 
This option is applicable for C(upload). + Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds. +requirements: + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +''' + +EXAMPLES = r''' +--- +- name: Generate a certificate signing request + dellemc.openmanage.ome_application_certificate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "generate_csr" + distinguished_name: "hostname.com" + department_name: "Remote Access Group" + business_name: "Dell Inc." + locality: "Round Rock" + country_state: "Texas" + country: "US" + email: "support@dell.com" + +- name: Upload the certificate + dellemc.openmanage.ome_application_certificate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "upload" + upload_file: "/path/certificate.cer" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the certificate signing request. + returned: always + sample: "Successfully generated certificate signing request." +csr_status: + type: dict + description: Details of the generated certificate. + returned: on success + sample: + {"CertificateData": "-----BEGIN CERTIFICATE REQUEST-----GHFSUEKLELE + af3u4h2rkdkfjasczjfefhkrr/frjrfrjfrxnvzklf/nbcvxmzvndlskmcvbmzkdk + kafhaksksvklhfdjtrhhffgeth/tashdrfstkm@kdjFGD/sdlefrujjfvvsfeikdf + yeufghdkatbavfdomehtdnske/tahndfavdtdfgeikjlagmdfbandfvfcrfgdtwxc + qwgfrteyupojmnsbajdkdbfs/ujdfgthedsygtamnsuhakmanfuarweyuiwruefjr + etwuwurefefgfgurkjkdmbvfmvfvfk==-----END CERTIFICATE REQUEST-----" + } +error_info: + description: Details of the HTTP error. + returned: on HTTP error + type: dict + sample: + { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
def get_resource_parameters(module):
    """Build the REST request for the requested certificate command.

    For ``generate_csr`` the payload is a dict of certificate-issuer fields;
    for ``upload`` it is the raw bytes of the certificate file.

    :param module: AnsibleModule instance with validated parameters.
    :return: tuple of (HTTP method, request URI, request payload).
    """
    command = module.params["command"]
    csr_uri = "ApplicationService/Actions/ApplicationService.{0}"
    method = "POST"
    if command == "generate_csr":
        uri = csr_uri.format("GenerateCSR")
        payload = {"DistinguishedName": module.params["distinguished_name"],
                   "DepartmentName": module.params["department_name"],
                   "BusinessName": module.params["business_name"],
                   "Locality": module.params["locality"], "State": module.params["country_state"],
                   "Country": module.params["country"], "Email": module.params["email"]}
    else:
        uri = csr_uri.format("UploadCertificate")
        file_path = module.params["upload_file"]
        # EAFP: open the file directly instead of the racy os.path.exists()
        # pre-check, and do not shadow `payload` with the file handle.
        try:
            with open(file_path, 'rb') as cert_file:
                payload = cert_file.read()
        except OSError:
            module.fail_json(msg="No such file or directory.")
    return method, uri, payload


def main():
    """Module entry point: generate a CSR or upload a certificate to OME."""
    specs = {
        "command": {"type": "str", "required": False,
                    "choices": ["generate_csr", "upload"], "default": "generate_csr"},
        "distinguished_name": {"required": False, "type": "str"},
        "department_name": {"required": False, "type": "str"},
        "business_name": {"required": False, "type": "str"},
        "locality": {"required": False, "type": "str"},
        "country_state": {"required": False, "type": "str"},
        "country": {"required": False, "type": "str"},
        "email": {"required": False, "type": "str"},
        "upload_file": {"required": False, "type": "str"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[["command", "generate_csr", ["distinguished_name", "department_name",
                                                  "business_name", "locality", "country_state",
                                                  "country", "email"]],
                     ["command", "upload", ["upload_file"]]],
        supports_check_mode=False
    )
    header = {"Content-Type": "application/octet-stream", "Accept": "application/octet-stream"}
    try:
        with RestOME(module.params, req_session=False) as rest_obj:
            method, uri, payload = get_resource_parameters(module)
            command = module.params.get("command")
            # A CSR payload is a dict that must be JSON-serialized; an uploaded
            # certificate is sent verbatim as an octet-stream body.
            dump = command != "upload"
            headers = header if command == "upload" else None
            resp = rest_obj.invoke_request(method, uri, headers=headers, data=payload, dump=dump)
            if resp.success:
                if command == "generate_csr":
                    module.exit_json(msg="Successfully generated certificate signing request.",
                                     csr_status=resp.json_data)
                module.exit_json(msg="Successfully uploaded application certificate.", changed=True)
    except HTTPError as err:
        # HTTPError bodies are file-like, so json.load is valid here.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_console_preferences +short_description: Configure console preferences on OpenManage Enterprise. +description: This module allows user to configure the console preferences on OpenManage Enterprise. +version_added: "5.2.0" +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + report_row_limit: + description: The maximum number of rows that you can view on OpenManage Enterprise reports. + type: int + device_health: + description: The time after which the health of the devices must be automatically monitored and updated + on the OpenManage Enterprise dashboard. + type: dict + suboptions: + health_check_interval: + description: The frequency at which the device health must be recorded and data stored. + type: int + health_check_interval_unit: + description: + - The time unit of the frequency at which the device health must be recorded and data stored. + - C(Hourly) to set the frequency in hours. + - C(Minutes) to set the frequency in minutes. + type: str + choices: [Hourly, Minutes] + health_and_power_state_on_connection_lost: + description: + - The latest recorded device health. + - C(last_known) to display the latest recorded device health when the power connection was lost. + - C(unknown) to display the latest recorded device health when the device status moved to unknown. + type: str + choices: [last_known, unknown] + discovery_settings: + description: The device naming to be used by the OpenManage Enterprise to identify the discovered iDRACs + and other devices. 
+ type: dict + suboptions: + general_device_naming: + description: + - Applicable to all the discovered devices other than the iDRACs. + - C(DNS) to use the DNS name. + - C(NETBIOS) to use the NetBIOS name. + type: str + choices: [DNS, NETBIOS] + default: DNS + server_device_naming: + description: + - Applicable to iDRACs only. + - C(IDRAC_HOSTNAME) to use the iDRAC hostname. + - C(IDRAC_SYSTEM_HOSTNAME) to use the system hostname. + type: str + choices: [IDRAC_HOSTNAME, IDRAC_SYSTEM_HOSTNAME] + default: IDRAC_SYSTEM_HOSTNAME + invalid_device_hostname: + description: The invalid hostnames separated by a comma. + type: str + common_mac_addresses: + description: The common MAC addresses separated by a comma. + type: str + server_initiated_discovery: + description: Server initiated discovery settings. + type: dict + suboptions: + device_discovery_approval_policy: + description: + - Discovery approval policies. + - "C(Automatic) allows servers with iDRAC Firmware version 4.00.00.00, which are on the same network as the + console, to be discovered automatically by the console." + - C(Manual) for the servers to be discovered by the user manually. + type: str + choices: [Automatic, Manual] + set_trap_destination: + description: Trap destination settings. + type: bool + mx7000_onboarding_preferences: + description: + - Alert-forwarding behavior on chassis when they are onboarded. + - C(all) to receive all alert. + - C(chassis) to receive chassis category alerts only. + type: str + choices: [all, chassis] + builtin_appliance_share: + description: The external network share that the appliance must access to complete operations. + type: dict + suboptions: + share_options: + description: + - The share options. + - C(CIFS) to select CIFS share type. + - C(HTTPS) to select HTTPS share type. + type: str + choices: [CIFS, HTTPS] + cifs_options: + description: + - The SMB protocol version. + - I(cifs_options) is required I(share_options) is C(CIFS). + - C(V1) to enable SMBv1. 
+ - C(V2) to enable SMBv2 + type: str + choices: [V1, V2] + email_sender_settings: + description: The email address of the user who is sending an email message. + type: str + trap_forwarding_format: + description: + - The trap forwarding format. + - C(Original) to retain the trap data as is. + - C(Normalized) to normalize the trap data. + type: str + choices: [Original, Normalized] + metrics_collection_settings: + description: The frequency of the PowerManager extension data maintenance and purging. + type: int +requirements: + - "python >= 3.8.6" +notes: + - This module supports C(check_mode). +author: + - Sachin Apagundi(@sachin-apa) + - Husniya Hameed (@husniya-hameed) +''' + +EXAMPLES = r''' +--- +- name: Update Console preferences with all the settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 123 + device_health: + health_check_interval: 1 + health_check_interval_unit: Hourly + health_and_power_state_on_connection_lost: last_known + discovery_settings: + general_device_naming: DNS + server_device_naming: IDRAC_HOSTNAME + invalid_device_hostname: "localhost" + common_mac_addresses: "::" + server_initiated_discovery: + device_discovery_approval_policy: Automatic + set_trap_destination: True + mx7000_onboarding_preferences: all + builtin_appliance_share: + share_options: CIFS + cifs_options: V1 + email_sender_settings: "admin@dell.com" + trap_forwarding_format: Normalized + metrics_collection_settings: 31 + +- name: Update Console preferences with report and device health settings. 
+ dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + report_row_limit: 236 + device_health: + health_check_interval: 10 + health_check_interval_unit: Hourly + health_and_power_state_on_connection_lost: last_known + +- name: Update Console preferences with invalid device health settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_health: + health_check_interval: 65 + health_check_interval_unit: Minutes + +- name: Update Console preferences with discovery and built in appliance share settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_settings: + general_device_naming: DNS + server_device_naming: IDRAC_SYSTEM_HOSTNAME + invalid_device_hostname: "localhost" + common_mac_addresses: "00:53:45:00:00:00" + builtin_appliance_share: + share_options: CIFS + cifs_options: V1 + +- name: Update Console preferences with server initiated discovery, mx7000 onboarding preferences, email sender, + trap forwarding format, and metrics collection settings. + dellemc.openmanage.ome_application_console_preferences: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + server_initiated_discovery: + device_discovery_approval_policy: Automatic + set_trap_destination: True + mx7000_onboarding_preferences: chassis + email_sender_settings: "admin@dell.com" + trap_forwarding_format: Original + metrics_collection_settings: 365 +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the console preferences. + returned: always + sample: "Successfully update the console preferences." 
+console_preferences: + type: list + description: Details of the console preferences. + returned: on success + sample: + [ + { + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost,localhost.localdomain,not defined,pv132t,pv136t,default,dell,idrac-", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "00:53:45:00:00:00,33:50:6F:45:30:30,50:50:54:50:30:30,00:00:FF:FF:FF:FF,20:41:53:59:4E:FF,00:00:00:00:00:00,20:41:53:59:4e:ff,00:00:00:00:00:00", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS" + }, + { + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "3650000", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING" + }, + { + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V1", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS" + }, + { + "Name": "ALERT_ACKNOWLEDGEMENT_VIEW", + "DefaultValue": "2000", + "Value": "2000", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "Name": "AUTO_CONSOLE_UPDATE_AFTER_DOWNLOAD", + "DefaultValue": "false", + "Value": "false", + "DataType": "java.lang.Boolean", + "GroupName": "CONSOLE_UPDATE_SETTING_GROUP" + }, + { + "Name": 
"NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "false", + "DataType": "java.lang.Boolean", + "GroupName": "" + }, + { + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "2000000000000000000000000", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin1@dell.com@dell.com@dell.com", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "test_chassis", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic_test", + "DataType": "java.lang.String", + "GroupName": "" + } + ] +error_info: + description: Details of the HTTP error. + returned: on HTTP error + type: dict + sample: + { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGEN1006", + "RelatedProperties": [], + "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Enter a valid URI and retry the operation." + } + ] + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict + +SUCCESS_MSG = "Successfully updated the Console Preferences settings." +SETTINGS_URL = "ApplicationService/Settings" +NO_CHANGES = "No changes found to be applied." 
+CHANGES_FOUND = "Changes found to be applied." +HEALTH_CHECK_UNIT_REQUIRED = "The health check unit is required when health check interval is specified." +HEALTH_CHECK_INTERVAL_REQUIRED = "The health check interval is required when health check unit is specified." +HEALTH_CHECK_INTERVAL_INVALID = "The health check interval specified is invalid for the {0}" +JOB_URL = "JobService/Jobs" +CIFS_URL = "ApplicationService/Actions/ApplicationService.UpdateShareTypeSettings" +CONSOLE_SETTINGS_VALUES = ["DATA_PURGE_INTERVAL", "EMAIL_SENDER", "TRAP_FORWARDING_SETTING", + "MX7000_ONBOARDING_PREF", "REPORTS_MAX_RESULTS_LIMIT", + "DISCOVERY_APPROVAL_POLICY", "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DEVICE_PREFERRED_NAME", "INVALID_DEVICE_HOSTNAME", "COMMON_MAC_ADDRESSES", + "CONSOLE_CONNECTION_SETTING", "MIN_PROTOCOL_VERSION", "SHARE_TYPE"] + + +def job_details(rest_obj): + query_param = {"$filter": "JobType/Id eq 6"} + job_resp = rest_obj.invoke_request("GET", JOB_URL, query_param=query_param) + job_data = job_resp.json_data.get('value') + tmp_list = [x["Id"] for x in job_data] + sorted_id = sorted(tmp_list) + latest_job = [val for val in job_data if val["Id"] == sorted_id[-1]] + return latest_job[0] + + +def create_job(module): + schedule = None + job_payload = None + device_health = module.params.get("device_health") + if device_health: + if device_health.get("health_check_interval_unit") == "Hourly": + schedule = "0 0 0/" + str(device_health.get("health_check_interval")) + " 1/1 * ? *" + elif device_health.get("health_check_interval_unit") == "Minutes": + schedule = "0 0/" + str(device_health.get("health_check_interval")) + " * 1/1 * ? 
*" + job_payload = {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": schedule, + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]} + return job_payload, schedule + + +def fetch_cp_settings(rest_obj): + final_resp = rest_obj.invoke_request("GET", SETTINGS_URL) + ret_data = final_resp.json_data.get('value') + return ret_data + + +def create_payload_dict(curr_payload): + payload = {} + for pay in curr_payload: + payload[pay["Name"]] = pay + return payload + + +def create_payload(module, curr_payload): + console_setting_list = [] + updated_payload = {"ConsoleSetting": []} + payload_dict = create_payload_dict(curr_payload) + get_sid = module.params.get("server_initiated_discovery") + get_ds = module.params.get("discovery_settings") + get_mcs = module.params.get("metrics_collection_settings") + get_email = module.params.get("email_sender_settings") + get_tff = module.params.get("trap_forwarding_format") + get_mx = module.params.get("mx7000_onboarding_preferences") + get_rrl = module.params.get("report_row_limit") + get_dh = module.params.get("device_health") + get_bas = module.params.get("builtin_appliance_share") + if get_mcs: + payload1 = payload_dict["DATA_PURGE_INTERVAL"].copy() + payload1["Value"] = get_mcs + console_setting_list.append(payload1) + if get_email: + payload2 = payload_dict["EMAIL_SENDER"].copy() + payload2["Value"] = get_email + console_setting_list.append(payload2) + if get_tff: + dict1 = {"Original": "AsIs", "Normalized": "Normalized"} + payload3 = payload_dict["TRAP_FORWARDING_SETTING"].copy() + payload3["Value"] = dict1.get(get_tff) + console_setting_list.append(payload3) + if get_mx: + payload4 = payload_dict["MX7000_ONBOARDING_PREF"].copy() + payload4["Value"] = get_mx + console_setting_list.append(payload4) + if get_rrl: + payload5 = 
payload_dict["REPORTS_MAX_RESULTS_LIMIT"].copy() + payload5["Value"] = get_rrl + console_setting_list.append(payload5) + if get_sid: + if get_sid.get("device_discovery_approval_policy"): + payload6 = payload_dict["DISCOVERY_APPROVAL_POLICY"].copy() + payload6["Value"] = get_sid.get("device_discovery_approval_policy") + console_setting_list.append(payload6) + if get_sid.get("set_trap_destination") is not None: + payload7 = payload_dict["NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION"].copy() + payload7["Value"] = get_sid.get("set_trap_destination") + console_setting_list.append(payload7) + if get_ds: + if get_ds.get("general_device_naming") and get_ds.get("server_device_naming"): + value = "PREFER_" + module.params["discovery_settings"]["general_device_naming"] + "," + "PREFER_" +\ + get_ds["server_device_naming"] + payload8 = payload_dict["DEVICE_PREFERRED_NAME"].copy() + payload8["Value"] = value + console_setting_list.append(payload8) + elif get_ds.get("general_device_naming"): + payload9 = payload_dict["DEVICE_PREFERRED_NAME"].copy() + payload9["Value"] = "PREFER_" + get_ds["general_device_naming"] + console_setting_list.append(payload9) + elif get_ds.get("server_device_naming"): + payload10 = payload_dict["DEVICE_PREFERRED_NAME"].copy() + payload10["Value"] = "PREFER_" + get_ds["server_device_naming"] + console_setting_list.append(payload10) + if get_ds.get("invalid_device_hostname"): + payload11 = payload_dict["INVALID_DEVICE_HOSTNAME"].copy() + payload11["Value"] = get_ds.get("invalid_device_hostname") + console_setting_list.append(payload11) + if get_ds.get("common_mac_addresses"): + payload12 = payload_dict["COMMON_MAC_ADDRESSES"].copy() + payload12["Value"] = get_ds.get("common_mac_addresses") + console_setting_list.append(payload12) + if get_dh and get_dh.get("health_and_power_state_on_connection_lost"): + payload13 = payload_dict["CONSOLE_CONNECTION_SETTING"].copy() + payload13["Value"] = get_dh.get("health_and_power_state_on_connection_lost") + 
console_setting_list.append(payload13) + if get_bas and get_bas.get("share_options") == "CIFS": + payload14 = payload_dict["MIN_PROTOCOL_VERSION"].copy() + payload14["Value"] = get_bas.get("cifs_options") + console_setting_list.append(payload14) + updated_payload["ConsoleSetting"] = console_setting_list + return updated_payload, payload_dict + + +def create_cifs_payload(module, curr_payload): + console_setting_list = [] + updated_payload = {"ConsoleSetting": []} + payload_dict = create_payload_dict(curr_payload) + get_bas = module.params.get("builtin_appliance_share") + if get_bas and get_bas.get("share_options"): + payload = payload_dict["SHARE_TYPE"].copy() + payload["Value"] = get_bas.get("share_options") + console_setting_list.append(payload) + updated_payload["ConsoleSetting"] = console_setting_list + return updated_payload + + +def update_console_preferences(module, rest_obj, payload, payload_cifs, job_payload, job, payload_dict, schedule): + cifs_resp = None + job_final_resp = None + get_bas = module.params.get("builtin_appliance_share") + device_health = module.params.get("device_health") + [payload["ConsoleSetting"].remove(i) for i in payload["ConsoleSetting"] if i["Name"] == "SHARE_TYPE"] + if device_health and device_health.get("health_check_interval_unit") and job["Schedule"] != schedule: + job_final_resp = rest_obj.invoke_request("POST", JOB_URL, data=job_payload) + if get_bas and get_bas.get("share_options") and payload_dict["SHARE_TYPE"]["Value"] != \ + get_bas.get("share_options"): + cifs_resp = rest_obj.invoke_request("POST", CIFS_URL, data=payload_cifs) + final_resp = rest_obj.invoke_request("POST", SETTINGS_URL, data=payload) + return final_resp, cifs_resp, job_final_resp + + +def _diff_payload(curr_resp, update_resp, payload_cifs, schedule, job_det): + diff = 0 + update_resp["ConsoleSetting"].extend(payload_cifs["ConsoleSetting"]) + if schedule and job_det["Schedule"] != schedule: + diff += 1 + for i in curr_resp: + for j in 
update_resp["ConsoleSetting"]: + if i["Name"] == j["Name"]: + if isinstance(j["Value"], bool): + j["Value"] = str(j["Value"]).lower() + if isinstance(j["Value"], int): + j["Value"] = str(j["Value"]) + if i["Value"] != j["Value"]: + diff += 1 + return diff + + +def process_check_mode(module, diff): + if not diff: + module.exit_json(msg=NO_CHANGES) + elif diff and module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + + +def _validate_params(module): + error_message = _validate_health_check_interval(module) + if error_message: + module.fail_json(msg=error_message) + + +def _validate_health_check_interval(module): + error_message = None + device_health = module.params.get("device_health") + if device_health: + hci = device_health.get("health_check_interval") + hciu = device_health.get("health_check_interval_unit") + if hci and not hciu: + error_message = HEALTH_CHECK_UNIT_REQUIRED + if hciu and not hci: + error_message = HEALTH_CHECK_INTERVAL_REQUIRED + if hciu and hci: + if hciu == "Hourly" and (hci < 1 or hci > 23): + error_message = HEALTH_CHECK_INTERVAL_INVALID.format(hciu) + if hciu == "Minutes" and (hci < 1 or hci > 59): + error_message = HEALTH_CHECK_INTERVAL_INVALID.format(hciu) + return error_message + + +def main(): + device_health_opt = {"health_check_interval": {"type": "int", "required": False}, + "health_check_interval_unit": {"type": "str", "required": False, + "choices": ["Hourly", "Minutes"]}, + "health_and_power_state_on_connection_lost": {"type": "str", "required": False, + "choices": ["last_known", "unknown"]} + } + discovery_settings_opt = { + "general_device_naming": {"type": "str", "required": False, "default": "DNS", + "choices": ["DNS", "NETBIOS"]}, + "server_device_naming": {"type": "str", "required": False, "default": "IDRAC_SYSTEM_HOSTNAME", + "choices": ["IDRAC_HOSTNAME", "IDRAC_SYSTEM_HOSTNAME"]}, + "invalid_device_hostname": {"type": "str", "required": False}, + "common_mac_addresses": {"type": "str", "required": False} 
+ } + server_initiated_discovery_opt = { + "device_discovery_approval_policy": {"type": "str", "required": False, "choices": ["Automatic", "Manual"]}, + "set_trap_destination": {"type": "bool", "required": False, }, + } + builtin_appliance_share_opt = { + "share_options": {"type": "str", "required": False, + "choices": ["CIFS", "HTTPS"]}, + "cifs_options": {"type": "str", "required": False, + "choices": ["V1", "V2"] + }, + } + + specs = { + "report_row_limit": {"required": False, "type": "int"}, + "device_health": {"required": False, "type": "dict", + "options": device_health_opt + }, + "discovery_settings": {"required": False, "type": "dict", + "options": discovery_settings_opt + }, + "server_initiated_discovery": {"required": False, "type": "dict", + "options": server_initiated_discovery_opt + }, + "mx7000_onboarding_preferences": {"required": False, "type": "str", "choices": ["all", "chassis"]}, + "builtin_appliance_share": {"required": False, "type": "dict", + "options": builtin_appliance_share_opt, + "required_if": [['share_options', "CIFS", ('cifs_options',)]] + }, + "email_sender_settings": {"required": False, "type": "str"}, + "trap_forwarding_format": {"required": False, "type": "str", "choices": ["Normalized", "Original"]}, + "metrics_collection_settings": {"required": False, "type": "int"}, + } + specs.update(ome_auth_params) + module = AnsibleModule(argument_spec=specs, + required_one_of=[["report_row_limit", "device_health", "discovery_settings", + "server_initiated_discovery", "mx7000_onboarding_preferences", + "builtin_appliance_share", "email_sender_settings", + "trap_forwarding_format", "metrics_collection_settings"]], + supports_check_mode=True, ) + + try: + _validate_params(module) + with RestOME(module.params, req_session=True) as rest_obj: + job = job_details(rest_obj) + job_payload, schedule = create_job(module) + curr_resp = fetch_cp_settings(rest_obj) + payload, payload_dict = create_payload(module, curr_resp) + cifs_payload = 
create_cifs_payload(module, curr_resp) + diff = _diff_payload(curr_resp, payload, cifs_payload, schedule, job) + process_check_mode(module, diff) + resp, cifs_resp, job_resp = update_console_preferences(module, rest_obj, payload, cifs_payload, + job_payload, job, payload_dict, schedule) + resp_req = fetch_cp_settings(rest_obj) + cp_list = [] + resp_data = list(filter(lambda d: d['Name'] in CONSOLE_SETTINGS_VALUES, resp_req)) + for cp in resp_data: + cp_data = strip_substr_dict(cp) + cp_list.append(cp_data) + module.exit_json(msg=SUCCESS_MSG, console_preferences=cp_list) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py new file mode 100644 index 00000000..03eef19e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py @@ -0,0 +1,751 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_network_address +short_description: Updates the network configuration on OpenManage Enterprise +version_added: "2.1.0" +description: + - This module allows the configuration of a DNS and an IPV4 or IPV6 network on OpenManage Enterprise. +notes: + - The configuration changes can only be applied to one interface at a time. + - The system management consoles might be unreachable for some time after the configuration changes are applied. + - This module supports C(check_mode). +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + enable_nic: + description: Enable or disable Network Interface Card (NIC) configuration. + type: bool + default: true + interface_name: + description: + - "If there are multiple interfaces, network configuration changes can be applied to a single interface using the + interface name of the NIC." + - If this option is not specified, Primary interface is chosen by default. + type: str + ipv4_configuration: + description: + - IPv4 network configuration. + - "I(Warning) Ensure that you have an alternate interface to access OpenManage Enterprise as these options can + change the current IPv4 address for I(hostname)." + type: dict + suboptions: + enable: + description: + - Enable or disable access to the network using IPv4. + type: bool + required: true + enable_dhcp: + description: + - "Enable or disable the automatic request to get an IPv4 address from the IPv4 Dynamic Host Configuration + Protocol (DHCP) server" + - "If I(enable_dhcp) option is true, OpenManage Enterprise retrieves the IP configuration—IPv4 address, + subnet mask, and gateway from a DHCP server on the existing network." 
+ type: bool + static_ip_address: + description: + - Static IPv4 address + - This option is applicable when I(enable_dhcp) is false. + type: str + static_subnet_mask: + description: + - Static IPv4 subnet mask address + - This option is applicable when I(enable_dhcp) is false. + type: str + static_gateway: + description: + - Static IPv4 gateway address + - This option is applicable when I(enable_dhcp) is false. + type: str + use_dhcp_for_dns_server_names: + description: + - This option allows to automatically request and obtain a DNS server IPv4 address from the DHCP server. + - This option is applicable when I(enable_dhcp) is true. + type: bool + static_preferred_dns_server: + description: + - Static IPv4 DNS preferred server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + static_alternate_dns_server: + description: + - Static IPv4 DNS alternate server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + ipv6_configuration: + description: + - IPv6 network configuration. + - "I(Warning) Ensure that you have an alternate interface to access OpenManage Enterprise as these options can + change the current IPv6 address for I(hostname)." + type: dict + suboptions: + enable: + description: Enable or disable access to the network using the IPv6. + type: bool + required: true + enable_auto_configuration: + description: + - "Enable or disable the automatic request to get an IPv6 address from the IPv6 DHCP server or router + advertisements(RA)" + - "If I(enable_auto_configuration) is true, OME retrieves IP configuration-IPv6 address, prefix, and gateway, + from a DHCPv6 server on the existing network" + type: bool + static_ip_address: + description: + - Static IPv6 address + - This option is applicable when I(enable_auto_configuration) is false. 
+ type: str + static_prefix_length: + description: + - Static IPv6 prefix length + - This option is applicable when I(enable_auto_configuration) is false. + type: int + static_gateway: + description: + - Static IPv6 gateway address + - This option is applicable when I(enable_auto_configuration) is false. + type: str + use_dhcp_for_dns_server_names: + description: + - This option allows to automatically request and obtain a DNS server IPv6 address from the DHCP server. + - This option is applicable when I(enable_auto_configuration) is true + type: bool + static_preferred_dns_server: + description: + - Static IPv6 DNS preferred server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + static_alternate_dns_server: + description: + - Static IPv6 DNS alternate server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + management_vlan: + description: + - vLAN configuration. + - These settings are applicable for OpenManage Enterprise Modular. + type: dict + suboptions: + enable_vlan: + description: + - Enable or disable vLAN for management. + - The vLAN configuration cannot be updated if the I(register_with_dns) field under I(dns_configuration) is true. + - "I(WARNING) Ensure that the network cable is plugged to the correct port after the vLAN configuration + changes have been made. If not, the configuration change may not be effective." + required: true + type: bool + vlan_id: + description: + - vLAN ID. + - This option is applicable when I(enable_vlan) is true. + type: int + dns_configuration: + description: Domain Name System(DNS) settings. + type: dict + suboptions: + register_with_dns: + description: + - Register/Unregister I(dns_name) on the DNS Server. + - This option cannot be updated if vLAN configuration changes. + type: bool + use_dhcp_for_dns_domain_name: + description: Get the I(dns_domain_name) using a DHCP server. 
+ type: bool + dns_name: + description: + - DNS name for I(hostname) + - This is applicable when I(register_with_dns) is true. + type: str + dns_domain_name: + description: + - Static DNS domain name + - This is applicable when I(use_dhcp_for_dns_domain_name) is false. + type: str + reboot_delay: + description: + - The time in seconds, after which settings are applied. + - This option is not mandatory. + type: int +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +''' + +EXAMPLES = r''' +--- +- name: IPv4 network configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: true + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + reboot_delay: 5 + +- name: IPv6 network configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + +- name: Management vLAN configuration for primary interface + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + management_vlan: + enable_vlan: true + vlan_id: 3344 + dns_configuration: + register_with_dns: false + reboot_delay: 
1 + +- name: DNS settings + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + ipv4_configuration: + enable: true + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "dnslocaldomain" + +- name: Disable nic interface eth1 + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: false + interface_name: eth1 + +- name: Complete network settings for interface eth1 + dellemc.openmanage.ome_application_network_address: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_nic: true + interface_name: eth1 + ipv4_configuration: + enable: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_for_dns_server_names: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable: true + enable_auto_configuration: true + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcp_for_dns_server_names: true + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "dnslocaldomain" + reboot_delay: 5 +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the network address configuration change.
+ returned: always + sample: Successfully updated network address configuration +network_configuration: + type: dict + description: Updated application network address configuration. + returned: on success + sample: { + "Delay": 0, + "DnsConfiguration": { + "DnsDomainName": "", + "DnsName": "MX-SVCTAG", + "RegisterWithDNS": false, + "UseDHCPForDNSDomainName": true + }, + "EnableNIC": true, + "InterfaceName": "eth0", + "PrimaryInterface": true, + "Ipv4Configuration": { + "Enable": true, + "EnableDHCP": false, + "StaticAlternateDNSServer": "", + "StaticGateway": "192.168.0.2", + "StaticIPAddress": "192.168.0.3", + "StaticPreferredDNSServer": "192.168.0.4", + "StaticSubnetMask": "255.255.254.0", + "UseDHCPForDNSServerNames": false + }, + "Ipv6Configuration": { + "Enable": true, + "EnableAutoConfiguration": true, + "StaticAlternateDNSServer": "", + "StaticGateway": "", + "StaticIPAddress": "", + "StaticPreferredDNSServer": "", + "StaticPrefixLength": 0, + "UseDHCPForDNSServerNames": true + }, + "ManagementVLAN": { + "EnableVLAN": false, + "Id": 1 + } + } +job_info: + description: Details of the job to update in case OME version is >= 3.3. 
+ returned: on success + type: dict + sample: { + "Builtin": false, + "CreatedBy": "system", + "Editable": true, + "EndTime": null, + "Id": 14902, + "JobDescription": "Generic OME runtime task", + "JobName": "OMERealtime_Task", + "JobStatus": { + "Id": 2080, + "Name": "New" + }, + "JobType": { + "Id": 207, + "Internal": true, + "Name": "OMERealtime_Task" + }, + "LastRun": null, + "LastRunStatus": { + "Id": 2080, + "Name": "New" + }, + "NextRun": null, + "Params": [ + { + "JobId": 14902, + "Key": "Nmcli_Update", + "Value": "{\"interfaceName\":\"eth0\",\"profileName\":\"eth0\",\"enableNIC\":true, + \"ipv4Configuration\":{\"enable\":true,\"enableDHCP\":true,\"staticIPAddress\":\"\", + \"staticSubnetMask\":\"\",\"staticGateway\":\"\",\"useDHCPForDNSServerNames\":true, + \"staticPreferredDNSServer\":\"\",\"staticAlternateDNSServer\":\"\"}, + \"ipv6Configuration\":{\"enable\":false,\"enableAutoConfiguration\":true,\"staticIPAddress\":\"\", + \"staticPrefixLength\":0,\"staticGateway\":\"\",\"useDHCPForDNSServerNames\":false, + \"staticPreferredDNSServer\":\"\",\"staticAlternateDNSServer\":\"\"}, + \"managementVLAN\":{\"enableVLAN\":false,\"id\":0},\"dnsConfiguration\":{\"registerWithDNS\":false, + \"dnsName\":\"\",\"useDHCPForDNSDomainName\":false,\"dnsDomainName\":\"\",\"fqdndomainName\":\"\", + \"ipv4CurrentPreferredDNSServer\":\"\",\"ipv4CurrentAlternateDNSServer\":\"\", + \"ipv6CurrentPreferredDNSServer\":\"\",\"ipv6CurrentAlternateDNSServer\":\"\"}, + \"currentSettings\":{\"ipv4Address\":[],\"ipv4Gateway\":\"\",\"ipv4Dns\":[],\"ipv4Domain\":\"\", + \"ipv6Address\":[],\"ipv6LinkLocalAddress\":\"\",\"ipv6Gateway\":\"\",\"ipv6Dns\":[], + \"ipv6Domain\":\"\"},\"delay\":0,\"primaryInterface\":true,\"modifiedConfigs\":{}}" + } + ], + "Schedule": "startnow", + "StartTime": null, + "State": "Enabled", + "Targets": [], + "UpdatedBy": null, + "Visible": true + } +error_info: + description: Details of the HTTP error. 
+ returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to update the address configuration because a dependent field is missing for Use DHCP + for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration .", + "MessageArgs": [ + "Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid + configuration" + ], + "MessageId": "CAPP1304", + "RelatedProperties": [], + "Resolution": "Make sure that all dependent fields contain valid content and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } +''' + +import json +import socket +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +IP_CONFIG = "ApplicationService/Network/AddressConfiguration" +JOB_IP_CONFIG = "ApplicationService/Network/AdapterConfigurations" +POST_IP_CONFIG = "ApplicationService/Actions/Network.ConfigureNetworkAdapter" +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." 
+ + +def validate_ip_address(address): + try: + socket.inet_aton(address) + except socket.error: + return False + return address.count('.') == 3 + + +def validate_ip_v6_address(address): + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + return False + return True + + +def remove_unwanted_keys(key_list, payload): + for key in key_list: + if key in payload: + payload.pop(key) + + +def format_payload(src_dict): + address_payload_map = {"enable_nic": "EnableNIC", + "interface_name": "InterfaceName", + "enable": "Enable", + "enable_dhcp": "EnableDHCP", + "static_ip_address": "StaticIPAddress", + "static_subnet_mask": "StaticSubnetMask", + "static_gateway": "StaticGateway", + "use_dhcp_for_dns_server_names": "UseDHCPForDNSServerNames", + "static_preferred_dns_server": "StaticPreferredDNSServer", + "static_alternate_dns_server": "StaticAlternateDNSServer", + "enable_auto_configuration": "EnableAutoConfiguration", + "static_prefix_length": "StaticPrefixLength", + "enable_vlan": "EnableVLAN", + "vlan_id": "Id", + "register_with_dns": "RegisterWithDNS", + "use_dhcp_for_dns_domain_name": "UseDHCPForDNSDomainName", + "dns_name": "DnsName", + "dns_domain_name": "DnsDomainName", + "reboot_delay": "Delay"} + if src_dict: + return dict([(address_payload_map[key], val) for key, val in src_dict.items() if val is not None]) + + +def get_payload(module): + params = module.params + backup_params = params.copy() + remove_keys = ["hostname", "username", "password", "port"] + remove_unwanted_keys(remove_keys, backup_params) + ipv4_payload = format_payload(backup_params.get("ipv4_configuration", {})) + ipv6_payload = format_payload(backup_params.get("ipv6_configuration", {})) + dns_payload = format_payload(backup_params.get("dns_configuration", {})) + vlan_payload = format_payload(backup_params.get("management_vlan", {})) + return ipv4_payload, ipv6_payload, dns_payload, vlan_payload + + +def _compare_dict_merge(src_dict, new_dict, param_list): + diff = 0 + for 
parm in param_list: + val = new_dict.get(parm) + if val is not None: + if val != src_dict.get(parm): + src_dict[parm] = val + diff += 1 + return diff + + +def update_ipv4_payload(src_dict, new_dict): + diff = 0 + if new_dict: + if new_dict.get("Enable") != src_dict.get("Enable"): # Mandatory + src_dict["Enable"] = new_dict.get("Enable") + diff += 1 + if new_dict.get("Enable"): + tmp_dict = {"EnableDHCP": ["StaticIPAddress", "StaticSubnetMask", "StaticGateway"], + "UseDHCPForDNSServerNames": ["StaticPreferredDNSServer", "StaticAlternateDNSServer"]} + for key, val in tmp_dict.items(): + if new_dict.get(key) is not None: + if new_dict.get(key) != src_dict.get(key): + src_dict[key] = new_dict.get(key) + diff += 1 + if not new_dict.get(key): + diff = diff + _compare_dict_merge(src_dict, new_dict, val) + return diff + + +def update_ipv6_payload(src_dict, new_dict): + diff = 0 + if new_dict: + if new_dict.get("Enable") != src_dict.get("Enable"): # Mandatory + src_dict["Enable"] = new_dict.get("Enable") + diff += 1 + if new_dict.get("Enable"): + tmp_dict = {"EnableAutoConfiguration": ["StaticIPAddress", "StaticPrefixLength", "StaticGateway"], + "UseDHCPForDNSServerNames": ["StaticPreferredDNSServer", "StaticAlternateDNSServer"]} + for key, val in tmp_dict.items(): + if new_dict.get(key) is not None: + if new_dict.get(key) != src_dict.get(key): + src_dict[key] = new_dict.get(key) + diff += 1 + if not new_dict.get(key): + diff = diff + _compare_dict_merge(src_dict, new_dict, val) + return diff + + +def update_dns_payload(src_dict, new_dict): + diff = 0 + if new_dict: + mkey = "RegisterWithDNS" + if new_dict.get(mkey) is not None: + if new_dict.get(mkey) != src_dict.get(mkey): + src_dict[mkey] = new_dict.get(mkey) + diff += 1 + if new_dict.get(mkey) is True: + diff = diff + _compare_dict_merge(src_dict, new_dict, ["DnsName"]) + mkey = "UseDHCPForDNSDomainName" + if new_dict.get(mkey) is not None: + if new_dict.get(mkey) != src_dict.get(mkey): + src_dict[mkey] = 
new_dict.get(mkey) + diff += 1 + if not new_dict.get(mkey): + diff = diff + _compare_dict_merge(src_dict, new_dict, ["DnsDomainName"]) + return diff + + +def update_vlan_payload(src_dict, new_dict): + diff = 0 + if new_dict: + mkey = "EnableVLAN" + if new_dict.get(mkey) is not None: + if new_dict.get(mkey) != src_dict.get(mkey): + src_dict[mkey] = new_dict.get(mkey) + diff += 1 + if new_dict.get(mkey) is True: + diff = diff + _compare_dict_merge(src_dict, new_dict, ["Id"]) + return diff + + +def get_network_config_data(rest_obj, module): + try: + interface = module.params.get("interface_name") + resp = rest_obj.invoke_request("GET", JOB_IP_CONFIG) + adapter_list = resp.json_data.get("value") + int_adp = None + pri_adp = None + if adapter_list: + for adp in adapter_list: + if interface and adp.get("InterfaceName") == interface: + int_adp = adp + break + if adp.get("PrimaryInterface"): + pri_adp = adp + if interface and int_adp is None: + module.fail_json(msg="The 'interface_name' value provided {0} is invalid".format(interface)) + elif int_adp: + return int_adp, "POST", POST_IP_CONFIG + else: + return pri_adp, "POST", POST_IP_CONFIG + except HTTPError as err: + pass + except Exception as err: + raise err + resp = rest_obj.invoke_request("GET", IP_CONFIG) + return resp.json_data, "PUT", IP_CONFIG + + +def get_updated_payload(rest_obj, module, ipv4_payload, ipv6_payload, dns_payload, vlan_payload): + current_setting = {} + remove_keys = ["@odata.context", "@odata.type", "@odata.id", "CurrentSettings"] + current_setting, rest_method, uri = get_network_config_data(rest_obj, module) + remove_unwanted_keys(remove_keys, current_setting) + payload_dict = {"Ipv4Configuration": [ipv4_payload, update_ipv4_payload], + "Ipv6Configuration": [ipv6_payload, update_ipv6_payload], + "DnsConfiguration": [dns_payload, update_dns_payload], + "ManagementVLAN": [vlan_payload, update_vlan_payload]} + diff = 0 + enable_nic = module.params.get("enable_nic") + if 
current_setting.get("EnableNIC") != enable_nic: + current_setting["EnableNIC"] = enable_nic + diff += 1 + if enable_nic: + for config, pload in payload_dict.items(): + if pload[0]: + diff = diff + pload[1](current_setting.get(config), pload[0]) + delay = module.params.get("reboot_delay") + if delay is not None: + if current_setting["Delay"] != delay: + current_setting["Delay"] = delay + if diff == 0: + module.exit_json( + msg=NO_CHANGES_FOUND, network_configuration=current_setting) + if module.check_mode: + module.exit_json(changed=True, msg=CHANGES_FOUND) + return current_setting, rest_method, uri + + +def validate_ipaddress(module, ip_type, config, var_list, ip_func): + ipv_input = module.params.get(config) + if ipv_input: + for ipname in var_list: + val = ipv_input.get(ipname) + if val and not ip_func(val): + module.fail_json(msg="Invalid {0} address provided for the {1}".format(ip_type, ipname)) + + +def validate_input(module): + ip_addr = ["static_ip_address", "static_gateway", "static_preferred_dns_server", "static_alternate_dns_server"] + validate_ipaddress(module, "IPv6", "ipv6_configuration", ip_addr, validate_ip_v6_address) + ip_addr.append("static_subnet_mask") + validate_ipaddress(module, "IPv4", "ipv4_configuration", ip_addr, validate_ip_address) + delay = module.params.get("reboot_delay") + if delay and delay < 0: + module.fail_json(msg="Invalid value provided for 'reboot_delay'") + + +def main(): + ipv4_options = {"enable": {"required": True, "type": "bool"}, + "enable_dhcp": {"required": False, "type": "bool"}, + "static_ip_address": {"required": False, "type": "str"}, + "static_subnet_mask": {"required": False, "type": "str"}, + "static_gateway": {"required": False, "type": "str"}, + "use_dhcp_for_dns_server_names": {"required": False, "type": "bool"}, + "static_preferred_dns_server": {"required": False, "type": "str"}, + "static_alternate_dns_server": {"required": False, "type": "str"}} + ipv6_options = {"enable": {"required": True, "type": 
"bool"}, + "enable_auto_configuration": {"required": False, "type": "bool"}, + "static_ip_address": {"required": False, "type": "str"}, + "static_prefix_length": {"required": False, "type": "int"}, + "static_gateway": {"required": False, "type": "str"}, + "use_dhcp_for_dns_server_names": {"required": False, "type": "bool"}, + "static_preferred_dns_server": {"required": False, "type": "str"}, + "static_alternate_dns_server": {"required": False, "type": "str"}} + dns_options = {"register_with_dns": {"required": False, "type": "bool"}, + "use_dhcp_for_dns_domain_name": {"required": False, "type": "bool"}, + "dns_name": {"required": False, "type": "str"}, + "dns_domain_name": {"required": False, "type": "str"}} + management_vlan = {"enable_vlan": {"required": True, "type": "bool"}, + "vlan_id": {"required": False, "type": "int"}} + + specs = { + "enable_nic": {"required": False, "type": "bool", "default": True}, + "interface_name": {"required": False, "type": "str"}, + "ipv4_configuration": + {"required": False, "type": "dict", "options": ipv4_options, + "required_if": [ + ['enable', True, ('enable_dhcp',), True], + ['enable_dhcp', False, ('static_ip_address', 'static_subnet_mask', "static_gateway"), False], + ['use_dhcp_for_dns_server_names', False, + ('static_preferred_dns_server', 'static_alternate_dns_server'), True] + ] + }, + "ipv6_configuration": + {"required": False, "type": "dict", "options": ipv6_options, + "required_if": [ + ['enable', True, ('enable_auto_configuration',), True], + ['enable_auto_configuration', False, ('static_ip_address', 'static_prefix_length', "static_gateway"), + False], + ['use_dhcp_for_dns_server_names', False, + ('static_preferred_dns_server', 'static_alternate_dns_server'), True] + ] + }, + "dns_configuration": + {"required": False, "type": "dict", "options": dns_options, + "required_if": [ + ['register_with_dns', True, ('dns_name',), False], + ['use_dhcp_for_dns_domain_name', False, ('dns_domain_name',)] + ] + }, + 
"management_vlan": + {"required": False, "type": "dict", "options": management_vlan, + "required_if": [ + ['enable_vlan', True, ('vlan_id',), True] + ] + }, + "reboot_delay": {"required": False, "type": "int"} + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[ + ["enable_nic", True, + ("ipv4_configuration", "ipv6_configuration", "dns_configuration", "management_vlan"), True] + ], + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + validate_input(module) + ipv4_payload, ipv6_payload, dns_payload, vlan_payload = get_payload(module) + updated_payload, rest_method, uri = get_updated_payload( + rest_obj, module, ipv4_payload, ipv6_payload, dns_payload, vlan_payload) + resp = rest_obj.invoke_request(rest_method, uri, data=updated_payload, api_timeout=150) + if rest_method == "POST": + module.exit_json(msg="Successfully triggered job to update network address configuration.", + network_configuration=updated_payload, job_info=resp.json_data, changed=True) + module.exit_json(msg="Successfully triggered task to update network address configuration.", + network_configuration=resp.json_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py new file mode 100644 index 00000000..3659d8a3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# 
-*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_network_proxy +short_description: Updates the proxy configuration on OpenManage Enterprise +version_added: "2.1.0" +description: This module allows to configure a network proxy on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + enable_proxy: + description: + - Enables or disables the HTTP proxy configuration. + - If I(enable proxy) is false, then the HTTP proxy configuration is set to its default value. + required: true + type: bool + ip_address: + description: + - Proxy server address. + - This option is mandatory when I(enable_proxy) is true. + type: str + proxy_port: + description: + - Proxy server's port number. + - This option is mandatory when I(enable_proxy) is true. + type: int + enable_authentication: + description: + - Enable or disable proxy authentication. + - If I(enable_authentication) is true, I(proxy_username) and I(proxy_password) must be provided. + - If I(enable_authentication) is false, the proxy username and password are set to its default values. + type: bool + proxy_username: + description: + - Proxy server username. + - This option is mandatory when I(enable_authentication) is true. + type: str + proxy_password: + description: + - Proxy server password. + - This option is mandatory when I(enable_authentication) is true. + type: str +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module does not support C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Update proxy configuration and enable authentication + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: true + proxy_username: "proxy_username" + proxy_password: "proxy_password" + +- name: Reset proxy authentication + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: true + ip_address: "192.168.0.2" + proxy_port: 444 + enable_authentication: false + +- name: Reset proxy configuration + dellemc.openmanage.ome_application_network_proxy: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_proxy: false +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the network proxy configuration change. + returned: always + sample: "Successfully updated network proxy configuration." +proxy_configuration: + type: dict + description: Updated application network proxy configuration. + returned: success + sample: { + "EnableAuthentication": true, + "EnableProxy": true, + "IpAddress": "192.168.0.2", + "Password": null, + "PortNumber": 444, + "Username": "root" + } +error_info: + description: Details of the HTTP error. + returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the request because the input value + for PortNumber is missing or an invalid value is entered.", + "MessageArgs": [ + "PortNumber" + ], + "MessageId": "CGEN6002", + "RelatedProperties": [], + "Resolution": "Enter a valid value and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information." + } + } +'''

import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError

PROXY_CONFIG = "ApplicationService/Network/ProxyConfiguration"
CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied."
CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied."


def remove_unwanted_keys(key_list, payload):
    """Remove every key in *key_list* from dict *payload*, in place."""
    # NOTE(review): list comprehension used purely for its side effect; a
    # plain for-loop would be clearer.
    [payload.pop(key) for key in key_list if key in payload]


def validate_check_mode_for_network_proxy(payload_diff, module):
    """
    check mode support validation
    :param payload_diff: payload difference
    :param module: ansible module object
    :return: None
    """
    if module.check_mode:
        if payload_diff:
            module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
        else:
            module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)


def get_payload(module):
    """Map the supplied proxy options to OME REST attribute names, dropping None values."""
    params = module.params
    proxy_payload_map = {
        "ip_address": "IpAddress",
        "proxy_port": "PortNumber",
        "enable_proxy": "EnableProxy",
        "proxy_username": "Username",
        "proxy_password": "Password",
        "enable_authentication": "EnableAuthentication"
    }
    backup_params = params.copy()
    # Strip connection-only options so only proxy settings remain to be mapped.
    remove_keys = ["hostname", "username", "password", "port", "ca_path", "validate_certs", "timeout"]
    remove_unwanted_keys(remove_keys, backup_params)
    payload = dict([(proxy_payload_map[key], val) for key, val in backup_params.items() if val is not None])
    return payload


def get_updated_payload(rest_obj, module, payload):
    """Merge *payload* over the current proxy configuration fetched from OME.

    Exits the module early when nothing differs (or in check mode);
    otherwise returns the merged settings dict to PUT back.
    """
    current_setting = {}
    if not any(payload):
        module.fail_json(msg="Unable to configure the proxy because proxy configuration settings are not provided.")
    else:
        params = module.params
        remove_keys = ["@odata.context", "@odata.type", "@odata.id", "Password"]
        enable_authentication = params.get("enable_authentication")
        if enable_authentication is False:
            """when enable auth is disabled, ignore proxy username and password """
            remove_keys.append("Username")
            payload.pop('Username', None)
            payload.pop('Password', None)
        resp = rest_obj.invoke_request("GET", PROXY_CONFIG)
        current_setting = resp.json_data
        remove_unwanted_keys(remove_keys, current_setting)
        # Only keys present in the current settings participate in the diff.
        diff = any(key in current_setting and val != current_setting[key] for key, val in payload.items())
        validate_check_mode_for_network_proxy(diff, module)
        if not diff:
            module.exit_json(msg="No changes made to proxy configuration as entered values are the same as current "
                                 "configuration values.")
        else:
            current_setting.update(payload)
            return current_setting


def main():
    """Module entry point: validate options and PUT the merged proxy configuration."""
    specs = {
        "ip_address": {"required": False, "type": "str"},
        "proxy_port": {"required": False, "type": "int"},
        "enable_proxy": {"required": True, "type": "bool"},
        "proxy_username": {"required": False, "type": "str"},
        "proxy_password": {"required": False, "type": "str", "no_log": True},
        "enable_authentication": {"required": False, "type": "bool"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[['enable_proxy', True, ['ip_address', 'proxy_port']],
                     ['enable_authentication', True, ['proxy_username', 'proxy_password']], ],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            payload = get_payload(module)
            updated_payload = get_updated_payload(rest_obj, module, payload)
            resp = rest_obj.invoke_request("PUT", PROXY_CONFIG, data=updated_payload)
            module.exit_json(msg="Successfully updated network proxy configuration.",
                             proxy_configuration=resp.json_data,
                             changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py new file mode 100644 index 00000000..2dfd13a5 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_network_settings +short_description: This module allows you to configure the session inactivity timeout settings +version_added: "4.4.0" +description: + - This module allows you to configure the session inactivity timeout settings on OpenManage Enterprise + and OpenManage Enterprise Modular. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + session_inactivity_timeout: + description: Session inactivity timeout settings. + type: dict + suboptions: + enable_universal_timeout: + description: + - Enable or disable the universal inactivity timeout. + type: bool + universal_timeout: + description: + - Duration of inactivity in minutes after which all sessions end. + - This is applicable when I(enable_universal_timeout) is C(true). + - This is mutually exclusive with I(api_timeout), I(gui_timeout), I(ssh_timeout) and I(serial_timeout).
+ type: float + api_timeout: + description: + - Duration of inactivity in minutes after which the API session ends. + - This is mutually exclusive with I(universal_timeout). + type: float + api_sessions: + description: + - The maximum number of API sessions to be allowed. + type: int + gui_timeout: + description: + - Duration of inactivity in minutes after which the web interface of + Graphical User Interface (GUI) session ends. + - This is mutually exclusive with I(universal_timeout). + type: float + gui_sessions: + description: + - The maximum number of GUI sessions to be allowed. + type: int + ssh_timeout: + description: + - Duration of inactivity in minutes after which the SSH session ends. + - This is applicable only for OpenManage Enterprise Modular. + - This is mutually exclusive with I(universal_timeout). + type: float + ssh_sessions: + description: + - The maximum number of SSH sessions to be allowed. + - This is applicable to OME-M only. + type: int + serial_timeout: + description: + - Duration of inactivity in minutes after which the serial console session ends. + - This is applicable only for OpenManage Enterprise Modular. + - This is mutually exclusive with I(universal_timeout). + type: float + serial_sessions: + description: + - The maximum number of serial console sessions to be allowed. + - This is applicable only for OpenManage Enterprise Modular. + type: int +requirements: + - "python >= 3.8.6" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise + or OpenManage Enterprise Modular. + - To configure other network settings such as network address, web server, and so on, refer to the respective + OpenManage Enterprise application network setting modules. + - This module supports C(check_mode). 
+author: + - Sachin Apagundi(@sachin-apa) +''' + +EXAMPLES = """ +--- +- name: Configure universal inactivity timeout + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: true + universal_timeout: 30 + api_sessions: 90 + gui_sessions: 5 + ssh_sessions: 2 + serial_sessions: 1 + +- name: Configure API and GUI timeout and sessions + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 25 + gui_sessions: 5 + +- name: Configure timeout and sessions for all parameters + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 + +- name: Disable universal timeout and configure timeout and sessions for other parameters + ome_application_network_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + session_inactivity_timeout: + enable_universal_timeout: false + api_timeout: 20 + api_sessions: 100 + gui_timeout: 15 + gui_sessions: 5 + ssh_timeout: 30 + ssh_sessions: 2 + serial_timeout: 35 + serial_sessions: 1 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the Session timeout settings. + returned: always + sample: "Successfully updated the session timeout settings." +session_inactivity_setting: + type: dict + description: Returned when session inactivity timeout settings are updated successfully. 
+ returned: success + sample: [ + { + "SessionType": "API", + "MaxSessions": 32, + "SessionTimeout": 99600, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 100, + "MaxSessionsConfigurable": true, + "SessionTimeoutConfigurable": true + }, + { + "SessionType": "GUI", + "MaxSessions": 6, + "SessionTimeout": 99600, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 7200000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 6, + "MaxSessionsConfigurable": true, + "SessionTimeoutConfigurable": true + }, + { + "SessionType": "SSH", + "MaxSessions": 4, + "SessionTimeout": 99600, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 10800000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 4, + "MaxSessionsConfigurable": true, + "SessionTimeoutConfigurable": true + }, + { + "SessionType": "Serial", + "MaxSessions": 1, + "SessionTimeout": 99600, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 1, + "MaxSessionsConfigurable": false, + "SessionTimeoutConfigurable": true + }, + { + "SessionType": "UniversalTimeout", + "MaxSessions": 0, + "SessionTimeout": -1, + "MinSessionTimeout": -1, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 0, + "MaxSessionsAllowed": 0, + "MaxSessionsConfigurable": false, + "SessionTimeoutConfigurable": true + } + ] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CUSR1233", + "RelatedProperties": [], + "Message": "The number of allowed concurrent sessions for API must be between 1 and 100 sessions.", + "MessageArgs": [ + "API", + "1", + "100" + ], + "Severity": "Critical", + "Resolution": "Enter values in the correct range and retry the operation." 
+ } + ] + } + } +""" + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +SUCCESS_MSG = "Successfully updated the session timeout settings." +SESSION_INACTIVITY_GET = "SessionService/SessionConfiguration" +SESSION_INACTIVITY_POST = "SessionService/Actions/SessionService.SessionConfigurationUpdate" +NO_CHANGES = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +session_type_map = { + "UniversalTimeout": {"SessionTimeout": "universal_timeout", "MaxSessions": None}, + "API": {"SessionTimeout": "api_timeout", "MaxSessions": "api_sessions"}, + "GUI": {"SessionTimeout": "gui_timeout", "MaxSessions": "gui_sessions"}, + "SSH": {"SessionTimeout": "ssh_timeout", "MaxSessions": "ssh_sessions"}, + "Serial": {"SessionTimeout": "serial_timeout", "MaxSessions": "serial_sessions"} +} + + +def fetch_session_inactivity_settings(rest_obj): + final_resp = rest_obj.invoke_request("GET", SESSION_INACTIVITY_GET) + ret_data = final_resp.json_data.get('value') + return ret_data + + +def update_session_inactivity_settings(rest_obj, payload): + final_resp = rest_obj.invoke_request("POST", SESSION_INACTIVITY_POST, data=payload) + return final_resp + + +def update_payload(module, curr_payload): + diff = 0 + sit_param = module.params.get("session_inactivity_timeout").copy() + eut = sit_param.get("enable_universal_timeout") + eut_enabled = is_universal_timeout_enabled(curr_payload) + if eut is False: + sit_param["universal_timeout"] = -1 # to disable universal timeout set value to -1 + for up in curr_payload: + stm = session_type_map.get(up.get("SessionType"), None) + if stm and not ((up.get("SessionType") == "UniversalTimeout") and (eut is None)): + sess_time = get_value(sit_param, up, 
stm.get("SessionTimeout", None), "SessionTimeout") + if sess_time != up.get("SessionTimeout") and ((not eut_enabled) or eut is not None): + diff += 1 + up["SessionTimeout"] = sess_time + max_sess = get_value(sit_param, up, stm.get("MaxSessions", None), "MaxSessions") + if max_sess != up.get("MaxSessions"): + diff += 1 + up["MaxSessions"] = max_sess + return curr_payload, diff + + +def is_universal_timeout_enabled(payload): + u_sess_timeout = -1 + for up in payload: + if up.get("SessionType") == "UniversalTimeout": + u_sess_timeout = up.get("SessionTimeout") + break + return u_sess_timeout > 0 + + +def get_value(input_module, resp, mod_key, attr_key): + ret_value = input_module.get(mod_key) + if ret_value is None: + ret_value = resp.get(attr_key) + elif attr_key == "SessionTimeout" and ret_value != -1: + ret_value = ret_value * 60000 + return ret_value + + +def process_check_mode(module, diff): + if not diff: + module.exit_json(msg=NO_CHANGES) + elif module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + + +def main(): + session_inactivity_options = { + "enable_universal_timeout": {"type": "bool", "required": False}, + "universal_timeout": {"type": "float", "required": False}, + "api_timeout": {"type": "float", "required": False}, + "api_sessions": {"type": "int", "required": False}, + "gui_timeout": {"type": "float", "required": False}, + "gui_sessions": {"type": "int", "required": False}, + "ssh_timeout": {"type": "float", "required": False}, + "ssh_sessions": {"type": "int", "required": False}, + "serial_timeout": {"type": "float", "required": False}, + "serial_sessions": {"type": "int", "required": False}, + } + specs = { + "session_inactivity_timeout": { + "required": False, + "type": "dict", + "options": session_inactivity_options, + "mutually_exclusive": [ + ['universal_timeout', 'api_timeout'], + ['universal_timeout', 'gui_timeout'], + ['universal_timeout', 'ssh_timeout'], + ['universal_timeout', 'serial_timeout'] + ], + "required_if": [ + 
['enable_universal_timeout', True, ['universal_timeout']] + ] + } + } + specs.update(ome_auth_params) + + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + curr_resp = fetch_session_inactivity_settings(rest_obj) + payload, diff = update_payload(module, curr_resp) + process_check_mode(module, diff) + resp = update_session_inactivity_settings(rest_obj, payload) + module.exit_json(msg=SUCCESS_MSG, + session_inactivity_setting=resp.json_data, changed=True) + + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except ( + IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, + OSError) as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py new file mode 100644 index 00000000..381ef319 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_network_time +short_description: Updates the network time on OpenManage Enterprise +version_added: "2.1.0" +description: This module allows the configuration of network time on OpenManage Enterprise. 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + enable_ntp: + description: + - Enables or disables Network Time Protocol(NTP). + - If I(enable_ntp) is false, then the NTP addresses reset to their default values. + required: true + type: bool + system_time: + description: + - Time in the current system. + - This option is only applicable when I(enable_ntp) is false. + - This option must be provided in following format 'yyyy-mm-dd hh:mm:ss'. + type: str + time_zone: + description: + - The valid timezone ID to be used. + - This option is applicable for both system time and NTP time synchronization. + type: str + primary_ntp_address: + description: + - The primary NTP address. + - This option is applicable when I(enable_ntp) is true. + type: str + secondary_ntp_address1: + description: + - The first secondary NTP address. + - This option is applicable when I(enable_ntp) is true. + type: str + secondary_ntp_address2: + description: + - The second secondary NTP address. + - This option is applicable when I(enable_ntp) is true. + type: str +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Configure system time + dellemc.openmanage.ome_application_network_time: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_ntp: false + system_time: "2020-03-31 21:35:18" + time_zone: "TZ_ID_11" + +- name: Configure NTP server for time synchronization + dellemc.openmanage.ome_application_network_time: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_ntp: true + time_zone: "TZ_ID_66" + primary_ntp_address: "192.168.0.2" + secondary_ntp_address1: "192.168.0.2" + secondary_ntp_address2: "192.168.0.4" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the network time configuration change. + returned: always + sample: "Successfully configured network time." +proxy_configuration: + type: dict + description: Updated application network time configuration. + returned: success + sample: { + "EnableNTP": false, + "JobId": null, + "PrimaryNTPAddress": null, + "SecondaryNTPAddress1": null, + "SecondaryNTPAddress2": null, + "SystemTime": null, + "TimeSource": "Local Clock", + "TimeZone": "TZ_ID_1", + "TimeZoneIdLinux": null, + "TimeZoneIdWindows": null, + "UtcTime": null + } +error_info: + description: Details of the HTTP error. + returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the request because the input value + for SystemTime is missing or an invalid value is entered.", + "MessageArgs": [ + "SystemTime" + ], + "MessageId": "CGEN6002", + "RelatedProperties": [], + "Resolution": "Enter a valid value and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." 
+ } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +TIME_CONFIG = "ApplicationService/Network/TimeConfiguration" +TIME_ZONE = "ApplicationService/Network/TimeZones" + + +def remove_unwanted_keys(key_list, payload): + [payload.pop(key) for key in key_list if key in payload] + + +def get_payload(module): + params = module.params + proxy_payload_map = { + "enable_ntp": "EnableNTP", + "time_zone": "TimeZone", + "system_time": "SystemTime", + "primary_ntp_address": "PrimaryNTPAddress", + "secondary_ntp_address1": "SecondaryNTPAddress1", + "secondary_ntp_address2": "SecondaryNTPAddress2" + } + backup_params = params.copy() + remove_keys = ["hostname", "username", "password", "port", "ca_path", "validate_certs", "timeout"] + remove_unwanted_keys(remove_keys, backup_params) + payload = dict([(proxy_payload_map[key], val) for key, val in backup_params.items() if val is not None]) + return payload + + +def update_time_config_output(back_up_settings): + remove_keys = ["@odata.context", "@odata.type", "@odata.id"] + remove_unwanted_keys(remove_keys, back_up_settings) + back_up_settings.update({"JobId": None}) + + +def get_updated_payload(rest_obj, module, payload): + remove_keys = ["@odata.context", "@odata.type", "@odata.id", "TimeZoneIdLinux", "TimeZoneIdWindows", "TimeSource", "UtcTime"] + resp = rest_obj.invoke_request("GET", TIME_CONFIG, api_timeout=150) + current_setting = resp.json_data + back_up_settings = current_setting.copy() + remove_unwanted_keys(remove_keys, current_setting) + diff = any(key in current_setting and val != current_setting[key] for key, val in payload.items()) + if module.check_mode: + if diff: + module.exit_json(changed=True, 
msg="Changes found to be applied to the time configuration.") + else: + module.exit_json(changed=False, msg="No changes found to be applied to the time configuration.") + else: + if diff: + current_setting.update(payload) + else: + update_time_config_output(back_up_settings) + module.exit_json(changed=False, msg="No changes made to the time configuration as the entered" + " values are the same as the current configuration.", time_configuration=back_up_settings) + return current_setting + + +def validate_time_zone(module, rest_obj): + params = module.params + time_zone = params.get("time_zone", None) + if time_zone is not None: + time_zone_resp = rest_obj.invoke_request("GET", TIME_ZONE) + time_zone_val = time_zone_resp.json_data["value"] + time_id_list = [time_dict["Id"] for time_dict in time_zone_val] + if time_zone not in time_id_list: + sorted_time_id_list = sorted(time_id_list, key=lambda time_id: [int(i) for i in time_id.split("_") if i.isdigit()]) + module.fail_json(msg="Provide valid time zone.Choices are {0}".format(",".join(sorted_time_id_list))) + + +def validate_input(module): + system_time = module.params.get("system_time") + enable_ntp = module.params["enable_ntp"] + primary_ntp_address = module.params.get("primary_ntp_address") + secondary_ntp_address1 = module.params.get("secondary_ntp_address1") + secondary_ntp_address2 = module.params.get("secondary_ntp_address2") + if enable_ntp is True and system_time is not None: + module.fail_json(msg="When enable NTP is true,the option system time is not accepted.") + if enable_ntp is False and any([primary_ntp_address, secondary_ntp_address1, secondary_ntp_address2]): + module.fail_json(msg="When enable NTP is false,the option(s) primary_ntp_address, secondary_ntp_address1 and secondary_ntp_address2 is not accepted.") + + +def main(): + specs = { + "enable_ntp": {"required": True, "type": "bool"}, + "time_zone": {"required": False, "type": "str"}, + "system_time": {"required": False, "type": "str"}, + 
"primary_ntp_address": {"required": False, "type": "str"}, + "secondary_ntp_address1": {"required": False, "type": "str"}, + "secondary_ntp_address2": {"required": False, "type": "str"}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[['enable_ntp', False, ('time_zone', 'system_time',), True], + ['enable_ntp', True, ('time_zone', 'primary_ntp_address', + 'secondary_ntp_address1', 'secondary_ntp_address2'), True]], + mutually_exclusive=[['system_time', 'primary_ntp_address'], + ['system_time', 'secondary_ntp_address1'], + ['system_time', 'secondary_ntp_address2']], + supports_check_mode=True, + ) + try: + validate_input(module) + with RestOME(module.params, req_session=False) as rest_obj: + validate_time_zone(module, rest_obj) + payload = get_payload(module) + updated_payload = get_updated_payload(rest_obj, module, payload) + resp = rest_obj.invoke_request("PUT", TIME_CONFIG, data=updated_payload, api_timeout=150) + module.exit_json(msg="Successfully configured network time.", time_configuration=resp.json_data, + changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py new file mode 100644 index 00000000..adee29dc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 
2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_network_webserver +short_description: Updates the Web server configuration on OpenManage Enterprise +version_added: "2.1.0" +description: This module allows to configure a network web server on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + webserver_port: + description: + - Port number used by OpenManage Enterprise to establish a secure server connection. + - "I(WARNING) A change in port number results in a loss of connectivity in the current session + for more than a minute." + type: int + webserver_timeout: + description: + - The duration in minutes after which a web user interface session is automatically disconnected. + - If a change is made to the session timeout, it will only take effect after the next log in. + type: int +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Update web server port and session time out + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 9443 + webserver_timeout: 20 + +- name: Update session time out + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_timeout: 30 + +- name: Update web server port + dellemc.openmanage.ome_application_network_webserver: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + webserver_port: 8443 +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the network web server configuration change. + returned: always + sample: "Successfully updated network web server configuration." +webserver_configuration: + type: dict + description: Updated application network web server configuration. + returned: success + sample: { + "TimeOut": 20, + "PortNumber": 443, + "EnableWebServer": true + } +error_info: + description: Details of the HTTP error. + returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the request because the input value + for PortNumber is missing or an invalid value is entered.", + "MessageArgs": [ + "PortNumber" + ], + "MessageId": "CGEN6002", + "RelatedProperties": [], + "Resolution": "Enter a valid value and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." 
+ } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +WEBSERVER_CONFIG = "ApplicationService/Network/WebServerConfiguration" + + +def get_updated_payload(rest_obj, module): + params = module.params + resp = rest_obj.invoke_request("GET", WEBSERVER_CONFIG) + current_setting = resp.json_data + port_changed = 0 + # Remove odata keys ["@odata.context", "@odata.type", "@odata.id"] + cp = current_setting.copy() + klist = cp.keys() + for k in klist: + if str(k).lower().startswith('@odata'): + current_setting.pop(k) + diff = 0 + webserver_payload_map = { + "webserver_port": "PortNumber", + "webserver_timeout": "TimeOut", + } + for config, pload in webserver_payload_map.items(): + pval = params.get(config) + if pval is not None: + if current_setting.get(pload) != pval: + current_setting[pload] = pval + if pload == "PortNumber": + port_changed = pval + diff += 1 + if diff == 0: # Idempotency + if module.check_mode: + module.exit_json(msg="No changes found to be applied to the web server.") + module.exit_json( + msg="No changes made to the web server configuration as the entered" + " values are the same as the current configuration.", webserver_configuration=current_setting) + if module.check_mode: + module.exit_json(changed=True, msg="Changes found to be applied to the web server.") + return current_setting, port_changed + + +def main(): + specs = { + "webserver_port": {"required": False, "type": "int"}, + "webserver_timeout": {"required": False, "type": "int"}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_one_of=[["webserver_port", "webserver_timeout"]], + supports_check_mode=True + ) + + port_change = False + try: + 
with RestOME(module.params, req_session=False) as rest_obj: + updated_payload, port_change = get_updated_payload(rest_obj, module) + msg = "Successfully updated network web server configuration." + resp = rest_obj.invoke_request("PUT", WEBSERVER_CONFIG, data=updated_payload) + module.exit_json(msg=msg, webserver_configuration=resp.json_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except SSLError as err: + if port_change: + module.exit_json(msg="{0} Port has changed to {1}.".format(msg, port_change), + webserver_configuration=updated_payload, changed=True) + else: + module.fail_json(msg=str(err)) + except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py new file mode 100644 index 00000000..d2b23c25 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py @@ -0,0 +1,360 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_application_security_settings +short_description: Configure the login security properties +description: This module allows you to configure the login security properties on OpenManage Enterprise or OpenManage Enterprise Modular +version_added: "4.4.0" +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + restrict_allowed_ip_range: + description: + - Restrict to allow inbound connections only from the specified IP address range. + - This is mutually exclusive with I(fips_mode_enable). + - "C(NOTE) When I(restrict_allowed_ip_range) is configured on the appliance, any inbound connection to the appliance, + such as alert reception, firmware update, and network identities are blocked from the devices that are + outside the specified IP address range. However, any outbound connection from the appliance will work on all devices." + type: dict + suboptions: + enable_ip_range: + description: Allow connections based on the IP address range. + type: bool + required: true + ip_range: + description: "The IP address range in Classless Inter-Domain Routing (CIDR) format. + For example: 192.168.100.14/24 or 2001:db8::/24" + type: str + login_lockout_policy: + description: + - Locks the application after multiple unsuccessful login attempts. + - This is mutually exclusive with I(fips_mode_enable). + type: dict + suboptions: + by_user_name: + description: "Enable or disable lockout policy settings based on the user name. This restricts the number of + unsuccessful login attempts from a specific user for a specific time interval." + type: bool + by_ip_address: + description: "Enable or disable lockout policy settings based on the IP address. 
This restricts the number of + unsuccessful login attempts from a specific IP address for a specific time interval." + type: bool + lockout_fail_count: + description: "The number of unsuccessful login attempts that are allowed after which the appliance prevents log + in from the specific username or IP Address." + type: int + lockout_fail_window: + description: "Lockout fail window is the time in seconds within which the lockout fail count event must occur to + trigger the lockout penalty time. Enter the duration for which OpenManage Enterprise must display information + about a failed attempt." + type: int + lockout_penalty_time: + description: "The duration of time, in seconds, that login attempts from the specific user or IP address must + not be allowed." + type: int + job_wait: + description: + - Provides an option to wait for job completion. + type: bool + default: true + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 120 + fips_mode_enable: + description: + - "The FIPS mode is intended to meet the requirements of FIPS 140-2 level 1. For more information refer to the FIPS + user guide" + - This is applicable only for OpenManage Enterprise Modular only + - This is mutually exclusive with I(restrict_allowed_ip_range) and I(login_lockout_policy). + - "C(WARNING) Enabling or Disabling this option resets your chassis to default settings. This may cause change in + IP settings and loss of network connectivity." + - "C(WARNING) The FIPS mode cannot be enabled on a lead chassis in a multi-chassis management configuration. To toggle + enable FIPS on a lead chassis, delete the chassis group, enable FIPS and recreate the group." + - "C(WARNING) For a Standalone or member chassis, enabling the FIPS mode deletes any fabrics created. 
This may cause + loss of network connectivity and data paths to the compute sleds." + type: bool +author: + - Jagadeesh N V(@jagadeeshnv) +requirements: + - "python >= 3.8.6" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Configure restricted allowed IP range + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + +- name: Configure login lockout policy + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + +- name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + restrict_allowed_ip_range: + enable_ip_range: true + ip_range: 192.1.2.3/24 + login_lockout_policy: + by_user_name: true + by_ip_address: true + lockout_fail_count: 3 + lockout_fail_window: 30 + lockout_penalty_time: 900 + job_wait_timeout: 60 + +- name: Enable FIPS mode + dellemc.openmanage.ome_application_security_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fips_mode_enable: yes +''' + +RETURN = r''' +--- +msg: + description: Overall status of the login security configuration. + returned: always + type: str + sample: "Successfully applied the security settings." +job_id: + description: Job ID of the security configuration task. 
+ returned: When security configuration properties are provided + type: int + sample: 10123 +error_info: + type: dict + description: Details of http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to process the request because the domain information cannot be retrieved.", + "MessageArgs": [], + "MessageId": "CGEN8007", + "RelatedProperties": [], + "Resolution": "Verify the status of the database and domain configuration, and then retry the + operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } +} +''' + +GET_SETTINGS = "ApplicationService/Actions/ApplicationService.GetConfiguration" +SET_SETTINGS = "ApplicationService/Actions/ApplicationService.ApplyConfiguration" +FIPS_MODE = "ApplicationService/Security/SecurityConfiguration" +JOB_EXEC_HISTORY = "JobService/Jobs({job_id})/ExecutionHistories" +SEC_JOB_TRIGGERED = "Successfully triggered the job to apply security settings." +SEC_JOB_COMPLETE = "Successfully applied the security settings." +FIPS_TOGGLED = "Successfully {0} the FIPS mode." +FIPS_CONN_RESET = "The network connection may have changed. Verify the connection and try again." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
SETTLING_TIME = 2  # seconds to pause after submitting the settings job before tracking it
JOB_POLL_INTERVAL = 3  # seconds between successive job-status polls while waiting

import json
import time
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError


def fips_mode_enable(module, rest_obj):
    """Toggle the appliance FIPS mode to match the ``fips_mode_enable`` parameter.

    Reads the current security configuration, exits with NO_CHANGES_MSG when the
    requested state already matches, honours check mode, and otherwise PUTs the
    modified configuration back.  Always terminates the module via ``exit_json``.

    :param module: AnsibleModule instance (read for params/check_mode, used to exit).
    :param rest_obj: RestOME session used for the GET/PUT requests.
    """
    resp = rest_obj.invoke_request("GET", FIPS_MODE)
    fips_payload = resp.json_data
    curr_fips_mode = fips_payload.get("FipsMode")
    # The REST payload encodes the state as the strings "ON"/"OFF".
    if module.params.get("fips_mode_enable") is True:
        fips_mode = "ON"
    else:
        fips_mode = "OFF"
    # Case-insensitive compare so the idempotency check tolerates "On"/"on" etc.
    if curr_fips_mode.lower() == fips_mode.lower():
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    # Drop OData bookkeeping keys before sending the payload back to the appliance.
    payload = rest_obj.strip_substr_dict(fips_payload)
    payload["FipsMode"] = fips_mode
    rest_obj.invoke_request("PUT", FIPS_MODE, data=payload)
    module.exit_json(msg=FIPS_TOGGLED.format("disabled" if fips_mode == "OFF" else "enabled"), changed=True)


def get_security_payload(rest_obj):
    """Fetch the appliance configuration and extract the login-security attributes.

    :param rest_obj: RestOME session.
    :returns: tuple ``(full_set, attr_dict)`` where ``full_set`` is the complete
        SystemConfiguration payload (later re-posted by the caller) and
        ``attr_dict`` maps attribute names containing "loginsecurity"
        (case-insensitive) to their current values.
    """
    resp = rest_obj.invoke_request("POST", GET_SETTINGS, data={})
    full_set = resp.json_data
    # Fall back to a single empty component so indexing [0] below is safe.
    comps = full_set.get("SystemConfiguration", {}).get("Components", [{"Attributes": []}])
    attribs = comps[0].get("Attributes")
    attr_dict = dict(
        [(sys.get('Name'), sys.get("Value")) for sys in attribs if "loginsecurity" in sys.get('Name').lower()])
    return full_set, attr_dict


def compare_merge(module, attr_dict):
    """Merge the module parameters into the current attributes and count changes.

    Mutates ``attr_dict`` in place.  Exits with NO_CHANGES_MSG when nothing
    differs, or with CHANGES_FOUND in check mode; otherwise returns the merged
    attribute dict for the caller to apply.

    :param module: AnsibleModule instance.
    :param attr_dict: current LoginSecurity attribute name -> value mapping.
    :returns: the (mutated) ``attr_dict``.
    """
    # Module option name -> appliance attribute name.
    val_map = {
        "ip_range": "LoginSecurity.1#IPRangeAddr",
        "enable_ip_range": "LoginSecurity.1#IPRangeEnable",
        "by_ip_address": "LoginSecurity.1#LockoutByIPEnable",
        "by_user_name": "LoginSecurity.1#LockoutByUsernameEnable",
        "lockout_fail_count": "LoginSecurity.1#LockoutFailCount",
        "lockout_fail_window": "LoginSecurity.1#LockoutFailCountTime",
        "lockout_penalty_time": "LoginSecurity.1#LockoutPenaltyTime"
    }
    diff = 0

    inp_dicts = ["restrict_allowed_ip_range", "login_lockout_policy"]
    for d in inp_dicts:
        inp_dict = module.params.get(d, {})
        if inp_dict:
            for k, v in inp_dict.items():
                if v is not None:
                    if attr_dict[val_map[k]] != v:
                        attr_dict[val_map[k]] = v
                        diff = diff + 1
    # When the IP range is being disabled, clear the stored address as well;
    # the decrement counteracts the increment that clearing would otherwise
    # add to the change count.
    if attr_dict.get("LoginSecurity.1#IPRangeEnable") is False:
        if attr_dict.get("LoginSecurity.1#IPRangeAddr") is not None:
            attr_dict["LoginSecurity.1#IPRangeAddr"] = None
            diff = diff - 1
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return attr_dict


def get_execution_details(rest_obj, job_id, job_message):
    """Return the latest execution-history message for a failed job.

    Best effort: any error while querying the history falls back to the
    supplied ``job_message``.  Newlines are flattened so the message renders
    on one line in module output.

    :param rest_obj: RestOME session.
    :param job_id: ID of the job whose history is inspected.
    :param job_message: fallback message from the job tracker.
    :returns: a single-line status message string.
    """
    try:
        resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id))
        ex_hist = resp.json_data.get('value')
        # Sorting based on startTime and to get latest execution instance.
        tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist)
        sorted_dates = sorted(tmp_dict.keys())
        ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]])
        resp = rest_obj.invoke_request('GET', ex_url)
        ex_hist = resp.json_data.get('value')
        message = job_message
        if len(ex_hist) > 0:
            message = ex_hist[0].get("Value")
    except Exception:
        # History lookup is informational only; never fail the module over it.
        message = job_message
    message = message.replace('\n', '. ')
    return message


def exit_settings(module, rest_obj, job_id):
    """Finish the module run after the settings job has been submitted.

    Optionally tracks the job to completion (``job_wait``); on job failure the
    execution-history message is surfaced and the module fails.  Always
    terminates via ``exit_json``.
    """
    msg = SEC_JOB_TRIGGERED
    time.sleep(SETTLING_TIME)
    if module.params.get("job_wait"):
        job_failed, job_message = rest_obj.job_tracking(
            job_id=job_id, job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL)
        if job_failed is True:
            job_message = get_execution_details(rest_obj, job_id, job_message)
            module.exit_json(msg=job_message, failed=True, job_id=job_id)
        msg = SEC_JOB_COMPLETE
    module.exit_json(msg=msg, job_id=job_id, changed=True)


def login_security_setting(module, rest_obj):
    """Apply the restricted-IP-range and/or login-lockout settings.

    Fetches the current configuration, merges the requested changes
    (``compare_merge`` exits early on no-change/check-mode), re-posts the full
    configuration, and hands the resulting job off to ``exit_settings``.
    """
    security_set, attr_dict = get_security_payload(rest_obj)
    new_attr_dict = compare_merge(module, attr_dict)
    comps = security_set.get("SystemConfiguration", {}).get("Components", [{"Attributes": []}])
    # Replace the component's attribute list with the merged name/value pairs.
    comps[0]["Attributes"] = [{"Name": k, "Value": v} for k, v in new_attr_dict.items()]
    resp = rest_obj.invoke_request("POST", SET_SETTINGS, data=security_set)
    job_id = resp.json_data.get("JobId")
    exit_settings(module, rest_obj, job_id)


def main():
    """Module entry point: validate arguments and dispatch the requested flow.

    ``fips_mode_enable`` is mutually exclusive with the login-security options;
    exactly one group must be supplied.  HTTP errors surface the appliance's
    extended error info; URL errors mark the host unreachable.
    """
    specs = {
        "restrict_allowed_ip_range": {
            "type": 'dict', "options": {
                "enable_ip_range": {"type": 'bool', "required": True},
                "ip_range": {"type": 'str'}
            },
            # An address is mandatory whenever the range is being enabled.
            "required_if": [("enable_ip_range", True, ("ip_range",))]
        },
        "login_lockout_policy": {
            "type": 'dict', "options": {
                "by_user_name": {"type": 'bool'},
                "by_ip_address": {"type": 'bool'},
                "lockout_fail_count": {"type": 'int'},
                "lockout_fail_window": {"type": 'int'},
                "lockout_penalty_time": {"type": 'int'}
            },
            "required_one_of": [("by_user_name", "by_ip_address", "lockout_fail_count",
                                 "lockout_fail_window", "lockout_penalty_time")]
        },
        "fips_mode_enable": {"type": 'bool'},
        "job_wait": {"type": 'bool', "default": True},
        "job_wait_timeout": {"type": 'int', "default": 120}
    }
    specs.update(ome_auth_params)

    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[("fips_mode_enable", "login_lockout_policy"),
                            ("fips_mode_enable", "restrict_allowed_ip_range")],
        required_one_of=[("restrict_allowed_ip_range", "login_lockout_policy", "fips_mode_enable")],
        supports_check_mode=True)

    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params.get("fips_mode_enable") is not None:
                fips_mode_enable(module, rest_obj)
            else:
                login_security_setting(module, rest_obj)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported without failing, per collection convention.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+version_added: "3.6.0" +author: + - Jagadeesh N V(@jagadeeshnv) +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_options: + type: list + elements: dict + description: + - The ID or service tag of the sled in the slot and the new name for the slot. + - I(device_options) is mutually exclusive with I(slot_options). + suboptions: + device_id: + type: int + description: + - Device ID of the sled in the slot. + - This is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - Service tag of the sled in the slot. + - This is mutually exclusive with I(device_id). + slot_name: + type: str + description: Provide name for the slot. + required: True + slot_options: + type: list + elements: dict + description: + - The service tag of the chassis, slot number of the slot to be renamed, and the new name for the slot. + - I(slot_options) is mutually exclusive with I(device_options). + suboptions: + chassis_service_tag: + type: str + description: Service tag of the chassis. + required: True + slots: + type: list + elements: dict + description: + - The slot number and the new name for the slot. + required: true + suboptions: + slot_number: + type: int + description: The slot number of the slot to be renamed. + required: True + slot_name: + type: str + description: Provide name for the slot. + required: True +requirements: + - "python >= 3.8.6" +notes: + - "This module initiates the refresh inventory task. It may take a minute for new names to be reflected. + If the task exceeds 300 seconds to refresh, the task times out." + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Rename the slots in multiple chassis using slot number and chassis service tag + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + slot_options: + - chassis_service_tag: ABC1234 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + - chassis_service_tag: ABC1235 + slots: + - slot_number: 1 + slot_name: sled_name_1 + - slot_number: 2 + slot_name: sled_name_2 + +- name: Rename single slot name of the sled using sled ID + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: slot_device_name_1 + +- name: Rename single slot name of the sled using sled service tag + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_service_tag: ABC1234 + slot_name: service_tag_slot + +- name: Rename multiple slot names of the devices + dellemc.openmanage.ome_chassis_slots: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_options: + - device_id: 10054 + slot_name: sled_name_1 + - device_service_tag: ABC1234 + slot_name: sled_name_2 + - device_id: 10055 + slot_name: sled_name_3 + - device_service_tag: PQR1234 + slot_name: sled_name_4 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the slot rename operation. + returned: always + sample: "Successfully renamed the slot(s)." +slot_info: + description: + - Information of the slots that are renamed successfully. + - The C(DeviceServiceTag) and C(DeviceId) options are available only if I(device_options) is used. + - C(NOTE) Only the slots which were renamed are listed. 
+ type: list + elements: dict + returned: if at least one slot renamed + sample: [ + { + "ChassisId": 10053, + "ChassisServiceTag": "ABCD123", + "DeviceName": "", + "DeviceType": 1000, + "JobId": 15746, + "SlotId": "10072", + "SlotName": "slot_op2", + "SlotNumber": "6", + "SlotType": 2000 + }, + { + "ChassisId": 10053, + "ChassisName": "MX-ABCD123", + "ChassisServiceTag": "ABCD123", + "DeviceType": "3000", + "JobId": 15747, + "SlotId": "10070", + "SlotName": "slot_op2", + "SlotNumber": "4", + "SlotType": "2000" + }, + { + "ChassisId": "10053", + "ChassisName": "MX-PQRS123", + "ChassisServiceTag": "PQRS123", + "DeviceId": "10054", + "DeviceServiceTag": "XYZ5678", + "DeviceType": "1000", + "JobId": 15761, + "SlotId": "10067", + "SlotName": "a1", + "SlotNumber": "1", + "SlotType": "2000" + } + ] +rename_failed_slots: + description: + - Information of the valid slots that are not renamed. + - C(JobStatus) is shown if rename job fails. + - C(NOTE) Only slots which were not renamed are listed. + type: list + elements: dict + returned: if at least one slot renaming fails + sample: [ + { + "ChassisId": "12345", + "ChassisName": "MX-ABCD123", + "ChassisServiceTag": "ABCD123", + "DeviceType": "4000", + "JobId": 1234, + "JobStatus": "Aborted", + "SlotId": "10061", + "SlotName": "c2", + "SlotNumber": "1", + "SlotType": "4000" + }, + { + "ChassisId": "10053", + "ChassisName": "MX-PQRS123", + "ChassisServiceTag": "PQRS123", + "DeviceType": "1000", + "JobId": 0, + "JobStatus": "HTTP Error 400: Bad Request", + "SlotId": "10069", + "SlotName": "b2", + "SlotNumber": "3", + "SlotType": "2000" + } + ] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGEN1014", + "RelatedProperties": [], + "Message": "Unable to complete the operation because an invalid value is entered for the property + Invalid json type: STRING for Edm.Int64 property: Id .", + "MessageArgs": [ + "Invalid json type: STRING for Edm.Int64 property: Id" + ], + "Severity": "Critical", + "Resolution": "Enter a valid value for the property and retry the operation. For more information about + valid values, see the OpenManage Enterprise-Modular User's Guide available on the support site." + } + ] + } +} +""" + +import json +import time +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.common.dict_transformations import recursive_diff + +DEVICE_URI = "DeviceService/Devices" +JOB_URI = "JobService/Jobs" +DEVICE_REPEATED = "Duplicate device entry found for devices with identifiers {0}." +INVALID_SLOT_DEVICE = "Unable to rename one or more slots because either the specified device is invalid or slots " \ + "cannot be configured. The devices for which the slots cannot be renamed are: {0}." +JOBS_TRIG_FAIL = "Unable to initiate the slot name rename jobs." +SUCCESS_MSG = "Successfully renamed the slot(s)." +SUCCESS_REFRESH_MSG = "The rename slot job(s) completed successfully. " \ + "For changes to reflect, refresh the inventory task manually." +FAILED_MSG = "Failed to rename {0} of {1} slot names." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
SLOT_JOB_DESC = "The rename slot task initiated from OpenManage Ansible Module collections"
REFRESH_JOB_DESC = "The refresh inventory task initiated from OpenManage Ansible Module collections"
CHASSIS_TAG_INVALID = "Provided chassis {0} is invalid."
INVALID_SLOT_NUMBERS = "Unable to rename one or more slots because the slot number(s) are invalid: {0}."
SLOT_NUM_DUP = "Slot numbers are repeated for chassis {0}."
CHASSIS_REPEATED = "Duplicate chassis entry found for chassis with service tags {0}."
SETTLING_TIME = 2  # time gap (seconds) between two consecutive job triggers
JOB_TIMEOUT = 300
JOB_INTERVAL = 5


def get_device_slot_config(module, rest_obj):
    """Resolve ``device_options`` into the slots whose names must change.

    Validates that the requested devices exist, are sled-slot capable, and are
    not repeated (by ID or service tag), then diffs the requested slot names
    against the current ones.  Exits with NO_CHANGES_MSG / CHANGES_FOUND for
    no-op and check-mode runs.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session.
    :returns: dict keyed by device ID (int) or service tag, mapping to the
        device's SlotConfiguration augmented with ``new_name``,
        ``DeviceServiceTag`` and ``DeviceId``.
    """
    ids, tags = {}, {}
    dvc_list = []
    for dvc in module.params.get('device_options'):
        sn = dvc.get('slot_name')
        id = dvc.get('device_id')
        st = dvc.get('device_service_tag')
        if id:
            ids[str(id)] = sn
            dvc_list.append(str(id))
        else:
            tags[st] = sn
            dvc_list.append(st)
    # An element whose first occurrence is at another index is a duplicate.
    duplicate = [x for i, x in enumerate(dvc_list) if i != dvc_list.index(x)]
    if duplicate:
        module.fail_json(msg=DEVICE_REPEATED.format((';'.join(set(duplicate)))))
    resp = rest_obj.get_all_items_with_pagination(DEVICE_URI)
    devices = resp.get('value')
    all_dvcs = {}
    invalid_slots = set()
    ident_map, name_map = {}, {}
    for dvc in devices:
        # Stop scanning once nothing is left to resolve.
        if not ids and not tags:
            break
        id = str(dvc.get('Id'))
        tag = dvc.get('Identifier')
        slot_cfg = dvc.get('SlotConfiguration')
        all_dvcs[tag] = slot_cfg
        if id in ids:
            # A device without a slot number cannot have its slot renamed.
            if not slot_cfg or not slot_cfg.get("SlotNumber"):
                invalid_slots.add(id)
            else:
                ident_map[id] = tag
                name_map[id] = slot_cfg['SlotName']
                slot_cfg['new_name'] = ids[id]
                slot_cfg['DeviceServiceTag'] = tag
                slot_cfg['DeviceId'] = id
        if tag in tags:
            if not slot_cfg or not slot_cfg.get("SlotNumber"):
                invalid_slots.add(tag)
            else:
                ident_map[tag] = tag
                name_map[tag] = slot_cfg['SlotName']
                slot_cfg['new_name'] = tags[tag]
                slot_cfg['DeviceServiceTag'] = tag
                slot_cfg['DeviceId'] = id
    # Catch the same device given once by ID and once by service tag.
    idf_list = list(ident_map.values())
    duplicate = [x for i, x in enumerate(idf_list) if i != idf_list.index(x)]
    if duplicate:
        module.fail_json(msg=DEVICE_REPEATED.format((';'.join(set(duplicate)))))
    # Anything requested but never resolved against the inventory is invalid.
    invalid_slots.update(set(ids.keys()) - set(ident_map.keys()))
    invalid_slots.update(set(tags.keys()) - set(ident_map.keys()))
    if invalid_slots:
        module.fail_json(msg=INVALID_SLOT_DEVICE.format(';'.join(invalid_slots)))
    slot_dict_diff = {}
    id_diff = recursive_diff(ids, name_map)
    if id_diff and id_diff[0]:
        diff = dict([(int(k), all_dvcs[ident_map[k]]) for k, v in (id_diff[0]).items()])
        slot_dict_diff.update(diff)
    tag_diff = recursive_diff(tags, name_map)
    if tag_diff and tag_diff[0]:
        diff = dict([(ident_map[k], all_dvcs[k]) for k, v in (tag_diff[0]).items()])
        slot_dict_diff.update(diff)
    if not slot_dict_diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return slot_dict_diff


def start_slot_name_jobs(rest_obj, slot_data):
    """Submit one rename job per slot and record each job ID on the slot.

    Slots whose job submission raises an HTTPError are moved out of
    ``slot_data`` (mutated in place) into the returned failure dict with the
    error text stored under ``JobStatus``.

    :param rest_obj: RestOME session.
    :param slot_data: dict of slots to rename; mutated in place.
    :returns: dict of slots whose rename job could not be triggered.
    """
    # SlotType ID -> human-readable label used in the job name.
    slot_type = {'2000': "Sled Slot", '4000': "IO Module Slot", '2100': "Storage Sled"}
    failed_jobs = {}
    job_description = SLOT_JOB_DESC
    job_type = {"Id": 3, "Name": "DeviceAction_Task"}
    for k, slot in slot_data.items():
        job_params, target_param = [{"Key": "operationName", "Value": "UPDATE_SLOT_DATA"}], []
        num = slot.get('SlotNumber')
        type_id = str(slot.get('SlotType'))
        job_name = "Rename {0} {1}".format(slot_type.get(type_id, 'Slot'), num)
        target_param.append({"Id": int(slot.get('ChassisId')), "Data": "",
                             "TargetType": {"Id": 1000, "Name": "DEVICE"}})
        # Pipe-separated payload understood by the UPDATE_SLOT_DATA operation.
        slot_config = "{0}|{1}|{2}".format(num, type_id, slot.get('new_name'))
        job_params.append({'Key': 'slotConfig', 'Value': slot_config})
        try:
            job_resp = rest_obj.job_submission(job_name, job_description, target_param,
                                               job_params, job_type)
            slot['JobId'] = job_resp.json_data.get('Id', 0)
            time.sleep(SETTLING_TIME)
        except HTTPError as err:
            slot['JobId'] = 0
            slot['JobStatus'] = str(err)
            failed_jobs[k] = slot
    # Remove the failed entries from slot_data so callers only track live jobs.
    [slot_data.pop(key) for key in failed_jobs.keys()]
    return failed_jobs


def get_job_states(module, rest_obj, slot_data):
    """Poll the triggered rename jobs until they finish or the timeout lapses.

    Completed slots get ``new_name`` promoted to ``SlotName``; slots whose job
    ends in any other terminal state (or never completes within JOB_TIMEOUT)
    are moved out of ``slot_data`` into the returned failure dict.

    :param module: AnsibleModule instance (unused in the polling loop itself).
    :param rest_obj: RestOME session.
    :param slot_data: slots with ``JobId`` set; mutated in place.
    :returns: dict of slots whose rename job did not succeed.
    """
    job_dict = dict([(slot['JobId'], k) for k, slot in slot_data.items() if slot['JobId']])
    query_params = {"$filter": "JobType/Id eq 3"}  # optimize this
    count = JOB_TIMEOUT // SETTLING_TIME
    # LastRunStatus IDs that mean the job is still in flight.
    job_incomplete = [2050, 2030, 2040, 2080]  # Running, Queued, Starting, New
    while count > 0 and job_dict:
        try:
            job_resp = rest_obj.invoke_request("GET", JOB_URI, query_param=query_params)
            jobs = job_resp.json_data.get('value')
        except HTTPError:
            count = count - 50  # 3 times retry for HTTP error
            time.sleep(SETTLING_TIME)
            continue
        job_over = []
        for job in jobs:
            id = job.get('Id')
            if id in job_dict:
                lrs = job.get('LastRunStatus')
                slot = slot_data[job_dict[id]]
                if lrs.get('Id') in job_incomplete:  # Running, not failed, not completed state
                    job_over.append(False)
                elif lrs.get('Id') == 2060:
                    # 2060 == Completed: adopt the new name and stop tracking.
                    job_over.append(True)
                    slot['SlotName'] = slot.pop('new_name')
                    job_dict.pop(id)
                else:
                    slot['JobStatus'] = lrs.get('Name')
                    job_over.append(True)  # Failed states - job not running
        if all(job_over) or not job_dict:
            break
        count = count - 1
        time.sleep(SETTLING_TIME)
    # Whatever is still tracked here either failed or timed out.
    failed_jobs = dict([(k, slot_data.pop(k)) for k in job_dict.values()])
    return failed_jobs


def trigger_refresh_inventory(rest_obj, slot_data):
    """Start an inventory-refresh job for every chassis touched by a rename.

    :param rest_obj: RestOME session.
    :param slot_data: successfully renamed slots (source of chassis IDs/tags).
    :returns: list of refresh job IDs (ints) for the caller to track.
    """
    # De-duplicate chassis: one refresh job per chassis, not per slot.
    chassis_dict = dict([(slot['ChassisId'], slot['ChassisServiceTag']) for slot in slot_data.values()])
    jobs = []
    for chassis in chassis_dict:
        job_type = {"Id": 8, "Name": "Inventory_Task"}
        job_name = "Refresh Inventory Chassis {0}".format(chassis_dict[chassis])
        job_description = REFRESH_JOB_DESC
        target_param = [{"Id": int(chassis), "Data": "''", "TargetType": {"Id": 1000, "Name": "DEVICE"}}]
        job_params = [{"Key": "operationName", "Value": "EC_SLOT_DEVICE_INVENTORY_REFRESH"}]
        job_resp = rest_obj.job_submission(job_name, job_description, target_param, job_params,
                                           job_type)
        job_id = job_resp.json_data.get('Id')
        jobs.append(int(job_id))
        time.sleep(SETTLING_TIME)
    return jobs


def trigger_all_inventory_task(rest_obj):
    """Start the default all-devices inventory refresh job.

    :param rest_obj: RestOME session.
    :returns: the submitted job's ID.
    """
    job_type = {"Id": 8, "Name": "Inventory_Task"}
    job_name = "Refresh Inventory All Devices"
    job_description = REFRESH_JOB_DESC
    # Group 500 / "All-Devices" targets the built-in all-devices group.
    target_param = [{"Id": 500, "Data": "All-Devices", "TargetType": {"Id": 6000, "Name": "GROUP"}}]
    job_params = [{"Key": "defaultInventoryTask", "Value": "TRUE"}]
    job_resp = rest_obj.job_submission(job_name, job_description, target_param, job_params, job_type)
    job_id = job_resp.json_data.get('Id')
    return job_id


def get_formatted_slotlist(slot_dict):
    """Reduce each slot dict to the keys reported in module output.

    Keys not starting with "slot", "job", "chassis" or "device"
    (case-insensitive) are stripped from the slot dicts in place.

    :param slot_dict: mapping of identifier -> slot dict; values are mutated.
    :returns: list of the (trimmed) slot dicts.
    """
    slot_list = list(slot_dict.values())
    req_tup = ('slot', 'job', 'chassis', 'device')
    for slot in slot_list:
        # Iterate over a copy's keys since the original dict shrinks as we pop.
        cp = slot.copy()
        klist = cp.keys()
        for k in klist:
            if not str(k).lower().startswith(req_tup):
                slot.pop(k)
    return slot_list


def exit_slot_config(module, rest_obj, failed_jobs, invalid_jobs, slot_data):
    """Emit the final module result after all rename jobs have been processed.

    Fails the module when any rename failed; otherwise triggers the inventory
    refresh (best effort — a refresh failure only changes the success message,
    it never fails the module).  Always terminates via exit_json/fail_json.
    """
    failed_jobs.update(invalid_jobs)
    if failed_jobs:
        f = len(failed_jobs)
        s = len(slot_data)
        slot_info = get_formatted_slotlist(slot_data)
        failed_jobs_list = get_formatted_slotlist(failed_jobs)
        module.fail_json(msg=FAILED_MSG.format(f, s + f),
                         slot_info=slot_info, rename_failed_slots=failed_jobs_list)
    if slot_data:
        job_failed_list = []
        try:
            rfrsh_job_list = trigger_refresh_inventory(rest_obj, slot_data)
            for job in rfrsh_job_list:
                job_failed, job_message = rest_obj.job_tracking(
                    job, job_wait_sec=JOB_TIMEOUT, sleep_time=JOB_INTERVAL)
                job_failed_list.append(job_failed)
            all_dv_rfrsh = trigger_all_inventory_task(rest_obj)
            job_failed, job_message = rest_obj.job_tracking(
                all_dv_rfrsh, job_wait_sec=JOB_TIMEOUT, sleep_time=JOB_INTERVAL)
            job_failed_list.append(job_failed)
        except Exception:  # Refresh is secondary task hence not failing module
            job_failed_list = [True]
        if any(job_failed_list) is True:
            slot_info = get_formatted_slotlist(slot_data)
            failed_jobs_list = get_formatted_slotlist(failed_jobs)
            module.exit_json(changed=True, msg=SUCCESS_REFRESH_MSG, slot_info=slot_info,
                             rename_failed_slots=failed_jobs_list)
    slot_info = get_formatted_slotlist(slot_data)
    module.exit_json(changed=True, msg=SUCCESS_MSG, slot_info=slot_info,
                     rename_failed_slots=list(failed_jobs.values()))


def get_device_type(rest_obj, type):
    """Fetch all devices of the given numeric device type.

    :param rest_obj: RestOME session.
    :param type: OME device-type ID (e.g. 2000 for chassis, 3000 for storage).
    :returns: the JSON response body (dict with a 'value' device list).
    """
    filter = {"$filter": "Type eq {0}".format(str(type))}
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param=filter)
    return resp.json_data


def get_slot_data(module, rest_obj, ch_slots, chass_id):
    """Build the rename diff for one chassis entry of ``slot_options``.

    Collects the chassis' blade slots (plus storage-sled slots when fewer
    than 8 blade slots are reported), validates the requested slot numbers,
    and returns only the slots whose name actually changes.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session.
    :param ch_slots: one ``slot_options`` entry (chassis_service_tag + slots).
    :param chass_id: resolved device ID of the chassis.
    :returns: dict keyed "<chassis_tag>_<slot_number>" -> slot dict with
        ``new_name`` set.
    """
    uri = DEVICE_URI + "({0})/DeviceBladeSlots".format(chass_id)
    chsvc_tag = ch_slots.get('chassis_service_tag')
    resp = rest_obj.invoke_request("GET", uri)
    blade_slots = resp.json_data.get('value')
    if len(blade_slots) < 8:
        # Storage type 3000
        resp = get_device_type(rest_obj, 3000)
        storage = resp.get('value')
        for stx in storage:
            if stx.get('ChassisServiceTag') == chsvc_tag:
                blade_slots.append(stx.get('SlotConfiguration'))
    blade_dict = {}
    for slot in blade_slots:
        slot["ChassisId"] = chass_id
        slot["ChassisServiceTag"] = chsvc_tag
        if slot.get('Id'):
            slot["SlotId"] = str(slot.get('Id'))
        blade_dict[slot['SlotNumber']] = slot
        rest_obj.strip_substr_dict(slot)
    inp_slots = ch_slots.get('slots')
    existing_dict = dict([(slot['SlotNumber'], slot['SlotName']) for slot in blade_slots])
    input_dict = dict([(str(slot['slot_number']), slot['slot_name']) for slot in inp_slots])
    invalid_slot_number = set(input_dict.keys()) - set(existing_dict.keys())
    if invalid_slot_number:
        module.fail_json(msg=INVALID_SLOT_NUMBERS.format(';'.join(invalid_slot_number)))
    # Fewer dict entries than list entries means a slot number was repeated.
    if len(input_dict) < len(inp_slots):
        module.fail_json(msg=SLOT_NUM_DUP.format(chsvc_tag))
    slot_dict_diff = {}
    slot_diff = recursive_diff(input_dict, existing_dict)
    if slot_diff and slot_diff[0]:
        diff = {}
        for k, v in (slot_diff[0]).items():
            blade_dict[k]['new_name'] = input_dict.get(k)
            diff["{0}_{1}".format(chsvc_tag, k)] = blade_dict[k]
        slot_dict_diff.update(diff)
    return slot_dict_diff


def slot_number_config(module, rest_obj):
    """Resolve ``slot_options`` into the full set of slots to rename.

    Validates chassis service tags (must be existing type-2000 devices, no
    duplicates) and aggregates the per-chassis diffs.  Exits with
    NO_CHANGES_MSG / CHANGES_FOUND for no-op and check-mode runs.

    :returns: combined slot-rename dict across all requested chassis.
    """
    chslots = module.params.get("slot_options")
    resp = get_device_type(rest_obj, 2000)
    chassi_dict = dict([(chx['Identifier'], chx['Id']) for chx in resp.get('value')])
    slot_data = {}
    input_chassi_list = list(chx.get('chassis_service_tag') for chx in chslots)
    duplicate = [x for i, x in enumerate(input_chassi_list) if i != input_chassi_list.index(x)]
    if duplicate:
        module.fail_json(msg=CHASSIS_REPEATED.format((';'.join(set(duplicate)))))
    for chx in chslots:
        chsvc_tag = chx.get('chassis_service_tag')
        if chsvc_tag not in chassi_dict.keys():
            module.fail_json(msg=CHASSIS_TAG_INVALID.format(chsvc_tag))
        slot_dict = get_slot_data(module, rest_obj, chx, chassi_dict[chsvc_tag])
        slot_data.update(slot_dict)
    if not slot_data:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return slot_data


def main():
    """Module entry point: validate arguments, run the rename flow, report results.

    Either ``slot_options`` (chassis + slot numbers) or ``device_options``
    (device ID/service tag) selects the slots; the two are mutually exclusive.
    """
    specs = {
        "device_options": {"type": 'list', "elements": 'dict',
                           "options": {
                               "slot_name": {"required": True, 'type': 'str'},
                               "device_id": {"type": 'int'},
                               "device_service_tag": {"type": 'str'}
                           },
                           "mutually_exclusive": [('device_id', 'device_service_tag')],
                           "required_one_of": [('device_id', 'device_service_tag')]
                           },
        "slot_options": {"type": 'list', "elements": 'dict',
                         "options": {
                             "chassis_service_tag": {"required": True, 'type': 'str'},
                             "slots": {"required": True, "type": 'list', "elements": 'dict',
                                       "options": {
                                           "slot_number": {"required": True, 'type': 'int'},
                                           "slot_name": {"required": True, "type": 'str'}
                                       },
                                       },
                         },
                         },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[('slot_options', 'device_options')],
        mutually_exclusive=[('slot_options', 'device_options')],
        supports_check_mode=True
    )

    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params.get("slot_options"):
                slot_data = slot_number_config(module, rest_obj)
            else:
                slot_data = get_device_slot_config(module, rest_obj)
            invalid_jobs = start_slot_name_jobs(rest_obj, slot_data)
            failed_jobs = {}
            if slot_data:
                failed_jobs = get_job_states(module, rest_obj, slot_data)
            else:
                # Every job submission failed; nothing is left to track.
                module.fail_json(msg=JOBS_TRIG_FAIL, rename_failed_slots=list(invalid_jobs.values()))
            exit_slot_config(module, rest_obj, failed_jobs, invalid_jobs, slot_data)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported without failing, per collection convention.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_configuration_compliance_baseline +short_description: Create, modify, and delete a configuration compliance baseline and remediate non-compliant devices on + OpenManage Enterprise +version_added: "3.2.0" +description: "This module allows to create, modify, and delete a configuration compliance baseline on OpenManage Enterprise. + This module also allows to remediate devices that are non-compliant with the baseline by changing the attributes of devices + to match with the associated baseline attributes." +extends_documentation_fragment: + - dellemc.openmanage.oment_auth_options +options: + command: + description: + - "C(create) creates a configuration baseline from an existing compliance template.C(create) supports + C(check_mode) or idempotency checking for only I(names)." + - "C(modify) modifies an existing baseline.Only I(names), I(description), I(device_ids), I(device_service_tags), + and I(device_group_names) can be modified" + - "I(WARNING) When a baseline is modified, the provided I(device_ids), I(device_group_names), and I(device_service_tags) + replaces the devices previously present in the baseline." + - C(delete) deletes the list of configuration compliance baselines based on the baseline name. Invalid baseline + names are ignored. + - "C(remediate) remediates devices that are non-compliant with the baseline by changing the attributes of devices + to match with the associated baseline attributes." + - "C(remediate) is performed on all the non-compliant devices if either I(device_ids), or I(device_service_tags) + is not provided." + choices: [create, modify, delete, remediate] + default: create + type: str + names: + description: + - Name(s) of the configuration compliance baseline. 
+ - This option is applicable when I(command) is C(create), C(modify), or C(delete). + - Provide the list of configuration compliance baselines names that are supported when I(command) is C(delete). + required: true + type: list + elements: str + new_name: + description: + - New name of the compliance baseline to be modified. + - This option is applicable when I(command) is C(modify). + type: str + template_name: + description: + - Name of the compliance template for creating the compliance baseline(s). + - Name of the deployment template to be used for creating a compliance baseline. + - This option is applicable when I(command) is C(create) and is mutually exclusive with I(template_id). + type: str + template_id: + description: + - ID of the deployment template to be used for creating a compliance baseline. + - This option is applicable when I(command) is C(create) and is mutually exclusive with I(template_name). + type: int + device_ids: + description: + - IDs of the target devices. + - This option is applicable when I(command) is C(create), C(modify), or C(remediate), and is mutually exclusive + with I(device_service_tag) and I(device_group_names). + type: list + elements: int + device_service_tags: + description: + - Service tag of the target device. + - This option is applicable when I(command) is C(create), C(modify), or C(remediate) and is mutually exclusive with + I(device_ids) and I(device_group_names). + type: list + elements: str + device_group_names: + description: + - Name of the target device group. + - This option is applicable when I(command) is C(create), or C(modify) + and is mutually exclusive with I(device_ids) and I(device_service_tag). + type: list + elements: str + description: + description: + - Description of the compliance baseline. + - This option is applicable when I(command) is C(create), or C(modify). + type: str + job_wait: + description: + - Provides the option to wait for job completion. 
+ - This option is applicable when I(command) is C(create), C(modify), or C(remediate). + type: bool + default: true + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds.The job will only be tracked for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 10800 +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - This module supports C(check_mode). + - Ensure that the devices have the required licenses to perform the baseline compliance operations. +''' + +EXAMPLES = r''' +--- +- name: Create a configuration compliance baseline using device IDs + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline1" + template_name: "template1" + description: "description of baseline" + device_ids: + - 1111 + - 2222 + +- name: Create a configuration compliance baseline using device service tags + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline1" + template_id: 1234 + description: "description of baseline" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + +- name: Create a configuration compliance baseline using group names + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + names: "baseline2" + template_id: 2 + job_wait_timeout: 1000 + description: "description of baseline" + device_group_names: + - "Group1" + - "Group2" + +- name: Delete the configuration compliance baselines + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: delete + names: + - baseline1 + - baseline2 
+ +- name: Modify a configuration compliance baseline using group names + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: modify + names: "baseline1" + new_name: "baseline_update" + template_name: "template2" + description: "new description of baseline" + job_wait_timeout: 1000 + device_group_names: + - Group1 + +- name: Remediate specific non-compliant devices to a configuration compliance baseline using device IDs + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_ids: + - 1111 + +- name: Remediate specific non-compliant devices to a configuration compliance baseline using device service tags + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + +- name: Remediate all the non-compliant devices to a configuration compliance baseline + dellemc.openmanage.ome_configuration_compliance_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "remediate" + names: "baseline1" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the configuration compliance baseline operation. + returned: always + type: str + sample: "Successfully created the configuration compliance baseline." 
+incompatible_devices: + description: Details of the devices which cannot be used to perform baseline compliance operations + returned: when I(device_service_tags) or I(device_ids) contains incompatible devices for C(create) or C(modify) + type: list + sample: [1234, 5678] +compliance_status: + description: Status of compliance baseline operation. + returned: when I(command) is C(create) or C(modify) + type: dict + sample: { + "Id": 13, + "Name": "baseline1", + "Description": null, + "TemplateId": 102, + "TemplateName": "one", + "TemplateType": 2, + "TaskId": 26584, + "PercentageComplete": "100", + "TaskStatus": 2070, + "LastRun": "2021-02-27 13:15:13.751", + "BaselineTargets": [ + { + "Id": 1111, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "ConfigComplianceSummary": { + "ComplianceStatus": "OK", + "NumberOfCritical": 0, + "NumberOfWarning": 0, + "NumberOfNormal": 0, + "NumberOfIncomplete": 0 + } + } +job_id: + description: + - Task ID created when I(command) is C(remediate). + returned: when I(command) is C(remediate) + type: int + sample: 14123 +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import json +import time +import re +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +COMPLIANCE_BASELINE = "TemplateService/Baselines" +REMEDIATE_BASELINE = "TemplateService/Actions/TemplateService.Remediate" +DELETE_COMPLIANCE_BASELINE = "TemplateService/Actions/TemplateService.RemoveBaseline" +MODIFY_COMPLIANCE_BASELINE = "api/TemplateService/Baselines({baseline_id})" +TEMPLATE_VIEW = "TemplateService/Templates" +DEVICE_VIEW = "DeviceService/Devices" +GROUP_VIEW = "GroupService/Groups" +OME_INFO = "ApplicationService/Info" +CONFIG_COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports" +INVALID_DEVICES = "{identifier} details are not available." +TEMPLATE_ID_ERROR_MSG = "Template with ID '{template_id}' not found." +TEMPLATE_NAME_ERROR_MSG = "Template '{template_name}' not found." +NAMES_ERROR = "Only delete operations accept multiple baseline names. All the other operations accept only a single " \ + "baseline name." +BASELINE_CHECK_MODE_CHANGE_MSG = "Baseline '{name}' already exists." +CHECK_MODE_CHANGES_MSG = "Changes found to be applied." +CHECK_MODE_NO_CHANGES_MSG = "No changes found to be applied." +BASELINE_CHECK_MODE_NOCHANGE_MSG = "Baseline '{name}' does not exist." +CREATE_MSG = "Successfully created the configuration compliance baseline." +DELETE_MSG = "Successfully deleted the configuration compliance baseline(s)." +MODIFY_MSG = "Successfully modified the configuration compliance baseline." +TASK_PROGRESS_MSG = "The initiated task for the configuration compliance baseline is in progress." +INVALID_IDENTIFIER = "Target with {identifier} {invalid_val} not found." 
+IDEMPOTENCY_MSG = "The specified configuration compliance baseline details are the same as the existing settings." +INVALID_COMPLIANCE_IDENTIFIER = "Unable to complete the operation because the entered target {0} {1}" \ + " is not associated or complaint with the baseline '{2}'." +INVALID_TIME = "job_wait_timeout {0} is not valid." +REMEDIATE_MSG = "Successfully completed the remediate operation." +JOB_FAILURE_PROGRESS_MSG = "The initiated task for the configuration compliance baseline has failed." +NO_CAPABLE_DEVICES = "Target {0} contains devices which cannot be used for a baseline compliance operation." + + +def validate_identifiers(available_values, requested_values, identifier_types, module): + """ + Validate if requested group/device ids are valid + """ + val = set(requested_values) - set(available_values) + if val: + module.fail_json(msg=INVALID_IDENTIFIER.format(identifier=identifier_types, invalid_val=",".join(map(str, val)))) + + +def get_identifiers(available_identifiers_map, requested_values): + """ + Get the device id from service tag + or Get the group id from Group names + or get the id from baseline names + """ + id_list = [] + for key, val in available_identifiers_map.items(): + if val in requested_values: + id_list.append(key) + return id_list + + +def get_template_details(module, rest_obj): + """ + Validate the template. 
+ """ + template_identifier = module.params.get('template_id') + query_param = {"$filter": "Id eq {0}".format(template_identifier)} + identifier = 'Id' + if not template_identifier: + template_identifier = module.params.get('template_name') + query_param = {"$filter": "Name eq '{0}'".format(template_identifier)} + identifier = 'Name' + resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param) + if resp.success and resp.json_data.get('value'): + template_list = resp.json_data.get('value', []) + for each_template in template_list: + if each_template.get(identifier) == template_identifier: + return each_template + if identifier == "Id": + module.fail_json(msg=TEMPLATE_ID_ERROR_MSG.format(template_id=template_identifier)) + else: + module.fail_json(msg=TEMPLATE_NAME_ERROR_MSG.format(template_name=template_identifier)) + + +def get_group_ids(module, rest_obj): + """ + Get the group ids + """ + params = module.params + resp_data = rest_obj.get_all_items_with_pagination(GROUP_VIEW) + values = resp_data["value"] + device_group_names_list = params.get("device_group_names") + final_target_list = [] + if values: + available_ids_tag_map = dict([(item["Id"], item["Name"]) for item in values]) + available_device_tags = available_ids_tag_map.values() + tags_identifier = "device_group_names" + validate_identifiers(available_device_tags, device_group_names_list, tags_identifier, module) + final_target_list = get_identifiers(available_ids_tag_map, device_group_names_list) + else: + module.fail_json(msg=INVALID_DEVICES.format(identifier="Group")) + return final_target_list + + +def get_device_capabilities(devices_list, identifier): + if identifier == "device_ids": + available_ids_capability_map = dict([(item["Id"], item.get("DeviceCapabilities", [])) for item in devices_list]) + else: + available_ids_capability_map = dict( + [(item["Identifier"], item.get("DeviceCapabilities", [])) for item in devices_list]) + capable_devices = [] + noncapable_devices = [] + for 
key, val in available_ids_capability_map.items(): + if 33 in val: + capable_devices.append(key) + else: + noncapable_devices.append(key) + return {"capable": capable_devices, "non_capable": noncapable_devices} + + +def get_device_ids(module, rest_obj): + """ + Get the requested device ids + """ + params = module.params + resp_data = rest_obj.get_all_report_details(DEVICE_VIEW) + values = resp_data["report_list"] + id_list = params.get("device_ids") + service_tags_list = params.get("device_service_tags") + final_target_list = [] + device_capability_map = {} + identifier = "device_ids" + if values: + available_ids_tag_map = dict([(item["Id"], item["Identifier"]) for item in values]) + if id_list: + available_device_ids = available_ids_tag_map.keys() + validate_identifiers(available_device_ids, id_list, "device_ids", module) + final_target_list = id_list + if service_tags_list: + available_device_tags = available_ids_tag_map.values() + validate_identifiers(available_device_tags, service_tags_list, "device_service_tags", module) + id_list = get_identifiers(available_ids_tag_map, service_tags_list) + identifier = "device_service_tags" + final_target_list = id_list + else: + module.fail_json(msg=INVALID_DEVICES.format(identifier="Device")) + if final_target_list: + device_capability_map = get_device_capabilities(values, identifier) + return final_target_list, device_capability_map + + +def validate_capability(module, device_capability_map): + """ + For any non capable devices return the module with failure with list of + non capable devices + """ + if module.params.get("device_ids"): + device_id_list = module.params.get("device_ids") + identifier_types = "device_ids" + else: + device_id_list = module.params.get("device_service_tags") + identifier_types = "device_service_tags" + capable_devices = set(device_id_list) & set(device_capability_map.get("capable", [])) + if len(capable_devices) == 0 or capable_devices and len(capable_devices) != len(device_id_list): + 
non_capable_devices = list(set(device_id_list) - capable_devices) + module.fail_json(msg=NO_CAPABLE_DEVICES.format(identifier_types), + incompatible_devices=non_capable_devices) + + +def create_payload(module, rest_obj): + """ + create the compliance baseline payload + """ + params = module.params + device_id_list = params.get("device_ids") + device_service_tags_list = params.get("device_service_tags") + group_service_tags_list = params.get("device_group_names") + final_target_list = [] + if device_id_list or device_service_tags_list: + device_id_list, device_capability_map = get_device_ids(module, rest_obj) + validate_capability(module, device_capability_map) + final_target_list = device_id_list + if group_service_tags_list: + group_id_list = get_group_ids(module, rest_obj) + final_target_list.extend(group_id_list) + payload = { + "Name": params["names"][0] + } + if module.params.get("template_id") or module.params.get("template_name"): + template = get_template_details(module, rest_obj) + payload["TemplateId"] = template["Id"] + if module.params.get("description"): + payload["Description"] = module.params["description"] + if final_target_list: + payload["BaselineTargets"] = [{"Id": item} for item in final_target_list] + return payload + + +def get_baseline_compliance_info(rest_obj, baseline_identifier_val, attribute="Id"): + """ + Get the baseline info for the created compliance baseline + """ + data = rest_obj.get_all_items_with_pagination(COMPLIANCE_BASELINE) + value = data["value"] + baseline_info = {} + for item in value: + if item[attribute] == baseline_identifier_val: + baseline_info = item + baseline_info.pop("@odata.type", None) + baseline_info.pop("@odata.id", None) + baseline_info.pop("DeviceConfigComplianceReports@odata.navigationLink", None) + break + return baseline_info + + +def track_compliance_task_completion(rest_obj, baseline_identifier_val, module): + """ + wait for the compliance configuration task to complete + """ + baseline_info = 
get_baseline_compliance_info(rest_obj, baseline_identifier_val) + command = module.params["command"] + if module.params.get("job_wait"): + wait_time = 5 + retries_count_limit = module.params["job_wait_timeout"] / wait_time + retries_count = 0 + time.sleep(wait_time) + if command == "create": + msg = CREATE_MSG + else: + msg = MODIFY_MSG + while retries_count <= retries_count_limit: + if baseline_info["PercentageComplete"] == "100": + break + retries_count += 1 + time.sleep(wait_time) + baseline_info = get_baseline_compliance_info(rest_obj, baseline_identifier_val) + if baseline_info["PercentageComplete"] != "100": + msg = TASK_PROGRESS_MSG + else: + msg = TASK_PROGRESS_MSG + return msg, baseline_info + + +def validate_create_baseline_idempotency(module, rest_obj): + """ + Idempotency check for compliance baseline create. + Return error message if baseline name already exists in the system + """ + name = module.params["names"][0] + baseline_info = get_baseline_compliance_info(rest_obj, name, attribute="Name") + if any(baseline_info): + module.exit_json(msg=BASELINE_CHECK_MODE_CHANGE_MSG.format(name=name), changed=False) + if not any(baseline_info) and module.check_mode: + module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True) + + +def create_baseline(module, rest_obj): + """ + Create the compliance baseline. + update the response by getting compliance info. + Note: The response is updated from GET info reason many attribute values are gving null + value. which can be retrieved by getting the created compliance info. 
+ """ + payload = create_payload(module, rest_obj) + validate_create_baseline_idempotency(module, rest_obj) + resp = rest_obj.invoke_request('POST', COMPLIANCE_BASELINE, data=payload) + data = resp.json_data + compliance_id = data["Id"] + baseline_info = get_baseline_compliance_info(rest_obj, compliance_id) + if module.params.get("job_wait"): + job_failed, message = rest_obj.job_tracking(baseline_info["TaskId"], + job_wait_sec=module.params["job_wait_timeout"], + sleep_time=5) + baseline_updated_info = get_baseline_compliance_info(rest_obj, compliance_id) + if job_failed is True: + module.fail_json(msg=message, compliance_status=baseline_updated_info, changed=False) + else: + if "successfully" in message: + module.exit_json(msg=CREATE_MSG, compliance_status=baseline_updated_info, changed=True) + else: + module.exit_json(msg=message, compliance_status=baseline_updated_info, changed=False) + else: + module.exit_json(msg=TASK_PROGRESS_MSG, compliance_status=baseline_info, changed=True) + + +def validate_names(command, module): + """ + The command create, remediate and modify doest not supports more than one name + """ + names = module.params["names"] + if command != "delete" and len(names) > 1: + module.fail_json(msg=NAMES_ERROR) + + +def delete_idempotency_check(module, rest_obj): + delete_names = module.params["names"] + data = rest_obj.get_all_items_with_pagination(COMPLIANCE_BASELINE) + available_baseline_map = dict([(item["Id"], item["Name"]) for item in data["value"]]) + valid_names = set(delete_names) & set(available_baseline_map.values()) + valid_id_list = get_identifiers(available_baseline_map, valid_names) + if module.check_mode and len(valid_id_list) > 0: + module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True) + if len(valid_id_list) == 0: + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG, changed=False) + return valid_id_list + + +def delete_compliance(module, rest_obj): + """ + Deletes the list of baselines + """ + valid_id_list = 
delete_idempotency_check(module, rest_obj) + rest_obj.invoke_request('POST', DELETE_COMPLIANCE_BASELINE, data={"BaselineIds": valid_id_list}) + module.exit_json(msg=DELETE_MSG, changed=True) + + +def compare_payloads(modify_payload, current_payload): + """ + :param modify_payload: payload created to update existing setting + :param current_payload: already existing payload for specified baseline + :return: bool - compare existing and requested setting values of baseline in case of modify operations + if both are same return True + """ + diff = False + for key, val in modify_payload.items(): + if current_payload is None or current_payload.get(key) is None: + return True + elif isinstance(val, dict): + if compare_payloads(val, current_payload.get(key)): + return True + elif val != current_payload.get(key): + return True + return diff + + +def idempotency_check_for_command_modify(current_payload, expected_payload, module): + """ + idempotency check in case of modify operation + :param current_payload: payload modify + :param expected_payload: already existing payload for specified. 
+ :param module: ansible module object + :return: None + """ + payload_diff = compare_payloads(expected_payload, current_payload) + if module.check_mode: + if payload_diff: + module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True) + else: + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG, changed=False) + elif not module.check_mode and not payload_diff: + module.exit_json(msg=IDEMPOTENCY_MSG, changed=False) + + +def modify_baseline(module, rest_obj): + name = module.params["names"][0] + baseline_info = get_baseline_compliance_info(rest_obj, name, attribute="Name") + if not any(baseline_info): + module.fail_json(msg=BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name=name)) + current_payload = create_payload(module, rest_obj) + current_payload["Id"] = baseline_info["Id"] + if module.params.get("new_name"): + new_name = module.params.get("new_name") + if name != new_name: + baseline_info_new = get_baseline_compliance_info(rest_obj, new_name, attribute="Name") + if any(baseline_info_new): + module.fail_json(msg=BASELINE_CHECK_MODE_CHANGE_MSG.format(name=new_name)) + current_payload["Name"] = new_name + required_attributes = ["Id", "Name", "Description", "TemplateId", "BaselineTargets"] + existing_payload = dict([(key, val) for key, val in baseline_info.items() if key in required_attributes and val]) + if existing_payload.get("BaselineTargets"): + target = [{"Id": item["Id"]} for item in existing_payload["BaselineTargets"]] + existing_payload["BaselineTargets"] = target + idempotency_check_for_command_modify(existing_payload, current_payload, module) + existing_payload.update(current_payload) + baseline_update_uri = COMPLIANCE_BASELINE + "({baseline_id})".format(baseline_id=existing_payload["Id"]) + resp = rest_obj.invoke_request('PUT', baseline_update_uri, data=existing_payload) + data = resp.json_data + compliance_id = data["Id"] + baseline_info = get_baseline_compliance_info(rest_obj, compliance_id) + if module.params.get("job_wait"): + job_failed, message = 
rest_obj.job_tracking(baseline_info["TaskId"], + job_wait_sec=module.params["job_wait_timeout"], sleep_time=5) + baseline_updated_info = get_baseline_compliance_info(rest_obj, compliance_id) + if job_failed is True: + module.fail_json(msg=message, compliance_status=baseline_updated_info, changed=False) + else: + if "successfully" in message: + module.exit_json(msg=MODIFY_MSG, compliance_status=baseline_updated_info, changed=True) + else: + module.exit_json(msg=message, compliance_status=baseline_updated_info, changed=False) + else: + module.exit_json(msg=TASK_PROGRESS_MSG, compliance_status=baseline_info, changed=True) + + +def get_ome_version(rest_obj): + resp = rest_obj.invoke_request('GET', OME_INFO) + data = resp.json_data + return data["Version"] + + +def validate_remediate_idempotency(module, rest_obj): + name = module.params["names"][0] + baseline_info = get_baseline_compliance_info(rest_obj, name, attribute="Name") + if not any(baseline_info): + module.fail_json(msg=BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name=name)) + valid_id_list, device_capability_map = get_device_ids(module, rest_obj) + compliance_reports = rest_obj.get_all_items_with_pagination(CONFIG_COMPLIANCE_URI.format(baseline_info["Id"])) + device_id_list = module.params.get("device_ids") + device_service_tags_list = module.params.get("device_service_tags") + if device_id_list: + compliance_report_map = dict([(item["Id"], item["ComplianceStatus"]) for item in compliance_reports["value"]]) + if not any(compliance_report_map): + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG) + invalid_values = list(set(device_id_list) - set(compliance_report_map.keys())) + if invalid_values: + module.fail_json( + INVALID_COMPLIANCE_IDENTIFIER.format("device_ids", ",".join(map(str, invalid_values)), name)) + report_devices = list(set(device_id_list) & set(compliance_report_map.keys())) + noncomplaint_devices = [device for device in report_devices if compliance_report_map[device] == "NONCOMPLIANT" + or 
compliance_report_map[device] == 2] + elif device_service_tags_list: + compliance_report_map = dict( + [(item["ServiceTag"], item["ComplianceStatus"]) for item in compliance_reports["value"]]) + if not any(compliance_report_map): + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG) + invalid_values = list(set(device_service_tags_list) - set(compliance_report_map.keys())) + if invalid_values: + module.fail_json( + INVALID_COMPLIANCE_IDENTIFIER.format("device_service_tags", ",".join(map(str, invalid_values)), name)) + report_devices = list(set(device_service_tags_list) & set(compliance_report_map.keys())) + service_tag_id_map = dict( + [(item["ServiceTag"], item["Id"]) for item in compliance_reports["value"]]) + noncomplaint_devices = [service_tag_id_map[device] for device in report_devices if compliance_report_map[device] == "NONCOMPLIANT" + or compliance_report_map[device] == 2] + else: + compliance_report_map = dict([(item["Id"], item["ComplianceStatus"]) for item in compliance_reports["value"]]) + if not any(compliance_report_map): + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG) + noncomplaint_devices = [device for device, compliance_status in compliance_report_map.items() if + compliance_status == "NONCOMPLIANT" or compliance_status == 2] + if len(noncomplaint_devices) == 0: + module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG) + if module.check_mode and noncomplaint_devices: + module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True) + return noncomplaint_devices, baseline_info + + +def create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj): + ome_version = get_ome_version(rest_obj) + payload = { + "Id": baseline_info["Id"], + "Schedule": { + "RunNow": True, + "RunLater": False + } + } + pattern = re.compile(r'(1|2|3)\.(0|1|2|3|4)\.?') + if pattern.match(ome_version): + payload["TargetIds"] = noncomplaint_devices + else: + payload["DeviceIds"] = noncomplaint_devices + return payload + + +def remediate_baseline(module, rest_obj): + 
noncomplaint_devices, baseline_info = validate_remediate_idempotency(module, rest_obj) + remediate_payload = create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj) + resp = rest_obj.invoke_request('POST', REMEDIATE_BASELINE, data=remediate_payload) + job_id = resp.json_data + if module.params.get("job_wait"): + job_failed, message = rest_obj.job_tracking(job_id, job_wait_sec=module.params["job_wait_timeout"]) + if job_failed is True: + module.fail_json(msg=message, job_id=job_id, changed=False) + else: + if "successfully" in message: + module.exit_json(msg=REMEDIATE_MSG, job_id=job_id, changed=True) + else: + module.exit_json(msg=message, job_id=job_id, changed=False) + else: + module.exit_json(msg=TASK_PROGRESS_MSG, job_id=job_id, changed=True) + + +def validate_job_time(command, module): + """ + The command create, remediate and modify time validation + """ + job_wait = module.params["job_wait"] + if command != "delete" and job_wait: + job_wait_timeout = module.params["job_wait_timeout"] + if job_wait_timeout <= 0: + module.fail_json(msg=INVALID_TIME.format(job_wait_timeout)) + + +def compliance_operation(module, rest_obj): + command = module.params.get("command") + validate_names(command, module) + validate_job_time(command, module) + if command == "create": + create_baseline(module, rest_obj) + if command == "modify": + modify_baseline(module, rest_obj) + if command == "delete": + delete_compliance(module, rest_obj) + if command == "remediate": + remediate_baseline(module, rest_obj) + + +def main(): + specs = { + "command": {"default": "create", + "choices": ['create', 'modify', 'delete', 'remediate']}, + "names": {"required": True, "type": 'list', "elements": 'str'}, + "template_name": {"type": 'str'}, + "template_id": {"type": 'int'}, + "device_ids": {"required": False, "type": 'list', "elements": 'int'}, + "device_service_tags": {"required": False, "type": 'list', "elements": 'str'}, + "device_group_names": {"required": False, "type": 
'list', "elements": 'str'}, + "description": {"type": 'str'}, + "job_wait": {"required": False, "type": 'bool', "default": True}, + "job_wait_timeout": {"required": False, "type": 'int', "default": 10800}, + "new_name": {"type": 'str'}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[ + ['command', 'create', ['template_name', 'template_id'], True], + ['command', 'remediate', ['device_ids', 'device_service_tags', 'job_wait', 'job_wait_timeout'], True], + ['command', 'modify', + ['new_name', 'description', 'template_name', 'template_id', 'device_ids', 'device_service_tags', + 'device_group_names'], True], + ], + mutually_exclusive=[ + ('device_ids', 'device_service_tags'), + ('device_ids', 'device_group_names'), + ('device_service_tags', 'device_group_names'), + ('template_id', 'template_name')], + + supports_check_mode=True) + try: + with RestOME(module.params, req_session=True) as rest_obj: + compliance_operation(module, rest_obj) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py new file mode 100644 index 00000000..d96cd376 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ome_configuration_compliance_info +short_description: Device compliance report for devices managed in OpenManage Enterprise +version_added: "3.2.0" +description: This module allows the generation of a compliance report of a specific or all + of devices in a configuration compliance baseline. +extends_documentation_fragment: + - dellemc.openmanage.oment_auth_options +options: + baseline: + required: True + description: + - The name of the created baseline. + - A compliance report is generated even when the template is not associated with the baseline. + type: str + device_id: + required: False + description: + - The ID of the target device which is associated with the I(baseline). + type: int + device_service_tag: + required: False + description: + - The device service tag of the target device associated with the I(baseline). + - I(device_service_tag) is mutually exclusive with I(device_id). + type: str +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen A (@felixs88)" + - "Kritika Bhateja (@Kritika-Bhateja)" +notes: + - Run this module from a system that has direct access to Dell OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline. + dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + +- name: Retrieve the compliance report for a specific device associated with the baseline using the device ID. 
+ dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_id: 10001 + +- name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag. + dellemc.openmanage.ome_configuration_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline: baseline_name + device_service_tag: 2HFGH3 +''' + +RETURN = r''' +--- +msg: + type: str + description: Over all compliance report status. + returned: on error + sample: "Unable to complete the operation because the entered target baseline name 'baseline' is invalid." +compliance_info: + type: dict + description: Returns the compliance report information. + returned: success + sample: [{ + "ComplianceAttributeGroups": [{ + "Attributes": [], + "ComplianceReason": "One or more attributes on the target device(s) does not match the compliance template.", + "ComplianceStatus": 2, + "ComplianceSubAttributeGroups": [{ + "Attributes": [{ + "AttributeId": 75369, + "ComplianceReason": "Attribute has different value from template", + "ComplianceStatus": 3, + "CustomId": 0, + "Description": null, + "DisplayName": "Workload Profile", + "ExpectedValue": "HpcProfile", + "Value": "NotAvailable" + }], + "ComplianceReason": "One or more attributes on the target device(s) does not match the compliance template.", + "ComplianceStatus": 2, + "ComplianceSubAttributeGroups": [], + "DisplayName": "System Profile Settings", + "GroupNameId": 1 + }], + "DisplayName": "BIOS", + "GroupNameId": 1 + }], + "ComplianceStatus": "NONCOMPLIANT", + "DeviceName": "WIN-PLOV8MPIP40", + "DeviceType": 1000, + "Id": 25011, + "InventoryTime": "2021-03-18 00:01:57.809771", + "Model": "PowerEdge R7525", + "ServiceTag": "JHMBX53" + }] +error_info: + description: Details of the HTTP Error. 
+ returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +BASELINE_URI = "TemplateService/Baselines" +CONFIG_COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports" +COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports({1})/DeviceComplianceDetails" + + +def validate_device(module, report, device_id=None, service_tag=None, base_id=None): + for each in report.get("value"): + if each["Id"] == device_id: + break + if each["ServiceTag"] == service_tag: + device_id = each["Id"] + break + else: + device_name = device_id if device_id is not None else service_tag + module.fail_json(msg="Unable to complete the operation because the entered " + "target device id or service tag '{0}' is invalid.".format(device_name)) + return device_id + + +def get_baseline_id(module, baseline_name, rest_obj): + report = rest_obj.get_all_report_details(BASELINE_URI) + base_id, template_id = None, None + for base in report["report_list"]: + if base["Name"] == baseline_name: + base_id = base["Id"] + template_id = base["TemplateId"] + break + else: + module.fail_json(msg="Unable to complete the operation because the entered " + "target baseline name '{0}' is 
invalid.".format(baseline_name)) + return base_id, template_id + + +def compliance_report(module, rest_obj): + baseline_name = module.params.get("baseline") + device_id = module.params.get("device_id") + device_service_tag = module.params.get("device_service_tag") + baseline_id, template_id = get_baseline_id(module, baseline_name, rest_obj) + report = [] + if device_id: + compliance_uri = COMPLIANCE_URI.format(baseline_id, device_id) + baseline_report = rest_obj.invoke_request("GET", compliance_uri) + if not baseline_report.json_data.get("ComplianceAttributeGroups") and template_id == 0: + module.fail_json(msg="The compliance report of the device not found as " + "there is no template associated with the baseline.") + device_compliance = baseline_report.json_data.get("ComplianceAttributeGroups") + else: + baseline_report = rest_obj.get_all_items_with_pagination(CONFIG_COMPLIANCE_URI.format(baseline_id)) + if device_service_tag: + device_id = validate_device(module, baseline_report, device_id=device_id, + service_tag=device_service_tag, base_id=baseline_id) + report = list(filter(lambda d: d['Id'] in [device_id], baseline_report.get("value"))) + else: + report = baseline_report.get("value") + device_compliance = report + if device_compliance: + for each in device_compliance: + compliance_uri = COMPLIANCE_URI.format(baseline_id, each["Id"]) + attr_group = rest_obj.invoke_request("GET", compliance_uri) + each["ComplianceAttributeGroups"] = attr_group.json_data.get("ComplianceAttributeGroups") + return device_compliance + + +def main(): + specs = { + "baseline": {"required": True, "type": "str"}, + "device_id": {"required": False, "type": "int"}, + "device_service_tag": {"required": False, "type": "str"}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[["device_id", "device_service_tag"]], + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + report = 
compliance_report(module, rest_obj) + module.exit_json(compliance_info=report) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py new file mode 100644 index 00000000..56c1def6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py @@ -0,0 +1,526 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_group +short_description: Add or remove device(s) from a static device group on OpenManage Enterprise +version_added: "3.3.0" +description: This module allows to add or remove device(s) from a static device group on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.oment_auth_options +options: + state: + type: str + description: + - C(present) allows to add the device(s) to a static device group. + - C(absent) allows to remove the device(s) from a static device group. + choices: [present, absent] + default: present + name: + type: str + description: + - Name of the static group. + - I(name) is mutually exclusive with I(group_id). + group_id: + type: int + description: + - ID of the static device. + - I(group_id) is mutually exclusive with I(name). 
+ device_ids: + type: list + elements: int + description: + - List of ID(s) of the device(s) to be added or removed from the device group. + - I(device_ids) is mutually exclusive with I(device_service_tags) and I(ip_addresses). + device_service_tags: + type: list + elements: str + description: + - List of service tag(s) of the device(s) to be added or removed from the device group. + - I(device_service_tags) is mutually exclusive with I(device_ids) and I(ip_addresses). + ip_addresses: + type: list + elements: str + description: + - List of IPs of the device(s) to be added or removed from the device group. + - I(ip_addresses) is mutually exclusive with I(device_ids) and I(device_service_tags). + - "Supported IP address range formats:" + - " - 192.35.0.1" + - " - 10.36.0.0-192.36.0.255" + - " - 192.37.0.0/24" + - " - fe80::ffff:ffff:ffff:ffff" + - " - fe80::ffff:192.0.2.0/125" + - " - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff" + - C(NOTE) Hostname is not supported. + - C(NOTE) I(ip_addresses) requires python's netaddr packages to work on IP Addresses. + - C(NOTE) This module reports success even if one of the IP addresses provided in the I(ip_addresses) list is + available in OpenManage Enterprise.The module reports failure only if none of the IP addresses provided in the + list are available in OpenManage Enterprise. +requirements: + - "python >= 3.8.6" + - "netaddr >= 0.7.19" +author: + - "Felix Stephen (@felixs88)" + - "Sajna Shetty(@Sajna-Shetty)" + - "Abhishek Sinha (@Abhishek-Dell)" +notes: + - Run this module from a system that has direct access to Dell OpenManage Enterprise. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Add devices to a static device group by using the group name and device IDs + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + - 11113 + +- name: Add devices to a static device group by using the group name and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + device_service_tags: + - GHRT2RL + - KJHDF3S + - LKIJNG6 + +- name: Add devices to a static device group by using the group ID and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + device_service_tags: + - GHRT2RL + - KJHDF3S + +- name: Add devices to a static device group by using the group name and IPv4 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + +- name: Add devices to a static device group by using the group ID and IPv6 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + +- name: Add devices to a static device group by using the group ID and supported IPv4 and IPv6 address formats. 
+ dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + +- name: Remove devices from a static device group by using the group name and device IDs + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_ids: + - 11111 + - 11112 + - 11113 + +- name: Remove devices from a static device group by using the group name and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + device_service_tags: + - GHRT2RL + - KJHDF3S + - LKIJNG6 + +- name: Remove devices from a static device group by using the group ID and device service tags + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + device_service_tags: + - GHRT2RL + - KJHDF3S + +- name: Remove devices from a static device group by using the group name and IPv4 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "Storage Services" + ip_addresses: + - 192.35.0.1 + - 192.35.0.5 + +- name: Remove devices from a static device group by using the group ID and IPv6 addresses + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - 
fe80::ffff:ffff:ffff:ffff + - fe80::ffff:ffff:ffff:2222 + +- name: Remove devices from a static device group by using the group ID and supported IPv4 and IPv6 address formats. + dellemc.openmanage.ome_device_group: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + group_id: 12345 + ip_addresses: + - 192.35.0.1 + - 10.36.0.0-192.36.0.255 + - 192.37.0.0/24 + - fe80::ffff:ffff:ffff:ffff + - ::ffff:192.0.2.0/125 + - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff + +""" + + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device group settings. + returned: always + sample: + - "Successfully added member(s) to the device group." +group_id: + type: int + description: ID of the group. + returned: success + sample: 21078 +ip_addresses_added: + type: list + description: IP Addresses which are added to the device group. + returned: success + sample: 21078 +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +""" + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +try: + from netaddr import IPAddress, IPNetwork, IPRange + from netaddr.core import AddrFormatError + + HAS_NETADDR = True +except ImportError: + HAS_NETADDR = False + +GROUP_URI = "GroupService/Groups" +DEVICE_URI = "DeviceService/Devices" +ADD_MEMBER_URI = "GroupService/Actions/GroupService.AddMemberDevices" +REMOVE_MEMBER_URI = "GroupService/Actions/GroupService.RemoveMemberDevices" +ADD_STATIC_GROUP_MESSAGE = "Devices can be added only to the static device groups created using OpenManage Enterprise." +REMOVE_STATIC_GROUP_MESSAGE = "Devices can be removed only from the static device groups created using OpenManage Enterprise." +NETADDR_ERROR = "The module requires python's netaddr be installed on the ansible controller to work on IP Addresses." +INVALID_IP_FORMAT = "The format {0} of the IP address provided is not supported or invalid." +IP_NOT_EXISTS = "The IP addresses provided do not exist in OpenManage Enterprise." 
def validate_group(group_resp, module, identifier, identifier_val):
    """
    Ensure the resolved group exists and is a user-created static group.

    :param group_resp: the group record from OME, or a falsy value when the
                       lookup found nothing.
    :param module: AnsibleModule instance, used only to fail the task.
    :param identifier: "name" or "Id" — which key the user supplied.
    :param identifier_val: the value the user supplied for that key.

    Fails the module with a state-appropriate message when the group is
    missing or not a static user-created group.
    """
    if not group_resp:
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target group {identifier} '{val}' is invalid.".format(identifier=identifier,
                                                                                    val=identifier_val))
    group_type = group_resp["TypeId"]
    membership_id = group_resp["MembershipTypeId"]
    # TypeId 3000 appears to mark user-created groups and MembershipTypeId 24
    # dynamic (query-based) membership — TODO confirm against OME API docs.
    # Original condition "!= 3000 or (== 3000 and membership == 24)" reduces
    # to the equivalent form below.
    if group_type != 3000 or membership_id == 24:
        msg = ADD_STATIC_GROUP_MESSAGE if module.params.get("state", "present") == "present" else \
            REMOVE_STATIC_GROUP_MESSAGE
        module.fail_json(msg=msg)


def get_group_id(rest_obj, module):
    """Resolve the target group by name or by ID and return its group ID."""
    group_name = module.params.get("name")
    group_id = module.params.get("group_id")
    if group_name is not None:
        group_resp = rest_obj.invoke_request("GET", GROUP_URI,
                                             query_param={"$filter": "Name eq '{0}'".format(group_name)})
        matches = group_resp.json_data.get("value")
        group = matches[0] if matches else []
        validate_group(group, module, "name", group_name)
        group_id = group["Id"]
    else:
        uri = "{0}({1})".format(GROUP_URI, group_id)
        try:
            group_resp = rest_obj.invoke_request("GET", uri)
            validate_group(group_resp.json_data, module, "Id", group_id)
        except HTTPError:
            # A nonexistent ID surfaces as an HTTP error; report it as
            # invalid user input rather than a transport failure.
            validate_group({}, module, "Id", group_id)
    return group_id


def get_all_ips(ip_addresses, module):
    """
    Parse user-supplied IP strings into netaddr objects.

    Supports single addresses, "start-end" ranges, and CIDR notation; fails
    the module on the first string that parses as none of these.
    """
    parsed = []
    for ip in ip_addresses:
        try:
            if "/" in ip:
                parsed.append(IPNetwork(ip))        # CIDR
            elif "-" in ip and ip.count("-") == 1:
                start, end = ip.split("-")
                parsed.append(IPRange(start, end))  # explicit range
            else:
                parsed.append(IPAddress(ip))        # single address
        except (AddrFormatError, ValueError):
            module.fail_json(msg=INVALID_IP_FORMAT.format(ip))
    return parsed


def get_device_id_from_ip(ip_addresses, device_list, module):
    """
    Map device IDs to the requested IP they matched.

    :param ip_addresses: parsed netaddr objects from get_all_ips().
    :param device_list: OME device records ("DeviceManagement" holds the IP).
    :return: dict of device ID -> matched IP string.

    Fails the module only when no requested IP matches any device.
    """
    ip_map = dict(
        [(each_device["DeviceManagement"][0]["NetworkAddress"], each_device["Id"])
         for each_device in device_list if each_device["DeviceManagement"]])
    device_id_list_map = {}
    for available_ip, device_id in ip_map.items():
        # Parse the device's address once per device instead of once per
        # requested IP (the original re-parsed it in every inner iteration).
        try:
            ome_ip = IPAddress(available_ip)
        except AddrFormatError:
            # OME may report IPv6 addresses wrapped in brackets — strip them.
            ome_ip = IPAddress(available_ip.replace(']', '').replace('[', ''))
        for ip_format in ip_addresses:
            if isinstance(ip_format, IPAddress):
                if ome_ip == ip_format:
                    device_id_list_map.update({device_id: str(ip_format)})
            elif ome_ip in ip_format:
                device_id_list_map.update({device_id: str(ome_ip)})
    if len(device_id_list_map) == 0:
        module.fail_json(msg=IP_NOT_EXISTS)
    return device_id_list_map


def get_device_id(rest_obj, module):
    """
    Resolve the requested devices to OME device IDs.

    :return: (devices, key) where key is "Id", "DeviceServiceTag" or
             "IPAddresses". For IDs/service tags, devices is a list of device
             IDs; for IPs, a dict of device ID -> matched IP string.

    Fails the module when any requested ID or service tag is unknown.
    """
    device_id_list = module.params.get("device_ids")
    device_tag_list = module.params.get("device_service_tags")
    ip_addresses = module.params.get("ip_addresses")
    device_list = rest_obj.get_all_report_details(DEVICE_URI)
    if device_id_list or device_tag_list:
        if device_id_list:
            key, requested = "Id", device_id_list
        else:
            key, requested = "DeviceServiceTag", device_tag_list
        invalid, tag_ids = [], []
        for each in requested:
            matches = [d for d in device_list["report_list"] if d[key] == each]
            if matches and key == "DeviceServiceTag":
                tag_ids.append(matches[0]["Id"])
            if not matches:
                invalid.append(str(each))
        if invalid:
            value = "id" if key == "Id" else "service tag"
            module.fail_json(msg="Unable to complete the operation because the entered "
                                 "target device {0}(s) '{1}' are invalid.".format(value, ",".join(set(invalid))))
        devices = tag_ids if tag_ids else requested
    else:
        all_ips = get_all_ips(ip_addresses, module)
        devices = get_device_id_from_ip(all_ips, device_list["report_list"], module)
        key = "IPAddresses"
    return devices, key


def add_member_to_group(module, rest_obj, group_id, device_id, key):
    """
    Add the requested devices to the group, skipping ones already present.

    :param device_id: list of device IDs, or (when key == "IPAddresses") a
                      dict of device ID -> matched IP string.
    :return: (API response, list of IPs actually added). The IP list is
             empty for ID/service-tag based requests.

    Exits early for check mode and for the idempotent nothing-to-add case.
    """
    group_device = rest_obj.get_all_report_details("{0}({1})/Devices".format(GROUP_URI, group_id))
    existing, to_add, added_ips = [], [], []
    if key != "IPAddresses":
        for each in device_id:
            matches = [d for d in group_device["report_list"] if d["Id"] == each]
            if matches:
                tag_or_id = matches[0][key] if key == "DeviceServiceTag" else each
                existing.append(str(tag_or_id))
            else:
                to_add.append(each)
    else:
        already_member = []
        for device in group_device["report_list"]:
            if device["Id"] in device_id:
                existing.append(device_id[device["Id"]])
                already_member.append(device["Id"])
        to_add = list(set(device_id.keys()) - set(already_member))
        added_ips = [ip for d_id, ip in device_id.items() if d_id in to_add]
    if module.check_mode:
        if to_add:
            module.exit_json(msg="Changes found to be applied.", changed=True, group_id=group_id)
        module.exit_json(msg="No changes found to be applied.", group_id=group_id)
    if not to_add:
        # Everything requested is already a member: idempotent no-op.
        module.exit_json(msg="No changes found to be applied.", group_id=group_id)
    payload = {"GroupId": group_id, "MemberDeviceIds": to_add}
    response = rest_obj.invoke_request("POST", ADD_MEMBER_URI, data=payload)
    return response, added_ips


def get_current_member_of_group(rest_obj, group_id):
    """Return the IDs of every device currently in the group."""
    group_device = rest_obj.get_all_report_details("{0}({1})/Devices".format(GROUP_URI, group_id))
    return [each["Id"] for each in group_device["report_list"]]


def remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list):
    """
    Remove the requested devices from the group.

    Only IDs that are actually members are sent to the API; exits early for
    check mode and for the idempotent nothing-to-remove case.
    """
    payload_device_list = [each_id for each_id in device_id if each_id in current_device_list]

    if module.check_mode and payload_device_list:
        module.exit_json(msg="Changes found to be applied.", changed=True, group_id=group_id)

    if not payload_device_list:
        module.exit_json(msg="No changes found to be applied.", group_id=group_id)

    payload = {"GroupId": group_id, "MemberDeviceIds": payload_device_list}
    return rest_obj.invoke_request("POST", REMOVE_MEMBER_URI, data=payload)


def main():
    """Module entry point: resolve group and devices, then add or remove."""
    specs = {
        "name": {"type": "str"},
        "group_id": {"type": "int"},
        "state": {"required": False, "type": "str", "choices": ["present", "absent"], "default": "present"},
        "device_service_tags": {"required": False, "type": "list", "elements": 'str'},
        "device_ids": {"required": False, "type": "list", "elements": 'int'},
        "ip_addresses": {"required": False, "type": "list", "elements": 'str'},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=(
            ["state", "present", ("device_ids", "device_service_tags", "ip_addresses"), True],
        ),
        mutually_exclusive=(
            ("name", "group_id"),
            ("device_ids", "device_service_tags", "ip_addresses"),
        ),
        required_one_of=[("name", "group_id"),
                         ("device_ids", "device_service_tags", "ip_addresses")],
        supports_check_mode=True
    )

    try:
        if module.params.get("ip_addresses") and not HAS_NETADDR:
            module.fail_json(msg=NETADDR_ERROR)
        with RestOME(module.params, req_session=True) as rest_obj:
            group_id = get_group_id(rest_obj, module)
            device_id, key = get_device_id(rest_obj, module)
            if module.params["state"] == "present":
                _response, added_ips = add_member_to_group(module, rest_obj, group_id, device_id, key)
                if added_ips:
                    module.exit_json(msg="Successfully added member(s) to the device group.",
                                     group_id=group_id, changed=True, ip_addresses_added=added_ips)
                module.exit_json(msg="Successfully added member(s) to the device group.",
                                 group_id=group_id, changed=True)
            else:
                current_device_list = get_current_member_of_group(rest_obj, group_id)
                remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
                module.exit_json(msg="Successfully removed member(s) from the device group.", changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError,
            IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+ - C(subsystem_health) returns the health status of specified devices. + type: str + choices: [basic_inventory, detailed_inventory, subsystem_health ] + default: basic_inventory + system_query_options: + description: + - I(system_query_options) applicable for the choices of the fact_subset. Either I(device_id) or I(device_service_tag) + is mandatory for C(detailed_inventory) and C(subsystem_health) or both can be applicable. + type: dict + suboptions: + device_id: + description: + - A list of unique identifier is applicable + for C(detailed_inventory) and C(subsystem_health). + type: list + elements: int + device_service_tag: + description: + - A list of service tags are applicable for C(detailed_inventory) + and C(subsystem_health). + type: list + elements: str + inventory_type: + description: + - For C(detailed_inventory), it returns details of the specified inventory type. + type: str + filter: + description: + - For C(basic_inventory), it filters the collection of devices. + I(filter) query format should be aligned with OData standards. + type: str + +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = """ +--- +- name: Retrieve basic inventory of all devices + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "basic_inventory" + system_query_options: + filter: "Id eq 33333 or Id eq 11111" + +- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222 + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + - 22222 + +- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567 + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 + +- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "detailed_inventory" + system_query_options: + device_id: + - 11111 + device_service_tag: + - MXL1234 + - MXL4567 + inventory_type: "serverDeviceCards" + +- name: Retrieve subsystem health of specified devices identified by service tags + dellemc.openmanage.ome_device_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + fact_subset: "subsystem_health" + system_query_options: + device_service_tag: + - MXL1234 + - MXL4567 
+ +""" + +RETURN = ''' +--- +msg: + type: str + description: Over all device information status. + returned: on error + sample: "Failed to fetch the device information" +device_info: + type: dict + description: Returns the information collected from the Device. + returned: success + sample: { + "value": [ + { + "Actions": null, + "AssetTag": null, + "ChassisServiceTag": null, + "ConnectionState": true, + "DeviceManagement": [ + { + "DnsName": "dnsname.host.com", + "InstrumentationName": "MX-12345", + "MacAddress": "11:10:11:10:11:10", + "ManagementId": 12345, + "ManagementProfile": [ + { + "HasCreds": 0, + "ManagementId": 12345, + "ManagementProfileId": 12345, + "ManagementURL": "https://192.168.0.1:443", + "Status": 1000, + "StatusDateTime": "2019-01-21 06:30:08.501" + } + ], + "ManagementType": 2, + "NetworkAddress": "192.168.0.1" + } + ], + "DeviceName": "MX-0003I", + "DeviceServiceTag": "MXL1234", + "DeviceSubscription": null, + "LastInventoryTime": "2019-01-21 06:30:08.501", + "LastStatusTime": "2019-01-21 06:30:02.492", + "ManagedState": 3000, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "SlotConfiguration": {}, + "Status": 4000, + "SystemId": 2031, + "Type": 2000 + } + ] + } +''' + +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +DEVICES_INVENTORY_DETAILS = "detailed_inventory" +DEVICES_SUBSYSTEM_HEALTH = "subsystem_health" +DEVICES_INVENTORY_TYPE = "inventory_type" +DEVICE_LIST = "basic_inventory" +DESC_HTTP_ERROR = "HTTP Error 404: Not Found" +device_fact_error_report = {} + +DEVICE_RESOURCE_COLLECTION = { + DEVICE_LIST: {"resource": "DeviceService/Devices"}, + DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"}, + 
DEVICES_INVENTORY_DETAILS = "detailed_inventory"
DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
DEVICES_INVENTORY_TYPE = "inventory_type"
DEVICE_LIST = "basic_inventory"
DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
# Accumulates per-service-tag error strings reported back with the results.
device_fact_error_report = {}

DEVICE_RESOURCE_COLLECTION = {
    DEVICE_LIST: {"resource": "DeviceService/Devices"},
    DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
    DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
    DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
}


def update_device_details_with_filtering(missing_service_tags, service_tag_dict, rest_obj):
    """
    Fallback lookup for service tags missed by the paginated device report.

    Queries each missing tag individually with an OData $filter and, on a
    hit, records the Id -> tag mapping in service_tag_dict and drops the tag
    from missing_service_tags. Both list and dict are mutated in place.

    :param missing_service_tags: tags not found via pagination; mutated.
    :param service_tag_dict: device Id -> service tag map; mutated.
    :param rest_obj: OME connection object exposing invoke_request().
    """
    # Iterate over a snapshot: the original removed items from the list it
    # was iterating, which silently skips the element following every hit.
    for tag in list(missing_service_tags):
        query_param = {"$filter": "DeviceServiceTag eq '{0}'".format(tag)}
        resp = rest_obj.invoke_request('GET', DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"],
                                       query_param=query_param)
        value = resp.json_data["value"]
        if value and value[0]["DeviceServiceTag"] == tag:
            service_tag_dict.update({value[0]["Id"]: value[0]["DeviceServiceTag"]})
            missing_service_tags.remove(tag)


def _get_device_id_from_service_tags(service_tags, rest_obj):
    """
    Map device IDs to service tags for the requested tags.

    Tags that cannot be resolved even via the filtered fallback are recorded
    in the module-level device_fact_error_report as 404-style errors.

    :arg service_tags: service tags requested by the user.
    :arg rest_obj: RestOME class object in case of request with session.
    :returns: dict of device Id -> service tag, e.g. {1345: "MXL1245"}.
    """
    device_list = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"])["report_list"]
    service_tag_dict = {}
    for item in device_list:
        if item["DeviceServiceTag"] in service_tags:
            service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
    missing_service_tags = list(set(service_tags) - set(service_tag_dict.values()))
    update_device_details_with_filtering(missing_service_tags, service_tag_dict, rest_obj)
    device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in missing_service_tags))
    return service_tag_dict


def is_int(val):
    """Return True when val converts cleanly to int (e.g. a numeric string)."""
    try:
        int(val)
        return True
    except ValueError:
        return False


def _check_duplicate_device_id(device_id_list, service_tag_dict):
    """
    Flag service tags that resolve to a device ID the user also passed directly.

    Such entries are reported as duplicates in device_fact_error_report and
    removed from service_tag_dict so each device is processed only once.

    :arg device_id_list: device IDs requested explicitly by the user.
    :arg service_tag_dict: device Id -> service tag map; mutated in place.
    """
    if device_id_list:
        numeric_ids = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
        common_val = list(set(numeric_ids) & set(service_tag_dict.keys()))
        for device_id in common_val:
            device_fact_error_report.update(
                {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
            del service_tag_dict[device_id]
system_query_options_param.get("device_service_tag") + if device_id_list: + device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list))) + device_id_service_tag_dict["device_id"] = device_id_dict + if device_service_tag_list: + service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list, + rest_obj) + + _check_duplicate_device_id(device_id_list, service_tag_dict) + device_id_service_tag_dict["device_service_tag"] = service_tag_dict + return device_id_service_tag_dict + + +def _get_query_parameters(module_params): + """ + Builds query parameter + :returns: dictionary, which is applicable builds the query format + eg : {"$filter":"Type eq 2000"} + """ + system_query_options_param = module_params.get("system_query_options") + query_parameter = None + if system_query_options_param: + filter_by_val = system_query_options_param.get("filter") + if filter_by_val: + query_parameter = {"$filter": filter_by_val} + return query_parameter + + +def _get_resource_parameters(module_params, rest_obj): + """ + Identifies the resource path by different states + :returns: dictionary containing identifier with respective resource path + eg:{"device_id":{1234:""DeviceService/Devices(1234)/InventoryDetails"}, + "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}} + """ + fact_subset = module_params["fact_subset"] + path_dict = {} + if fact_subset != DEVICE_LIST: + inventory_type = None + device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj) + if fact_subset == DEVICES_INVENTORY_DETAILS: + system_query_options = module_params.get("system_query_options") + inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE) + path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset + for identifier_type, identifier_dict in device_id_service_tag_dict.items(): + path_dict[identifier_type] = {} + for device_id, service_tag in identifier_dict.items(): + key_identifier = service_tag if 
identifier_type == "device_service_tag" else device_id + path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id, + InventoryType=inventory_type) + path_dict[identifier_type].update({key_identifier: path}) + else: + path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]}) + return path_dict + + +def _check_mutually_inclusive_arguments(val, module_params, required_args): + """" + Throws error if arguments detailed_inventory, subsystem_health + not exists with qualifier device_id or device_service_tag""" + system_query_options_param = module_params.get("system_query_options") + if system_query_options_param is None or (system_query_options_param is not None and not any( + system_query_options_param.get(qualifier) for qualifier in required_args)): + raise ValueError("One of the following {0} is required for {1}".format(required_args, val)) + + +def _validate_inputs(module_params): + """validates input parameters""" + fact_subset = module_params["fact_subset"] + if fact_subset != "basic_inventory": + _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"]) + + +def main(): + system_query_options = {"type": 'dict', "required": False, "options": { + "device_id": {"type": 'list', "elements": 'int'}, + "device_service_tag": {"type": 'list', "elements": 'str'}, + "inventory_type": {"type": 'str'}, + "filter": {"type": 'str', "required": False}, + }} + + specs = { + "fact_subset": {"required": False, "default": "basic_inventory", + "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']}, + "system_query_options": system_query_options, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']], + ['fact_subset', 'subsystem_health', ['system_query_options']]], + supports_check_mode=True) + + try: + _validate_inputs(module.params) + with RestOME(module.params, 
req_session=True) as rest_obj: + device_facts = _get_resource_parameters(module.params, rest_obj) + resp_status = [] + if device_facts.get("basic_inventory"): + query_param = _get_query_parameters(module.params) + if query_param is not None: + resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param) + device_facts = resp.json_data + resp_status.append(resp.status_code) + else: + device_report = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]) + device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"], + "@odata.count": len(device_report["report_list"]), + "value": device_report["report_list"]} + resp_status.append(device_report["resp_obj"].status_code) + if device_facts["@odata.count"] == 0: + module.exit_json(msg="No devices present.", device_info=[]) + else: + for identifier_type, path_dict_map in device_facts.items(): + for identifier, path in path_dict_map.items(): + try: + resp = rest_obj.invoke_request('GET', path) + data = resp.json_data + resp_status.append(resp.status_code) + except HTTPError as err: + data = str(err) + path_dict_map[identifier] = data + if any(device_fact_error_report): + if "device_service_tag" in device_facts: + device_facts["device_service_tag"].update(device_fact_error_report) + else: + device_facts["device_service_tag"] = device_fact_error_report + if 200 in resp_status: + module.exit_json(device_info=device_facts) + else: + module.exit_json(msg="Unable to fetch the device information because the requested device id(s) or " + "device service tag(s) does not exist.", + device_info=[]) + + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py 
b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py new file mode 100644 index 00000000..9b48e33d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py @@ -0,0 +1,481 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_local_access_configuration +short_description: Configure local access settings on OpenManage Enterprise Modular. +description: This module allows to configure the local access settings of the power button, quick sync, KVM, + LCD, and chassis direct access on OpenManage Enterprise Modular. +version_added: "4.4.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: int + description: + - The ID of the chassis for which the local access configuration to be updated. + - If the device ID is not specified, this module updates the local access settings for the I(hostname). + - I(device_id) is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - The service tag of the chassis for which the local access settings needs to be updated. + - If the device service tag is not specified, this module updates the local access settings for the I(hostname). + - I(device_service_tag) is mutually exclusive with I(device_id). + enable_kvm_access: + type: bool + description: Enables or disables the keyboard, video, and mouse (KVM) interfaces. 
+ enable_chassis_direct_access: + type: bool + description: Enables or disables the access to management consoles such as iDRAC and the management module of + the device on the chassis. + chassis_power_button: + type: dict + description: The settings for the chassis power button. + suboptions: + enable_chassis_power_button: + required: true + type: bool + description: + - Enables or disables the chassis power button. + - If C(False), the chassis cannot be turned on or turned off using the power button. + enable_lcd_override_pin: + type: bool + description: + - Enables or disables the LCD override pin. + - This is required when I(enable_chassis_power_button) is C(False). + disabled_button_lcd_override_pin: + type: int + description: + - The six digit LCD override pin to change the power state of the chassis. + - This is required when I(enable_lcd_override_pin) is C(True). + - The module will always report change when I(disabled_button_lcd_override_pin) is C(True). + quick_sync: + type: dict + description: + - The settings for quick sync. + - The I(quick_sync) options are ignored if the quick sync hardware is not present. + suboptions: + quick_sync_access: + type: str + choices: [READ_WRITE, READ_ONLY, DISABLED] + description: + - Users with administrator privileges can set the following types of I(quick_sync_access). + - C(READ_WRITE) enables writing configuration using quick sync. + - C(READ_ONLY) enables read only access to Wi-Fi and Bluetooth Low Energy(BLE). + - C(DISABLED) disables reading or writing configuration through quick sync. + enable_inactivity_timeout: + type: bool + description: Enables or disables the inactivity timeout. + timeout_limit: + type: int + description: + - Inactivity timeout in seconds or minutes. + - The range is 120 to 3600 in seconds, or 2 to 60 in minutes. + - This option is required when I(enable_inactivity_timeout) is C(True).
+ timeout_limit_unit: + type: str + choices: [SECONDS, MINUTES] + description: + - Inactivity timeout limit unit. + - C(SECONDS) to set I(timeout_limit) in seconds. + - C(MINUTES) to set I(timeout_limit) in minutes. + - This option is required when I(enable_inactivity_timeout) is C(True). + enable_read_authentication: + type: bool + description: Enables or disables the option to log in using your user credentials and to read the + inventory in a secure data center. + enable_quick_sync_wifi: + type: bool + description: Enables or disables the Wi-Fi communication path to the chassis. + lcd: + type: dict + description: + - The settings for LCD. + - The I(lcd) options are ignored if the LCD hardware is not present in the chassis. + suboptions: + lcd_access: + type: str + choices: [VIEW_AND_MODIFY, VIEW_ONLY, DISABLED] + description: + - Option to configure the quick sync settings using LCD. + - C(VIEW_AND_MODIFY) to set access level to view and modify. + - C(VIEW_ONLY) to set access level to view. + - C(DISABLED) to disable the access. + user_defined: + type: str + description: The text to display on the LCD Home screen. The LCD Home screen is displayed when the system + is reset to factory default settings. The user-defined text can have a maximum of 62 characters. + lcd_language: + type: str + description: + - The language code in which the text on the LCD must be displayed. + - en to set English language. + - fr to set French language. + - de to set German language. + - es to set Spanish language. + - ja to set Japanese language. + - zh to set Chinese language. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports C(check_mode). + - The module will always report change when I(enable_chassis_power_button) is C(True). 
+""" + +EXAMPLES = """ +--- +- name: Configure KVM, direct access and power button settings of the chassis using device ID. + dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + +- name: Configure Quick sync and LCD settings of the chassis using device service tag. + dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + quick_sync: + quick_sync_access: READ_ONLY + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 10 + timeout_limit_unit: MINUTES + lcd: + lcd_access: VIEW_ONLY + lcd_language: en + user_defined: "LCD Text" + +- name: Configure all local access settings of the host chassis. + dellemc.openmanage.ome_device_local_access_configuration: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + enable_kvm_access: true + enable_chassis_direct_access: false + chassis_power_button: + enable_chassis_power_button: false + enable_lcd_override_pin: true + disabled_button_lcd_override_pin: 123456 + quick_sync: + quick_sync_access: READ_WRITE + enable_read_authentication: true + enable_quick_sync_wifi: true + enable_inactivity_timeout: true + timeout_limit: 120 + timeout_limit_unit: SECONDS + lcd: + lcd_access: VIEW_AND_MODIFY + lcd_language: en + user_defined: "LCD Text" +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device local access settings. + returned: always + sample: "Successfully updated the local access settings."
+location_details: + type: dict + description: returned when local access settings are updated successfully. + returned: success + sample: { + "SettingType": "LocalAccessConfiguration", + "EnableChassisDirect": false, + "EnableChassisPowerButton": false, + "EnableKvmAccess": true, + "EnableLcdOverridePin": false, + "LcdAccess": "VIEW_ONLY", + "LcdCustomString": "LCD Text", + "LcdLanguage": "en", + "LcdOverridePin": "", + "LcdPinLength": null, + "LcdPresence": "Present", + "LedPresence": null, + "QuickSync": { + "EnableInactivityTimeout": true, + "EnableQuickSyncWifi": false, + "EnableReadAuthentication": false, + "QuickSyncAccess": "READ_ONLY", + "QuickSyncHardware": "Present", + "TimeoutLimit": 7, + "TimeoutLimitUnit": "MINUTES" + } + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +""" + + +import json +import socket +import copy +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +DOMAIN_URI = "ManagementDomainService/Domains" +DEVICE_URI = "DeviceService/Devices" +LAC_API = "DeviceService/Devices({0})/Settings('LocalAccessConfiguration')" +CONFIG_FAIL_MSG = "one of the following is required: enable_kvm_access, enable_chassis_direct_access, " \ + "chassis_power_button, quick_sync, lcd" +DOMAIN_FAIL_MSG = "The operation to configure the local access is supported only on " \ + "OpenManage Enterprise Modular." +FETCH_FAIL_MSG = "Unable to retrieve the device information." +DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid." +LAC_FAIL_MSG = "Unable to complete the operation because the local access configuration settings " \ + "are not supported on the specified device." +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." +SUCCESS_MSG = "Successfully updated the local access settings." 
+ + +def get_ip_from_host(hostname): + ipaddr = hostname + try: + result = socket.getaddrinfo(hostname, None) + last_element = result[-1] + ip_address = last_element[-1][0] + if ip_address: + ipaddr = ip_address + except socket.gaierror: + ipaddr = hostname + except Exception: + ipaddr = hostname + return ipaddr + + +def get_chassis_device(module, rest_obj): + key, value = None, None + ipaddress = get_ip_from_host(module.params["hostname"]) + resp = rest_obj.invoke_request("GET", DOMAIN_URI) + for data in resp.json_data["value"]: + if ipaddress in data["PublicAddress"]: + key, value = ("Id", data["DeviceId"]) + break + else: + module.fail_json(msg=FETCH_FAIL_MSG) + return key, value + + +def check_domain_service(module, rest_obj): + try: + rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5) + except HTTPError as err: + err_message = json.load(err) + if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006": + module.fail_json(msg=DOMAIN_FAIL_MSG) + return + + +def check_mode_validation(module, loc_resp): + exist_config = { + "EnableKvmAccess": loc_resp["EnableKvmAccess"], "EnableChassisDirect": loc_resp["EnableChassisDirect"], + "EnableChassisPowerButton": loc_resp["EnableChassisPowerButton"], + "EnableLcdOverridePin": loc_resp["EnableLcdOverridePin"], "LcdAccess": loc_resp["LcdAccess"], + "LcdCustomString": loc_resp["LcdCustomString"], "LcdLanguage": loc_resp["LcdLanguage"]} + quick_sync = loc_resp["QuickSync"] + exist_quick_config = { + "QuickSyncAccess": quick_sync["QuickSyncAccess"], "TimeoutLimit": quick_sync["TimeoutLimit"], + "EnableInactivityTimeout": quick_sync["EnableInactivityTimeout"], + "TimeoutLimitUnit": quick_sync["TimeoutLimitUnit"], + "EnableReadAuthentication": quick_sync["EnableReadAuthentication"], + "EnableQuickSyncWifi": quick_sync["EnableQuickSyncWifi"]} + req_config, req_quick_config, payload = {}, {}, {} + lcd_options, chassis_power = module.params.get("lcd"), module.params.get("chassis_power_button") + if 
loc_resp["LcdPresence"] == "Present" and lcd_options is not None: + req_config["LcdCustomString"] = lcd_options.get("user_defined") + req_config["LcdAccess"] = lcd_options.get("lcd_access") + req_config["LcdLanguage"] = lcd_options.get("lcd_language") + req_config["EnableKvmAccess"] = module.params.get("enable_kvm_access") + req_config["EnableChassisDirect"] = module.params.get("enable_chassis_direct_access") + if chassis_power is not None: + power_button = chassis_power["enable_chassis_power_button"] + if power_button is False: + chassis_pin = chassis_power.get("enable_lcd_override_pin") + if chassis_pin is True: + exist_config["LcdOverridePin"] = loc_resp["LcdOverridePin"] + req_config["LcdOverridePin"] = chassis_power["disabled_button_lcd_override_pin"] + req_config["EnableLcdOverridePin"] = chassis_pin + req_config["EnableChassisPowerButton"] = power_button + q_sync = module.params.get("quick_sync") + if q_sync is not None and loc_resp["QuickSync"]["QuickSyncHardware"] == "Present": + req_quick_config["QuickSyncAccess"] = q_sync.get("quick_sync_access") + req_quick_config["EnableReadAuthentication"] = q_sync.get("enable_read_authentication") + req_quick_config["EnableQuickSyncWifi"] = q_sync.get("enable_quick_sync_wifi") + if q_sync.get("enable_inactivity_timeout") is True: + time_limit, time_unit = q_sync.get("timeout_limit"), q_sync.get("timeout_limit_unit") + if q_sync.get("timeout_limit_unit") == "MINUTES": + time_limit, time_unit = time_limit * 60, "SECONDS" + req_quick_config["TimeoutLimit"] = time_limit + req_quick_config["TimeoutLimitUnit"] = time_unit + req_quick_config["EnableInactivityTimeout"] = q_sync.get("enable_inactivity_timeout") + req_config = dict([(k, v) for k, v in req_config.items() if v is not None]) + req_quick_config = dict([(k, v) for k, v in req_quick_config.items() if v is not None]) + cloned_req_config = copy.deepcopy(exist_config) + cloned_req_config.update(req_config) + cloned_req_quick_config = copy.deepcopy(exist_quick_config) + 
cloned_req_quick_config.update(req_quick_config) + diff_changes = [bool(set(exist_config.items()) ^ set(cloned_req_config.items())) or + bool(set(exist_quick_config.items()) ^ set(cloned_req_quick_config.items()))] + if module.check_mode and any(diff_changes) is True: + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif (module.check_mode and all(diff_changes) is False) or \ + (not module.check_mode and all(diff_changes) is False): + module.exit_json(msg=NO_CHANGES_FOUND) + payload.update(cloned_req_config) + payload["QuickSync"] = cloned_req_quick_config + payload["QuickSync"]["QuickSyncHardware"] = loc_resp["QuickSync"]["QuickSyncHardware"] + payload["SettingType"] = "LocalAccessConfiguration" + payload["LcdPresence"] = loc_resp["LcdPresence"] + return payload + + +def get_device_details(rest_obj, module): + device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag") + if device_id is None and tag is None: + key, value = get_chassis_device(module, rest_obj) + device_id = value + else: + key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag) + param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value) + resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value}) + resp_data = resp.json_data.get("value") + rename_key = "id" if key == "Id" else "service tag" + if not resp_data: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag: + device_id = resp_data[0]["Id"] + elif key == "Id" and resp_data[0]["Id"] == device_id: + device_id = resp_data[0]["Id"] + else: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + try: + loc_resp = rest_obj.invoke_request("GET", LAC_API.format(device_id)) + except HTTPError as err: + err_message = json.load(err) + error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo') + if error_msg and 
error_msg[0].get("MessageId") == "CGEN1004": + module.fail_json(msg=LAC_FAIL_MSG) + else: + payload = check_mode_validation(module, loc_resp.json_data) + final_resp = rest_obj.invoke_request("PUT", LAC_API.format(device_id), data=payload) + return final_resp + + +def main(): + chassis_power = { + "enable_chassis_power_button": {"type": "bool", "required": True}, + "enable_lcd_override_pin": {"type": "bool", "required": False}, + "disabled_button_lcd_override_pin": {"type": "int", "required": False, "no_log": True}} + quick_sync_options = { + "quick_sync_access": {"type": "str", "required": False, "choices": ["DISABLED", "READ_ONLY", "READ_WRITE"]}, + "enable_inactivity_timeout": {"type": "bool", "required": False}, + "timeout_limit": {"type": "int", "required": False}, + "timeout_limit_unit": {"type": "str", "required": False, "choices": ["SECONDS", "MINUTES"]}, + "enable_read_authentication": {"type": "bool", "required": False}, + "enable_quick_sync_wifi": {"type": "bool", "required": False}} + lcd_options = { + "lcd_access": {"type": "str", "required": False, "choices": ["VIEW_AND_MODIFY", "VIEW_ONLY", "DISABLED"]}, + "user_defined": {"type": "str", "required": False}, + "lcd_language": {"type": "str", "required": False}} + specs = { + "device_id": {"required": False, "type": "int"}, + "device_service_tag": {"required": False, "type": "str"}, + "enable_kvm_access": {"required": False, "type": "bool"}, + "enable_chassis_direct_access": {"required": False, "type": "bool"}, + "chassis_power_button": { + "required": False, "type": "dict", "options": chassis_power, + "required_if": [["enable_lcd_override_pin", True, ("disabled_button_lcd_override_pin",)]], + }, + "quick_sync": { + "required": False, "type": "dict", "options": quick_sync_options, + "required_if": [["enable_inactivity_timeout", True, ("timeout_limit", "timeout_limit_unit")]] + }, + "lcd": { + "required": False, "type": "dict", "options": lcd_options, + }, + } + specs.update(ome_auth_params) + module = 
AnsibleModule( + argument_spec=specs, + mutually_exclusive=[('device_id', 'device_service_tag')], + required_one_of=[["enable_kvm_access", "enable_chassis_direct_access", + "chassis_power_button", "quick_sync", "lcd"]], + supports_check_mode=True, + ) + try: + if not any([module.params.get("chassis_power_button"), module.params.get("quick_sync"), + module.params.get("lcd"), module.params.get("enable_kvm_access") is not None, + module.params.get("enable_chassis_direct_access") is not None]): + module.fail_json(msg=CONFIG_FAIL_MSG) + with RestOME(module.params, req_session=True) as rest_obj: + check_domain_service(module, rest_obj) + resp = get_device_details(rest_obj, module) + resp_data = resp.json_data + quick_sync = module.params.get("quick_sync") + if quick_sync is not None and quick_sync.get("enable_inactivity_timeout") is True and \ + quick_sync.get("timeout_limit_unit") == "MINUTES": + resp_data["QuickSync"]["TimeoutLimit"] = int(resp_data["QuickSync"]["TimeoutLimit"] / 60) + resp_data["QuickSync"]["TimeoutLimitUnit"] = "MINUTES" + module.exit_json(msg=SUCCESS_MSG, local_access_settings=resp_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py new file mode 100644 index 00000000..96a61a29 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. 
All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_location +short_description: Configure device location settings on OpenManage Enterprise Modular +description: This module allows to configure the device location settings of the chassis + on OpenManage Enterprise Modular. +version_added: "4.2.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: int + description: + - The ID of the chassis for which the settings need to be updated. + - If the device ID is not specified, this module updates + the location settings for the I(hostname). + - I(device_id) is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - The service tag of the chassis for which the settings need to be updated. + - If the device service tag is not specified, this module updates + the location settings for the I(hostname). + - I(device_service_tag) is mutually exclusive with I(device_id). + data_center: + type: str + description: The data center name of the chassis. + room: + type: str + description: The room of the chassis. + aisle: + type: str + description: The aisle of the chassis. + rack: + type: str + description: The rack name of the chassis. + rack_slot: + type: int + description: The rack slot number of the chassis. + location: + type: str + description: The physical location of the chassis. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Update device location settings of a chassis using the device ID. 
+ dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + data_center: data center 1 + room: room 1 + aisle: aisle 1 + rack: rack 1 + rack_slot: 2 + location: location 1 + +- name: Update device location settings of a chassis using the device service tag. + dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + data_center: data center 2 + room: room 7 + aisle: aisle 4 + rack: rack 6 + rack_slot: 22 + location: location 5 + +- name: Update device location settings of the host chassis. + dellemc.openmanage.ome_device_location: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + data_center: data center 3 + room: room 3 + aisle: aisle 1 + rack: rack 7 + rack_slot: 10 + location: location 9 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device location settings. + returned: always + sample: "Successfully updated the location settings." +location_details: + type: dict + description: returned when location settings are updated successfully. + returned: success + sample: { + "Aisle": "aisle 1", + "DataCenter": "data center 1", + "Location": "location 1", + "RackName": "rack 1", + "RackSlot": 2, + "Room": "room 1", + "SettingType": "Location" + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. 
LOCATION_API = "DeviceService/Devices({0})/Settings('Location')"
DEVICE_URI = "DeviceService/Devices"
DOMAIN_URI = "ManagementDomainService/Domains"
DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."


def check_domain_service(module, rest_obj):
    """Fail fast unless the target is an OpenManage Enterprise Modular appliance.

    A CGEN1006 error from the management-domains endpoint means the appliance
    has no domain service, i.e. it is not an OME-Modular system.

    :param module: AnsibleModule instance (used only for fail_json).
    :param rest_obj: authenticated RestOME session.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        err_message = json.load(err)
        if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
            module.fail_json(msg="The device location settings operation is supported only on "
                                 "OpenManage Enterprise Modular systems.")


def validate_dictionary(module, loc_resp):
    """Diff requested location settings against the device's current ones.

    Exits the module early when there is nothing to change (and in check
    mode); otherwise returns the merged payload for the PUT request.

    :param module: AnsibleModule instance; reads params and check_mode.
    :param loc_resp: dict, current location settings from the appliance.
    :return: dict payload with SettingType plus merged location fields.
    """
    rack_slot = module.params.get("rack_slot")
    req_dict = {
        "DataCenter": module.params.get("data_center"),
        "Room": module.params.get("room"),
        "Aisle": module.params.get("aisle"),
        "RackName": module.params.get("rack"),
        "Location": module.params.get("location"),
    }
    # Only fields the user actually supplied take part in the diff.
    req_filter_none = {k: v for k, v in req_dict.items() if v is not None}
    exit_dict = {k: v for k, v in loc_resp.items() if k in req_filter_none and v is not None}
    if rack_slot is not None:
        req_dict["RackSlot"] = rack_slot
        req_filter_none["RackSlot"] = rack_slot
        exit_dict["RackSlot"] = loc_resp["RackSlot"]
    diff = bool(set(req_filter_none.items()) ^ set(exit_dict.items()))
    # The original code had two identical "no changes" exits (check mode and
    # normal mode) — collapsed into one.
    if not diff:
        module.exit_json(msg="No changes found to be applied.")
    if module.check_mode:
        module.exit_json(msg="Changes found to be applied.", changed=True)
    payload_dict = {"SettingType": "Location"}
    # Start from the device's current values, then overlay the request.
    payload_dict.update({k: v for k, v in loc_resp.items() if k in req_dict})
    payload_dict.update(req_filter_none)
    if req_filter_none.get("RackSlot") is None:
        payload_dict["RackSlot"] = loc_resp.get("RackSlot")
    return payload_dict


def get_ip_from_host(hostname):
    """Best-effort resolution of *hostname* to an IP address.

    Returns the input unchanged when resolution fails or yields nothing —
    the caller substring-matches the result against domain public addresses.
    """
    try:
        addr_info = socket.getaddrinfo(hostname, None)
        resolved = addr_info[-1][-1][0]
        if resolved:
            return resolved
    except Exception:  # broad on purpose: resolution is best-effort only
        pass
    return hostname


def standalone_chassis(module, rest_obj):
    """Resolve the device id of the chassis that owns I(hostname).

    Used when neither device_id nor device_service_tag was supplied.
    :return: ("Id", device_id) tuple on success; fails the module otherwise.
    """
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    for data in resp.json_data["value"]:
        if ipaddress in data["PublicAddress"]:
            return "Id", data["DeviceId"]
    # NOTE(review): source indentation was lost in the collapsed diff; the
    # failure branch is treated as the "no domain matched" case — confirm.
    module.fail_json(msg="Failed to fetch the device information.")
    return None, None


def device_validation(module, rest_obj):
    """Resolve the target device, diff its location settings, and apply them.

    :return: PUT response object on success. Preserves the original
        best-effort behavior of returning an empty dict when the GET of the
        location settings fails with an HTTP error other than CGEN1004.
    """
    final_resp = {}
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    if device_id is None and tag is None:
        key, value = standalone_chassis(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    # Service tags are strings and must be quoted in the OData filter.
    param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
    resp_data = resp.json_data.get("value")
    rename_key = "id" if key == "Id" else "service tag"
    if not resp_data:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
        device_id = resp_data[0]["Id"]
    elif key == "Id" and resp_data[0]["Id"] == device_id:
        device_id = resp_data[0]["Id"]
    else:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    try:
        loc_resp = rest_obj.invoke_request("GET", LOCATION_API.format(device_id))
    except HTTPError as err:
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg="Unable to complete the operation because the location settings "
                                 "are not supported on the specified device.")
    else:
        payload = validate_dictionary(module, loc_resp.json_data)
        final_resp = rest_obj.invoke_request("PUT", LOCATION_API.format(device_id), data=payload)
    return final_resp


def main():
    """Module entry point: build the argument spec and run the update."""
    specs = {
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
        "data_center": {"required": False, "type": "str"},
        "room": {"required": False, "type": "str"},
        "aisle": {"required": False, "type": "str"},
        "rack": {"required": False, "type": "str"},
        "rack_slot": {"required": False, "type": "int"},
        "location": {"required": False, "type": "str"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('device_id', 'device_service_tag')],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            resp = device_validation(module, rest_obj)
            module.exit_json(msg="Successfully updated the location settings.",
                             location_details=resp.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported without failing the task.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py new file mode 100644 index 00000000..e895472e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py @@ -0,0 +1,778 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_mgmt_network +short_description: Configure network settings of devices on OpenManage Enterprise Modular +description: This module allows to configure network settings on Chassis, Servers, and I/O Modules on OpenManage Enterprise Modular. +version_added: 4.2.0 +author: + - Jagadeesh N V(@jagadeeshnv) +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_service_tag: + description: + - Service tag of the device. + - This option is mutually exclusive with I(device_id). + type: str + device_id: + description: + - ID of the device. + - This option is mutually exclusive with I(device_service_tag). + type: int + enable_nic: + description: + - Enable or disable Network Interface Card (NIC) configuration of the device. + - This option is not applicable to I/O Module. + type: bool + default: true + delay: + description: + - The time in seconds, after which settings are applied. + - This option is applicable only for Chassis. 
+ type: int + default: 0 + ipv4_configuration: + description: + - IPv4 network configuration. + - "C(WARNING) Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these + options can change the current IPv4 address for I(hostname)." + type: dict + suboptions: + enable_ipv4: + description: + - Enable or disable access to the network using IPv4. + type: bool + required: true + enable_dhcp: + description: + - "Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration + Protocol (DHCP) server." + - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_subnet_mask), + and I(static_gateway) are not applied for these fields. However, the module may report changes." + type: bool + static_ip_address: + description: + - Static IPv4 address + - This option is applicable when I(enable_dhcp) is false. + type: str + static_subnet_mask: + description: + - Static IPv4 subnet mask address + - This option is applicable when I(enable_dhcp) is false. + type: str + static_gateway: + description: + - Static IPv4 gateway address + - This option is applicable when I(enable_dhcp) is false. + type: str + use_dhcp_to_obtain_dns_server_address: + description: + - This option allows to automatically request and obtain IPv4 address for the DNS Server from the DHCP server. + - This option is applicable when I(enable_dhcp) is true. + - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and + I(static_alternate_dns_server) are not applied for these fields. However, the module may report changes." + type: bool + static_preferred_dns_server: + description: + - Static IPv4 DNS preferred server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. 
+ type: str + static_alternate_dns_server: + description: + - Static IPv4 DNS alternate server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + ipv6_configuration: + description: + - IPv6 network configuration. + - "C(WARNING) Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these options can + change the current IPv6 address for I(hostname)." + type: dict + suboptions: + enable_ipv6: + description: Enable or disable access to the network using the IPv6. + type: bool + required: true + enable_auto_configuration: + description: + - "Enable or disable the automatic request to obtain an IPv6 address from the IPv6 DHCP server or router + advertisements(RA)" + - "If I(enable_auto_configuration) is C(true), OpenManage Enterprise Modular retrieves IP configuration + (IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network." + - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_prefix_length), + and I(static_gateway) are not applied for these fields. However, the module may report changes." + type: bool + static_ip_address: + description: + - Static IPv6 address + - This option is applicable when I(enable_auto_configuration) is false. + type: str + static_prefix_length: + description: + - Static IPv6 prefix length + - This option is applicable when I(enable_auto_configuration) is false. + type: int + static_gateway: + description: + - Static IPv6 gateway address + - This option is applicable when I(enable_auto_configuration) is false. + type: str + use_dhcpv6_to_obtain_dns_server_address: + description: + - This option allows to automatically request and obtain a IPv6 address for the DNS server from the DHCP server. 
+ - This option is applicable when I(enable_auto_configuration) is true + - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server) + are not applied for these fields. However, the module may report changes." + type: bool + static_preferred_dns_server: + description: + - Static IPv6 DNS preferred server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + static_alternate_dns_server: + description: + - Static IPv6 DNS alternate server + - This option is applicable when I(use_dhcp_for_dns_server_names) is false. + type: str + management_vlan: + description: + - VLAN configuration. + type: dict + suboptions: + enable_vlan: + description: + - Enable or disable VLAN for management. + - The VLAN configuration cannot be updated if the I(register_with_dns) field under I(dns_configuration) is true. + - "C(WARNING) Ensure that the network cable is connected to the correct port after the VLAN configuration + is changed. If not, the VLAN configuration changes may not be applied." + required: true + type: bool + vlan_id: + description: + - VLAN ID. + - "The valid VLAN IDs are: 1 to 4000, and 4021 to 4094." + - This option is applicable when I(enable_vlan) is true. + type: int + dns_configuration: + description: Domain Name System(DNS) settings. + type: dict + suboptions: + register_with_dns: + description: + - Register/Unregister I(dns_name) on the DNS Server. + - C(WARNING) This option cannot be updated if VLAN configuration changes. + type: bool + use_dhcp_for_dns_domain_name: + description: Get the I(dns_domain_name) using a DHCP server. + type: bool + dns_name: + description: + - DNS name for I(hostname) + - This is applicable when I(register_with_dns) is true. + type: str + dns_domain_name: + description: + - Static DNS domain name + - This is applicable when I(use_dhcp_for_dns_domain_name) is false. 
+ type: str + auto_negotiation: + description: + - Enables or disables the auto negotiation of the network speed. + - "C(NOTE): Setting I(auto_negotiation) to false and choosing a network port speed may result in the chassis + losing link to the top of rack network switch, or to the neighboring chassis in case of MCM mode. It is + recommended that the I(auto_negotiation) is set to C(true) for most use cases." + - This is applicable when I(use_dhcp_for_dns_domain_name) is false. + - This is applicable only for Chassis. + type: bool + network_speed: + description: + - The speed of the network port. + - This is applicable when I(auto_negotiation) is false. + - C(10_MB) to select network speed of 10 MB. + - C(100_MB) to select network speed of 100 MB. + - This is applicable only for Chassis. + choices: + - 10_MB + - 100_MB + type: str + dns_server_settings: + description: + - DNS server settings. + - This is applicable only for I/O Module. + type: dict + suboptions: + preferred_dns_server: + description: + - Enter the IP address of the preferred DNS server. + type: str + alternate_dns_server1: + description: + - Enter the IP address of the first alternate DNS server. + type: str + alternate_dns_server2: + description: + - Enter the IP address of the second alternate DNS server. + type: str +requirements: + - "python >= 3.8.6" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Network settings for chassis + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: CHAS123 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4 + dns_configuration: + register_with_dns: true + use_dhcp_for_dns_domain_name: false + dns_name: "MX-SVCTAG" + dns_domain_name: "dnslocaldomain" + auto_negotiation: no + network_speed: 100_MB + +- name: Network settings for server + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: SRVR123 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + use_dhcp_to_obtain_dns_server_address: false + static_preferred_dns_server: 192.168.0.4 + static_alternate_dns_server: 192.168.0.5 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + use_dhcpv6_to_obtain_dns_server_address: false + static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3 + static_alternate_dns_server: 
2626:f2f2:f081:9:1c1c:f1f1:4747:4 + +- name: Network settings for I/O module + dellemc.openmanage.ome_device_mgmt_network: + hostname: 192.168.0.1 + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: IOM1234 + ipv4_configuration: + enable_ipv4: true + enable_dhcp: false + static_ip_address: 192.168.0.2 + static_subnet_mask: 255.255.254.0 + static_gateway: 192.168.0.3 + ipv6_configuration: + enable_ipv6: true + enable_auto_configuration: false + static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1 + static_prefix_length: 10 + static_gateway: ffff::2607:f2b1:f081:9 + dns_server_settings: + preferred_dns_server: 192.168.0.4 + alternate_dns_server1: 192.168.0.5 + +- name: Management VLAN configuration of chassis using device id + dellemc.openmanage.ome_device_mgmt_network: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id : 12345 + management_vlan: + enable_vlan: true + vlan_id: 2345 + dns_configuration: + register_with_dns: false +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the network config operation. + returned: always + sample: Successfully applied the network settings. +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGEN1004", + "RelatedProperties": [], + "Message": "Unable to complete the request because IPV4 Settings Capability is not Supported does not + exist or is not applicable for the resource URI.", + "MessageArgs": [ + "IPV4 Settings Capability is not Supported" + ], + "Severity": "Critical", + "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide + for more information about resource URI and its properties." 
+ } + ] + } +} +""" + +import json +import socket +import copy +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.common.dict_transformations import recursive_diff + +DEVICE_URI = "DeviceService/Devices" +MGMT_DOMAIN = "ManagementDomainService/Domains" +LEAD_CONFIG = "ApplicationService/Network/AddressConfiguration" +NETWORK_SETTINGS = "DeviceService/Devices({0})/Settings('Network')" +DEVICE_NOT_FOUND = "Device with {0} '{1}' not found." +NON_CONFIG_NETWORK = "Network settings for {0} is not configurable." +SUCCESS_MSG = "Successfully applied the network settings." +INVALID_IP = "Invalid {0} address provided for the {1}" +DNS_SETT_ERR1 = "'SecondaryDNS' requires 'PrimaryDNS' to be provided." +DNS_SETT_ERR2 = "'TertiaryDNS' requires both 'PrimaryDNS' and 'SecondaryDNS' to be provided." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
# OME device "Type" codes for the device classes this module can configure.
SERVER = 1000
CHASSIS = 2000
IO_MODULE = 4000
API_TIMEOUT = 120


def validate_ip_address(address):
    """Return truthy iff *address* is a dotted-quad IPv4 address.

    inet_aton accepts short forms such as "127.1", so the dot count guards
    against those; the function's truthiness contract is what callers use.
    """
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return address.count('.') == 3


def validate_ip_v6_address(address):
    """Return True iff *address* parses as an IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
    except socket.error:
        return False
    return True


def validate_ipaddress(module, ip_type, config, var_list, ip_func):
    """Fail the module when any address option in *config* is malformed.

    :param ip_type: label used in the error message ("IPv4"/"IPv6").
    :param config: name of the module parameter holding the sub-options dict.
    :param var_list: option names within that dict to validate.
    :param ip_func: predicate used to validate each address.
    """
    ipv_input = module.params.get(config)
    if ipv_input:
        for ipname in var_list:
            val = ipv_input.get(ipname)
            if val and not ip_func(val):
                module.fail_json(msg=INVALID_IP.format(ip_type, ipname))


def validate_input(module):
    """Validate all address parameters and normalize numeric sub-options.

    Mutates module.params in place: static_prefix_length and vlan_id are
    converted to strings, which is what the OME payloads expect.
    """
    ip_addr = ["static_ip_address", "static_gateway", "static_preferred_dns_server", "static_alternate_dns_server"]
    validate_ipaddress(module, "IPv6", "ipv6_configuration", ip_addr, validate_ip_v6_address)
    ip_addr.append("static_subnet_mask")
    validate_ipaddress(module, "IPv4", "ipv4_configuration", ip_addr, validate_ip_address)
    dns_settings = module.params.get("dns_server_settings")
    if dns_settings:
        # IOM DNS servers may be either IPv4 or IPv6.
        for k, v in dns_settings.items():
            if v is not None and not validate_ip_address(v) and not validate_ip_v6_address(v):
                module.fail_json(msg=INVALID_IP.format("IP", k))
    ipv6 = module.params.get("ipv6_configuration")
    if ipv6 and ipv6.get("static_prefix_length"):
        ipv6["static_prefix_length"] = str(ipv6["static_prefix_length"])
    vlan = module.params.get("management_vlan")
    if vlan and vlan.get("vlan_id"):
        vlan["vlan_id"] = str(vlan["vlan_id"])


def get_device_details(module, rest_obj):
    """Look up the target device by id or service tag.

    :return: the matching device record dict; fails the module when the
        device cannot be found.
    """
    # Renamed from ``id`` to avoid shadowing the builtin.
    lookup_value = module.params.get('device_id')
    lookup_field = 'Id'
    query_param = {"$filter": "{0} eq {1}".format(lookup_field, lookup_value)}
    if not lookup_value:
        lookup_value = module.params.get('device_service_tag')
        lookup_field = 'Identifier'
        query_param = {"$filter": "{0} eq '{1}'".format(lookup_field, lookup_value)}
    resp = rest_obj.invoke_request('GET', DEVICE_URI, query_param=query_param)
    if resp.success and resp.json_data.get('value'):
        # The filter can match loosely; confirm the exact field value.
        for device in resp.json_data.get('value', []):
            if device.get(lookup_field) == lookup_value:
                return device
    module.fail_json(msg=DEVICE_NOT_FOUND.format(lookup_field, lookup_value))


def transform_diff(params, translator, sub_payload, bool_trans=None):
    """Map module options onto API field names and merge into *sub_payload*.

    :param translator: dict of option name -> API payload key.
    :param bool_trans: optional mapping applied to boolean values
        (e.g. {True: 'Enabled', False: 'Disabled'} for server payloads).
    :return: dict of fields that differ from what the payload already held.
    """
    df = {}
    inp_dict = {}
    for opt_name, api_key in translator.items():
        value = params.get(opt_name)
        if value is not None:
            if isinstance(value, bool) and bool_trans:
                value = bool_trans.get(value)
            inp_dict[api_key] = value
    id_diff = recursive_diff(inp_dict, sub_payload)
    if id_diff and id_diff[0]:
        df = id_diff[0]
    sub_payload.update(inp_dict)
    return df


def validate_dependency(mparams):
    """Drop sub-options that are irrelevant given their controlling flags.

    Works on a deep copy so the caller's params are untouched. E.g. when
    DHCP is enabled the static addressing options are ignored rather than
    reported as changes.
    """
    params = copy.deepcopy(mparams)
    ipv4 = params.get('ipv4_configuration')
    if ipv4:
        rm_list = []
        dhcp = ["static_preferred_dns_server", "static_alternate_dns_server"]
        static = ["static_ip_address", "static_gateway", "static_subnet_mask"]
        bools = ["enable_dhcp", "use_dhcp_to_obtain_dns_server_address"]
        if ipv4.get("use_dhcp_to_obtain_dns_server_address") is True:
            rm_list.extend(dhcp)
        if ipv4.get("enable_dhcp") is True:
            rm_list.extend(static)
        if ipv4.get("enable_ipv4") is False:
            # IPv4 disabled: nothing else in the section matters.
            rm_list.extend(dhcp)
            rm_list.extend(static)
            rm_list.extend(bools)
        for prm in rm_list:
            ipv4.pop(prm, None)
    ipv6 = params.get('ipv6_configuration')
    if ipv6:
        rm_list = []
        dhcp = ["static_preferred_dns_server", "static_alternate_dns_server"]
        static = ["static_ip_address", "static_gateway", "static_prefix_length"]
        bools = ["enable_auto_configuration", "use_dhcpv6_to_obtain_dns_server_address"]
        if ipv6.get("use_dhcpv6_to_obtain_dns_server_address") is True:
            rm_list.extend(dhcp)
        if ipv6.get("enable_auto_configuration") is True:
            rm_list.extend(static)
        if ipv6.get("enable_ipv6") is False:
            rm_list.extend(dhcp)
            rm_list.extend(static)
            rm_list.extend(bools)
        for prm in rm_list:
            ipv6.pop(prm, None)
    vlan = params.get('management_vlan')
    if vlan and vlan.get('enable_vlan') is False:
        vlan.pop('vlan_id', None)
    dns = params.get('dns_configuration')
    if dns:
        if dns.get('auto_negotiation') is True:
            dns.pop('network_speed', None)
        if dns.get('use_dhcp_for_dns_domain_name') is True:
            dns.pop('dns_domain_name', None)
    return params


def update_chassis_payload(module, payload):
    """Merge the requested settings into a chassis network payload.

    :param payload: current chassis network settings (mutated in place).
    :return: dict of fields that actually change (empty means no-op).
    """
    ipv4 = {
        "enable_dhcp": "EnableDHCP",
        "enable_ipv4": "EnableIPv4",
        "static_alternate_dns_server": "StaticAlternateDNSServer",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPAddress",
        "static_preferred_dns_server": "StaticPreferredDNSServer",
        "static_subnet_mask": "StaticSubnetMask",
        "use_dhcp_to_obtain_dns_server_address": "UseDHCPObtainDNSServerAddresses"
    }
    ipv6 = {
        "enable_auto_configuration": "EnableAutoconfiguration",
        "enable_ipv6": "EnableIPv6",
        "static_alternate_dns_server": "StaticAlternateDNSServer",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPv6Address",
        "static_preferred_dns_server": "StaticPreferredDNSServer",
        "static_prefix_length": "StaticPrefixLength",
        "use_dhcpv6_to_obtain_dns_server_address": "UseDHCPv6ObtainDNSServerAddresses"
    }
    dns = {
        "auto_negotiation": "AutoNegotiation",
        "dns_domain_name": "DnsDomainName",
        "dns_name": "DnsName",
        "network_speed": "NetworkSpeed",
        "register_with_dns": "RegisterDNS",
        "use_dhcp_for_dns_domain_name": "UseDHCPForDomainName"
    }
    vlan = {"enable_vlan": "EnableVLAN", "vlan_id": "MgmtVLANId"}
    gnrl = payload.get('GeneralSettings')  # EnableNIC and Delay live here
    diff = {}
    mparams = validate_dependency(module.params)
    enable_nic = mparams.get('enable_nic')
    delay = mparams.get('delay')
    # NOTE(review): block structure reconstructed from a collapsed diff; the
    # per-section merges are assumed to apply only when the NIC is enabled,
    # while EnableNIC/Delay are always compared — confirm against upstream.
    if enable_nic:
        if mparams.get('ipv4_configuration'):
            diff.update(transform_diff(mparams.get('ipv4_configuration'), ipv4, payload.get('Ipv4Settings')))
        if mparams.get('ipv6_configuration'):
            diff.update(transform_diff(mparams.get('ipv6_configuration'), ipv6, payload.get('Ipv6Settings')))
        if mparams.get('dns_configuration'):
            diff.update(transform_diff(mparams.get('dns_configuration'), dns, payload.get('GeneralSettings')))
        if mparams.get('management_vlan'):
            diff.update(transform_diff(mparams.get('management_vlan'), vlan, payload))
    if gnrl.get('EnableNIC') != enable_nic:
        gnrl['EnableNIC'] = enable_nic
        diff.update({'EnableNIC': enable_nic})
    if delay != gnrl.get('Delay'):
        gnrl['Delay'] = delay
        diff.update({'Delay': delay})
    return diff
def update_server_payload(module, payload):
    """Merge the requested settings into a server (sled) network payload.

    Server payloads use string flags ('Enabled'/'Disabled') instead of
    booleans, hence the bool_trans mapping.

    :param payload: current server network settings (mutated in place).
    :return: dict of fields that actually change (empty means no-op).
    """
    ipv4 = {
        "enable_dhcp": "enableDHCPIPv4",
        "enable_ipv4": "enableIPv4",
        "static_alternate_dns_server": "staticAlternateDNSIPv4",
        "static_gateway": "staticGatewayIPv4",
        "static_ip_address": "staticIPAddressIPv4",
        "static_preferred_dns_server": "staticPreferredDNSIPv4",
        "static_subnet_mask": "staticSubnetMaskIPv4",
        "use_dhcp_to_obtain_dns_server_address": "useDHCPToObtainDNSIPv4"
    }
    ipv6 = {
        "enable_auto_configuration": "enableAutoConfigurationIPv6",
        "enable_ipv6": "enableIPv6",
        "static_alternate_dns_server": "staticAlternateDNSIPv6",
        "static_gateway": "staticGatewayIPv6",
        "static_ip_address": "staticIPAddressIPv6",
        "static_preferred_dns_server": "staticPreferredDNSIPv6",
        "static_prefix_length": "staticPrefixLengthIPv6",
        "use_dhcpv6_to_obtain_dns_server_address": "useDHCPToObtainDNSIPv6"
    }
    vlan = {"enable_vlan": "vlanEnable", "vlan_id": "vlanId"}
    diff = {}
    mparams = validate_dependency(module.params)
    enable_nic = mparams.get('enable_nic')
    bool_trans = {True: 'Enabled', False: 'Disabled'}
    if enable_nic:
        if mparams.get('ipv4_configuration'):
            diff.update(transform_diff(mparams.get('ipv4_configuration'), ipv4, payload, bool_trans))
        if mparams.get('ipv6_configuration'):
            diff.update(transform_diff(mparams.get('ipv6_configuration'), ipv6, payload, bool_trans))
        if mparams.get('management_vlan'):
            diff.update(transform_diff(mparams.get('management_vlan'), vlan, payload, bool_trans))
    # The NIC flag itself is also a string flag in server payloads.
    enable_nic = bool_trans.get(enable_nic)
    if payload.get('enableNIC') != enable_nic:
        payload['enableNIC'] = enable_nic
        diff.update({'enableNIC': enable_nic})
    return diff


def update_iom_payload(module, payload):
    """Merge the requested settings into an I/O-Module network payload.

    IOMs additionally take explicit DNS server entries; their ordering
    constraints (secondary requires primary, etc.) are enforced here.

    :param payload: current IOM network settings (mutated in place).
    :return: dict of fields that actually change (empty means no-op).
    """
    ipv4 = {
        "enable_dhcp": "EnableDHCP",
        "enable_ipv4": "EnableIPv4",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPAddress",
        "static_subnet_mask": "StaticSubnetMask",
    }
    ipv6 = {
        "enable_ipv6": "EnableIPv6",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPv6Address",
        "static_prefix_length": "StaticPrefixLength",
        "enable_auto_configuration": "UseDHCPv6"
    }
    dns = {"preferred_dns_server": "PrimaryDNS",
           "alternate_dns_server1": "SecondaryDNS",
           "alternate_dns_server2": "TertiaryDNS"}
    vlan = {"enable_vlan": "EnableMgmtVLANId", "vlan_id": "MgmtVLANId"}
    diff = {}
    mparams = validate_dependency(module.params)
    if mparams.get('ipv4_configuration'):
        diff.update(transform_diff(mparams.get('ipv4_configuration'), ipv4, payload.get('IomIPv4Settings')))
    if mparams.get('ipv6_configuration'):
        diff.update(transform_diff(mparams.get('ipv6_configuration'), ipv6, payload.get('IomIPv6Settings')))
    if mparams.get('management_vlan'):
        diff.update(transform_diff(mparams.get('management_vlan'), vlan, payload))
    if mparams.get('dns_server_settings'):
        df = transform_diff(mparams.get('dns_server_settings'), dns, payload.get('IomDNSSettings'))
        # Validate ordering on the merged result, not just the request.
        dns_iom = payload.get('IomDNSSettings')
        if dns_iom.get("SecondaryDNS") and not dns_iom.get("PrimaryDNS"):
            module.fail_json(msg=DNS_SETT_ERR1)
        if dns_iom.get("TertiaryDNS") and (not dns_iom.get("PrimaryDNS") or not dns_iom.get("SecondaryDNS")):
            module.fail_json(msg=DNS_SETT_ERR2)
        diff.update(df)
    return diff


def get_network_payload(module, rest_obj, dvc):
    """Fetch current settings, compute the diff, and return the PUT payload.

    Exits early when nothing changes, and in check mode when changes exist.

    :param dvc: device record; its 'Type' selects the payload builder.
    """
    resp = rest_obj.invoke_request('GET', NETWORK_SETTINGS.format(dvc.get('Id')))
    payload = rest_obj.strip_substr_dict(resp.json_data)
    updater = {
        CHASSIS: update_chassis_payload,
        SERVER: update_server_payload,
        IO_MODULE: update_iom_payload
    }
    diff = updater[dvc.get('Type')](module, payload)
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return payload


def main():
    """Module entry point: validate input, resolve the device, apply settings."""
    ipv4_options = {"enable_ipv4": {"required": True, "type": 'bool'},
                    "enable_dhcp": {"type": 'bool'},
                    "static_ip_address": {"type": 'str'},
                    "static_subnet_mask": {"type": 'str'},
                    "static_gateway": {"type": 'str'},
                    "use_dhcp_to_obtain_dns_server_address": {"type": 'bool'},
                    "static_preferred_dns_server": {"type": 'str'},
                    "static_alternate_dns_server": {"type": 'str'}}
    ipv6_options = {"enable_ipv6": {"required": True, "type": 'bool'},
                    "enable_auto_configuration": {"type": 'bool'},
                    "static_ip_address": {"type": 'str'},
                    "static_prefix_length": {"type": 'int'},
                    "static_gateway": {"type": 'str'},
                    "use_dhcpv6_to_obtain_dns_server_address": {"type": 'bool'},
                    "static_preferred_dns_server": {"type": 'str'},
                    "static_alternate_dns_server": {"type": 'str'}}
    dns_options = {"register_with_dns": {"type": 'bool'},
                   "use_dhcp_for_dns_domain_name": {"type": 'bool'},
                   "dns_name": {"type": 'str'},
                   "dns_domain_name": {"type": 'str'},
                   "auto_negotiation": {"type": 'bool'},
                   "network_speed": {"type": 'str', "choices": ['10_MB', '100_MB']}}
    management_vlan = {"enable_vlan": {"required": True, "type": 'bool'},
                       "vlan_id": {"type": 'int'}}
    dns_server_settings = {"preferred_dns_server": {"type": 'str'},
                           "alternate_dns_server1": {"type": 'str'},
                           "alternate_dns_server2": {"type": 'str'}}
    specs = {
        "enable_nic": {"type": 'bool', "default": True},
        "device_id": {"type": 'int'},
        "device_service_tag": {"type": 'str'},
        "delay": {"type": 'int', "default": 0},
        "ipv4_configuration":
            {"type": "dict", "options": ipv4_options,
             "required_if": [
                 ['enable_ipv4', True, ('enable_dhcp',), True],
                 ['enable_dhcp', False, ('static_ip_address', 'static_subnet_mask', "static_gateway"), False],
                 ['use_dhcp_to_obtain_dns_server_address', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]]
             },
        "ipv6_configuration":
            {"type": "dict", "options": ipv6_options,
             "required_if": [
                 ['enable_ipv6', True, ('enable_auto_configuration',), True],
                 ['enable_auto_configuration', False,
                  ('static_ip_address', 'static_prefix_length', "static_gateway"), False],
                 ['use_dhcpv6_to_obtain_dns_server_address', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]]
             },
        "dns_configuration":
            {"type": "dict", "options": dns_options,
             "required_if": [
                 ['register_with_dns', True, ('dns_name',), False],
                 ['use_dhcp_for_dns_domain_name', False, ('dns_domain_name',)],
                 ['auto_negotiation', False, ('network_speed',)]]
             },
        "management_vlan":
            {"type": "dict", "options": management_vlan,
             "required_if": [
                 ['enable_vlan', True, ('vlan_id',), True]]
             },
        "dns_server_settings":
            {"type": "dict", "options": dns_server_settings,
             "required_one_of": [("preferred_dns_server", "alternate_dns_server1", "alternate_dns_server2")]
             }
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[('device_id', 'device_service_tag')],
        mutually_exclusive=[('device_id', 'device_service_tag')],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            validate_input(module)
            dvc = get_device_details(module, rest_obj)
            if dvc.get('Type') in [SERVER, CHASSIS, IO_MODULE]:
                nw_setting = get_network_payload(module, rest_obj, dvc)
                resp = rest_obj.invoke_request('PUT', NETWORK_SETTINGS.format(dvc.get('Id')),
                                               data=nw_setting, api_timeout=API_TIMEOUT)
                module.exit_json(msg=SUCCESS_MSG, network_details=resp.json_data, changed=True)
            else:
                module.fail_json(msg=NON_CONFIG_NETWORK.format(dvc.get('Model')))
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported without failing the task.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py new file mode 100644 index 00000000..81475d48 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py @@ -0,0 +1,398 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_network_services +short_description: Configure chassis network services settings on OpenManage Enterprise Modular +description: This module allows to configure the network services on OpenManage Enterprise Modular. +version_added: "4.3.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: int + description: + - The ID of the chassis for which the settings need to be updated. + - If the device ID is not specified, this module updates the network services settings for the I(hostname). + - I(device_id) is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - The service tag of the chassis for which the setting needs to be updated. + - If the device service tag is not specified, this module updates the network + services settings for the I(hostname). 
+ - I(device_service_tag) is mutually exclusive with I(device_id). + snmp_settings: + type: dict + description: The settings for SNMP configuration. + suboptions: + enabled: + type: bool + required: true + description: Enables or disables the SNMP settings. + port_number: + type: int + description: The SNMP port number. + community_name: + type: str + description: + - The SNMP community string. + - Required when I(enabled) is C(true). + ssh_settings: + type: dict + description: The settings for SSH configuration. + suboptions: + enabled: + required: true + type: bool + description: Enables or disables the SSH settings. + port_number: + type: int + description: The port number for SSH service. + max_sessions: + type: int + description: Number of SSH sessions. + max_auth_retries: + type: int + description: The number of retries when the SSH session fails. + idle_timeout: + type: float + description: SSH idle timeout in minutes. + remote_racadm_settings: + type: dict + description: The settings for remote RACADM configuration. + suboptions: + enabled: + type: bool + required: true + description: Enables or disables the remote RACADM settings. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Update network services settings of a chassis using the device ID + dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + snmp_settings: + enabled: true + port_number: 161 + community_name: public + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: false + +- name: Update network services settings of a chassis using the device service tag. 
+ dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.2" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + snmp_settings: + enabled: false + ssh_settings: + enabled: true + port_number: 22 + max_sessions: 1 + max_auth_retries: 3 + idle_timeout: 1 + remote_racadm_settings: + enabled: false + +- name: Update network services settings of the host chassis. + dellemc.openmanage.ome_device_network_services: + hostname: "192.168.0.3" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + snmp_settings: + enabled: false + ssh_settings: + enabled: false + remote_racadm_settings: + enabled: true +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the network services settings. + returned: always + sample: "Successfully updated the network services settings." +network_services_details: + type: dict + description: returned when network services settings are updated successfully. + returned: success + sample: { + "EnableRemoteRacadm": true, + "SettingType": "NetworkServices", + "SnmpConfiguration": { + "PortNumber": 161, + "SnmpEnabled": true, + "SnmpV1V2Credential": { + "CommunityName": "public" + } + }, + "SshConfiguration": { + "IdleTimeout": 60, + "MaxAuthRetries": 3, + "MaxSessions": 1, + "PortNumber": 22, + "SshEnabled": false + } + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CAPP1042", + "RelatedProperties": [], + "Message": "Unable to update the network configuration because the SNMP PortNumber is already in use.", + "MessageArgs": ["SNMP PortNumber"], + "Severity": "Informational", + "Resolution": "Enter a different port number and retry the operation.", + } + ] + } + } +""" + + +import json +import socket +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +DOMAIN_URI = "ManagementDomainService/Domains" +DEVICE_URI = "DeviceService/Devices" +NETWORK_SERVICE_API = "DeviceService/Devices({0})/Settings('NetworkServices')" +CONFIG_FAIL_MSG = "one of the following is required: snmp_settings, ssh_settings, remote_racadm_settings" +DOMAIN_FAIL_MSG = "The device location settings operation is supported only on " \ + "OpenManage Enterprise Modular." +FETCH_FAIL_MSG = "Failed to retrieve the device information." +DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid." +NETWORK_SERVICE_FAIL_MSG = "Unable to complete the operation because the network services settings " \ + "are not supported on the specified device." +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." +SUCCESS_MSG = "Successfully updated the network services settings." 
+ + +def check_domain_service(module, rest_obj): + try: + rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5) + except HTTPError as err: + err_message = json.loads(err) + if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006": + module.fail_json(msg=DOMAIN_FAIL_MSG) + return + + +def get_ip_from_host(hostname): + ipaddr = hostname + try: + result = socket.getaddrinfo(hostname, None) + last_element = result[-1] + ip_address = last_element[-1][0] + if ip_address: + ipaddr = ip_address + except socket.gaierror: + ipaddr = hostname + except Exception: + ipaddr = hostname + return ipaddr + + +def get_chassis_device(module, rest_obj): + key, value = None, None + ipaddress = get_ip_from_host(module.params["hostname"]) + resp = rest_obj.invoke_request("GET", DOMAIN_URI) + for data in resp.json_data["value"]: + if ipaddress in data["PublicAddress"]: + key, value = ("Id", data["DeviceId"]) + break + else: + module.fail_json(msg=FETCH_FAIL_MSG) + return key, value + + +def check_mode_validation(module, loc_data, rest_obj): + req_snmp, req_ssh, req_comm_str, req_racadm = {}, {}, {}, {} + exist_snmp, exist_ssh, exist_comm_str, exist_racadm = {}, {}, {}, {} + payload = {"SettingType": "NetworkServices"} + snmp_enabled = module.params.get("snmp_settings") + if snmp_enabled is not None and snmp_enabled["enabled"] is True: + req_snmp.update({"SnmpEnabled": snmp_enabled["enabled"]}) + req_comm_str.update({"CommunityName": module.params["snmp_settings"]["community_name"]}) + exist_snmp.update({"SnmpEnabled": loc_data["SnmpConfiguration"]["SnmpEnabled"]}) + exist_comm_str.update({"CommunityName": loc_data["SnmpConfiguration"]["SnmpV1V2Credential"]["CommunityName"]}) + elif snmp_enabled is not None and snmp_enabled["enabled"] is False: + req_snmp.update({"SnmpEnabled": snmp_enabled["enabled"]}) + exist_snmp.update({"SnmpEnabled": loc_data["SnmpConfiguration"]["SnmpEnabled"]}) + + if snmp_enabled is not None and snmp_enabled["enabled"] is True and 
snmp_enabled.get("port_number") is not None: + req_snmp.update({"PortNumber": snmp_enabled.get("port_number")}) + exist_snmp.update({"PortNumber": loc_data["SnmpConfiguration"]["PortNumber"]}) + ssh_enabled = module.params.get("ssh_settings") + if ssh_enabled is not None and ssh_enabled["enabled"] is True: + req_ssh.update({"SshEnabled": ssh_enabled["enabled"]}) + exist_ssh.update({"SshEnabled": loc_data["SshConfiguration"]["SshEnabled"]}) + elif ssh_enabled is not None and ssh_enabled["enabled"] is False: + req_ssh.update({"SshEnabled": ssh_enabled["enabled"]}) + exist_ssh.update({"SshEnabled": loc_data["SshConfiguration"]["SshEnabled"]}) + + if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("port_number") is not None: + req_ssh.update({"PortNumber": module.params["ssh_settings"]["port_number"]}) + exist_ssh.update({"PortNumber": loc_data["SshConfiguration"]["PortNumber"]}) + if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("max_sessions") is not None: + req_ssh.update({"MaxSessions": module.params["ssh_settings"]["max_sessions"]}) + exist_ssh.update({"MaxSessions": loc_data["SshConfiguration"]["MaxSessions"]}) + if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("max_auth_retries") is not None: + req_ssh.update({"MaxAuthRetries": module.params["ssh_settings"]["max_auth_retries"]}) + exist_ssh.update({"MaxAuthRetries": loc_data["SshConfiguration"]["MaxAuthRetries"]}) + if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("idle_timeout") is not None: + req_ssh.update({"IdleTimeout": int(module.params["ssh_settings"]["idle_timeout"] * 60)}) + exist_ssh.update({"IdleTimeout": int(loc_data["SshConfiguration"]["IdleTimeout"])}) + recadm_enabled = module.params.get("remote_racadm_settings") + if recadm_enabled is not None and recadm_enabled["enabled"] is True: + req_racadm = {"EnableRemoteRacadm": recadm_enabled["enabled"]} + exist_racadm = 
{"EnableRemoteRacadm": loc_data["EnableRemoteRacadm"]} + elif recadm_enabled is not None and recadm_enabled["enabled"] is False: + req_racadm = {"EnableRemoteRacadm": recadm_enabled["enabled"]} + exist_racadm = {"EnableRemoteRacadm": loc_data["EnableRemoteRacadm"]} + changes = [bool(set(req_snmp.items()) ^ set(exist_snmp.items())) or + bool(set(req_ssh.items()) ^ set(exist_ssh.items())) or + bool(set(req_comm_str.items()) ^ set(exist_comm_str.items())) or + bool(set(req_racadm.items()) ^ set(exist_racadm.items()))] + if module.check_mode and any(changes) is True: + loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60 + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif module.check_mode and all(changes) is False: + loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60 + module.exit_json(msg=NO_CHANGES_FOUND) + elif not module.check_mode and all(changes) is False: + loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60 + module.exit_json(msg=NO_CHANGES_FOUND) + else: + payload.update(loc_data) + payload["SnmpConfiguration"].update(req_snmp) if req_snmp else None + payload["SnmpConfiguration"]["SnmpV1V2Credential"].update(req_comm_str) if req_comm_str else None + payload["SshConfiguration"].update(req_ssh) if req_ssh else None + payload.update(req_racadm) if req_racadm else None + return payload + + +def fetch_device_details(module, rest_obj): + device_id, tag, final_resp = module.params.get("device_id"), module.params.get("device_service_tag"), {} + if device_id is None and tag is None: + key, value = get_chassis_device(module, rest_obj) + device_id = value + else: + key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag) + param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value) + resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": 
param_value}) + resp_data = resp.json_data.get("value") + rename_key = "id" if key == "Id" else "service tag" + if not resp_data: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag: + device_id = resp_data[0]["Id"] + elif key == "Id" and resp_data[0]["Id"] == device_id: + device_id = resp_data[0]["Id"] + else: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + try: + loc_resp = rest_obj.invoke_request("GET", NETWORK_SERVICE_API.format(device_id)) + except HTTPError as err: + if err.code == 404: + module.fail_json(msg=NETWORK_SERVICE_FAIL_MSG) + err_message = json.load(err) + error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo') + if error_msg and error_msg[0].get("MessageId") == "CGEN1004": + module.fail_json(msg=NETWORK_SERVICE_FAIL_MSG) + else: + loc_resp_data = rest_obj.strip_substr_dict(loc_resp.json_data) + payload = check_mode_validation(module, loc_resp_data, rest_obj) + final_resp = rest_obj.invoke_request("PUT", NETWORK_SERVICE_API.format(device_id), data=payload) + return final_resp + + +def main(): + snmp_options = {"enabled": {"type": "bool", "required": True}, + "port_number": {"type": "int", "required": False}, + "community_name": {"type": "str", "required": False}} + ssh_options = {"enabled": {"type": "bool", "required": True}, + "port_number": {"type": "int", "required": False}, + "max_sessions": {"type": "int", "required": False}, + "max_auth_retries": {"type": "int", "required": False}, + "idle_timeout": {"type": "float", "required": False}} + racadm_options = {"enabled": {"type": "bool", "required": True}} + specs = { + "device_id": {"required": False, "type": "int"}, + "device_service_tag": {"required": False, "type": "str"}, + "snmp_settings": {"type": "dict", "required": False, "options": snmp_options, + "required_if": [["enabled", True, ("community_name",)]]}, + "ssh_settings": {"type": "dict", "required": False, 
"options": ssh_options}, + "remote_racadm_settings": {"type": "dict", "required": False, "options": racadm_options}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[('device_id', 'device_service_tag')], + required_one_of=[["snmp_settings", "ssh_settings", "remote_racadm_settings"]], + supports_check_mode=True, + ) + if not any([module.params.get("snmp_settings"), module.params.get("ssh_settings"), + module.params.get("remote_racadm_settings")]): + module.fail_json(msg=CONFIG_FAIL_MSG) + try: + with RestOME(module.params, req_session=True) as rest_obj: + check_domain_service(module, rest_obj) + resp = fetch_device_details(module, rest_obj) + resp_data = resp.json_data + resp_data["SshConfiguration"]["IdleTimeout"] = resp_data["SshConfiguration"]["IdleTimeout"] / 60 + module.exit_json(msg=SUCCESS_MSG, network_services_details=resp_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py new file mode 100644 index 00000000..ec99e693 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_power_settings +short_description: Configure chassis power settings on OpenManage Enterprise Modular +description: This module allows to configure the chassis power settings on OpenManage Enterprise Modular. +version_added: "4.2.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: int + description: + - The ID of the chassis for which the settings need to be updated. + - If the device ID is not specified, this module updates the power settings for the I(hostname). + - I(device_id) is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - The service tag of the chassis for which the setting needs to be updated. + - If the device service tag is not specified, this module updates the power settings for the I(hostname). + - I(device_service_tag) is mutually exclusive with I(device_id). + power_configuration: + description: The settings for Power configuration. + type: dict + suboptions: + enable_power_cap: + type: bool + description: Enables or disables the Power Cap Settings. + required: true + power_cap: + type: int + description: + - The maximum power consumption limit of the device. Specify the consumption limit in Watts. + - This is required if I(enable_power_cap) is set to true. + redundancy_configuration: + description: The settings for Redundancy configuration. + type: dict + suboptions: + redundancy_policy: + type: str + description: + - The choices to configure the redundancy policy. + - C(NO_REDUNDANCY) no redundancy policy is used. + - C(GRID_REDUNDANCY) to distributes power by dividing the PSUs into two grids. + - C(PSU_REDUNDANCY) to distribute power between all the PSUs. 
+ choices: ['NO_REDUNDANCY', 'GRID_REDUNDANCY', 'PSU_REDUNDANCY'] + default: NO_REDUNDANCY + hot_spare_configuration: + description: The settings for Hot Spare configuration. + type: dict + suboptions: + enable_hot_spare: + type: bool + description: Enables or disables Hot Spare configuration to facilitate voltage regulation when power + utilized by the Power Supply Unit (PSU) is low. + required: true + primary_grid: + type: str + description: + - The choices for PSU grid. + - C(GRID_1) Hot Spare on Grid 1. + - C(GRID_2) Hot Spare on Grid 2. + choices: ['GRID_1', 'GRID_2'] + default: GRID_1 +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Update power configuration settings of a chassis using the device ID. + dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25011 + power_configuration: + enable_power_cap: true + power_cap: 3424 + +- name: Update redundancy configuration settings of a chassis using the device service tag. + dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: GHRT2RL + redundancy_configuration: + redundancy_policy: GRID_REDUNDANCY + +- name: Update hot spare configuration settings of a chassis using device ID. + dellemc.openmanage.ome_device_power_settings: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25012 + hot_spare_configuration: + enable_hot_spare: true + primary_grid: GRID_1 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device power settings. 
+ returned: always + sample: "Successfully updated the power settings." +power_details: + type: dict + description: returned when power settings are updated successfully. + returned: success + sample: { + "EnableHotSpare": true, + "EnablePowerCapSettings": true, + "MaxPowerCap": "3424", + "MinPowerCap": "3291", + "PowerCap": "3425", + "PrimaryGrid": "GRID_1", + "RedundancyPolicy": "NO_REDUNDANCY", + "SettingType": "Power" + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +import json +import socket +import copy +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +POWER_API = "DeviceService/Devices({0})/Settings('Power')" +DEVICE_URI = "DeviceService/Devices" +DOMAIN_URI = "ManagementDomainService/Domains" +DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid." +CONFIG_FAIL_MSG = "one of the following is required: power_configuration, " \ + "redundancy_configuration, hot_spare_configuration" +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." +SUCCESS_MSG = "Successfully updated the power settings." +FETCH_FAIL_MSG = "Failed to fetch the device information." 
+POWER_FAIL_MSG = "Unable to complete the operation because the power settings " \ + "are not supported on the specified device." +DOMAIN_FAIL_MSG = "The device location settings operation is supported only on " \ + "OpenManage Enterprise Modular." + + +def check_domain_service(module, rest_obj): + try: + rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5) + except HTTPError as err: + err_message = json.load(err) + if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006": + module.fail_json(msg=DOMAIN_FAIL_MSG) + return + + +def get_ip_from_host(hostname): + ipaddr = hostname + try: + result = socket.getaddrinfo(hostname, None) + last_element = result[-1] + ip_address = last_element[-1][0] + if ip_address: + ipaddr = ip_address + except socket.gaierror: + ipaddr = hostname + except Exception: + ipaddr = hostname + return ipaddr + + +def get_chassis_device(module, rest_obj): + key, value = None, None + ipaddress = get_ip_from_host(module.params["hostname"]) + resp = rest_obj.invoke_request("GET", DOMAIN_URI) + for data in resp.json_data["value"]: + if ipaddress in data["PublicAddress"]: + key, value = ("Id", data["DeviceId"]) + break + else: + module.fail_json(msg=FETCH_FAIL_MSG) + return key, value + + +def check_mode_validation(module, loc_data): + power_data = {"PowerCap": loc_data.get("PowerCap"), "MinPowerCap": loc_data["MinPowerCap"], + "MaxPowerCap": loc_data["MaxPowerCap"], "RedundancyPolicy": loc_data.get("RedundancyPolicy"), + "EnablePowerCapSettings": loc_data["EnablePowerCapSettings"], + "EnableHotSpare": loc_data["EnableHotSpare"], "PrimaryGrid": loc_data.get("PrimaryGrid")} + cloned_data = copy.deepcopy(power_data) + if module.params.get("power_configuration") is not None: + if module.params["power_configuration"]["enable_power_cap"] is None: + module.fail_json(msg="missing parameter: enable_power_cap") + enable_power_cap = module.params["power_configuration"]["enable_power_cap"] + power_cap = 
module.params["power_configuration"].get("power_cap") + if enable_power_cap is True: + cloned_data.update({"EnablePowerCapSettings": enable_power_cap, "PowerCap": str(power_cap)}) + else: + cloned_data.update({"EnablePowerCapSettings": enable_power_cap}) + if module.params.get("redundancy_configuration") is not None: + cloned_data.update({"RedundancyPolicy": module.params["redundancy_configuration"]["redundancy_policy"]}) + if module.params.get("hot_spare_configuration") is not None: + if module.params["hot_spare_configuration"]["enable_hot_spare"] is None: + module.fail_json(msg="missing parameter: enable_hot_spare") + enable_hot_spare = module.params["hot_spare_configuration"]["enable_hot_spare"] + primary_grid = module.params["hot_spare_configuration"].get("primary_grid") + if enable_hot_spare is True: + cloned_data.update({"EnableHotSpare": enable_hot_spare, "PrimaryGrid": primary_grid}) + else: + cloned_data.update({"EnableHotSpare": enable_hot_spare}) + power_diff = bool(set(power_data.items()) ^ set(cloned_data.items())) + if not power_diff and not module.check_mode: + module.exit_json(msg=NO_CHANGES_FOUND) + elif not power_diff and module.check_mode: + module.exit_json(msg=NO_CHANGES_FOUND) + elif power_diff and module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + cloned_data.update({"SettingType": "Power"}) + return cloned_data + + +def fetch_device_details(module, rest_obj): + device_id, tag, final_resp = module.params.get("device_id"), module.params.get("device_service_tag"), {} + if device_id is None and tag is None: + key, value = get_chassis_device(module, rest_obj) + device_id = value + else: + key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag) + param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value) + resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value}) + resp_data = resp.json_data.get("value") + rename_key = "id" 
if key == "Id" else "service tag" + if not resp_data: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag: + device_id = resp_data[0]["Id"] + elif key == "Id" and resp_data[0]["Id"] == device_id: + device_id = resp_data[0]["Id"] + else: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + try: + loc_resp = rest_obj.invoke_request("GET", POWER_API.format(device_id)) + except HTTPError as err: + if err.code == 404: + module.fail_json(msg=POWER_FAIL_MSG) + err_message = json.load(err) + error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo') + if error_msg and error_msg[0].get("MessageId") == "CGEN1004": + module.fail_json(msg=POWER_FAIL_MSG) + else: + payload = check_mode_validation(module, loc_resp.json_data) + final_resp = rest_obj.invoke_request("PUT", POWER_API.format(device_id), data=payload) + return final_resp + + +def main(): + power_options = {"enable_power_cap": {"type": "bool", "required": True}, + "power_cap": {"type": "int", "required": False}} + redundancy_options = {"redundancy_policy": {"type": "str", "default": "NO_REDUNDANCY", + "choices": ["NO_REDUNDANCY", "GRID_REDUNDANCY", "PSU_REDUNDANCY"]}} + hot_spare_options = {"enable_hot_spare": {"required": True, "type": "bool"}, + "primary_grid": {"required": False, "type": "str", "default": "GRID_1", + "choices": ["GRID_1", "GRID_2"]}} + specs = { + "device_id": {"required": False, "type": "int"}, + "device_service_tag": {"required": False, "type": "str"}, + "power_configuration": {"type": "dict", "required": False, "options": power_options, + "required_if": [["enable_power_cap", True, ("power_cap",), True]]}, + "redundancy_configuration": {"type": "dict", "required": False, "options": redundancy_options}, + "hot_spare_configuration": {"type": "dict", "required": False, "options": hot_spare_options, + "required_if": [["enable_hot_spare", True, ("primary_grid",)]]}, + } + 
specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[('device_id', 'device_service_tag')], + required_one_of=[["power_configuration", "redundancy_configuration", "hot_spare_configuration"]], + supports_check_mode=True, + ) + try: + if not any([module.params.get("power_configuration"), module.params.get("redundancy_configuration"), + module.params.get("hot_spare_configuration")]): + module.fail_json(msg=CONFIG_FAIL_MSG) + with RestOME(module.params, req_session=True) as rest_obj: + check_domain_service(module, rest_obj) + resp = fetch_device_details(module, rest_obj) + module.exit_json(msg=SUCCESS_MSG, power_details=resp.json_data, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py new file mode 100644 index 00000000..183b7f67 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py @@ -0,0 +1,674 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_device_quick_deploy +short_description: Configure Quick Deploy settings on OpenManage Enterprise Modular. 
+description: This module allows to configure the Quick Deploy settings of the server or IOM + on OpenManage Enterprise Modular. +version_added: "5.0.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: int + description: + - The ID of the chassis for which the Quick Deploy settings to be deployed. + - If the device ID is not specified, this module updates the Quick Deploy settings for the I(hostname). + - I(device_id) is mutually exclusive with I(device_service_tag). + device_service_tag: + type: str + description: + - The service tag of the chassis for which the Quick Deploy settings to be deployed. + - If the device service tag is not specified, this module updates the Quick Deploy settings for the I(hostname). + - I(device_service_tag) is mutually exclusive with I(device_id). + setting_type: + type: str + required: True + choices: [ServerQuickDeploy, IOMQuickDeploy] + description: + - The type of the Quick Deploy settings to be applied. + - C(ServerQuickDeploy) to apply the server Quick Deploy settings. + - C(IOMQuickDeploy) to apply the IOM Quick Deploy settings. + job_wait: + type: bool + description: Determines whether to wait for the job completion or not. + default: True + job_wait_timeout: + type: int + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + default: 120 + quick_deploy_options: + type: dict + required: True + description: The Quick Deploy settings for server and IOM quick deploy. + suboptions: + password: + type: str + description: + - The password to login to the server or IOM. + - The module will always report change when I(password) option is added. + ipv4_enabled: + type: bool + description: Enables or disables the IPv4 network. + ipv4_network_type: + type: str + choices: [Static, DHCP] + description: + - IPv4 network type. 
+ - I(ipv4_network_type) is required if I(ipv4_enabled) is C(True). + - C(Static) to configure the static IP settings. + - C(DHCP) to configure the Dynamic IP settings. + ipv4_subnet_mask: + type: str + description: + - IPv4 subnet mask. + - I(ipv4_subnet_mask) is required if I(ipv4_network_type) is C(Static). + ipv4_gateway: + type: str + description: + - IPv4 gateway. + - I(ipv4_gateway) is required if I(ipv4_network_type) is C(Static). + ipv6_enabled: + type: bool + description: Enables or disables the IPv6 network. + ipv6_network_type: + type: str + choices: [Static, DHCP] + description: + - IPv6 network type. + - I(ipv6_network_type) is required if I(ipv6_enabled) is C(True). + - C(Static) to configure the static IP settings. + - C(DHCP) to configure the Dynamic IP settings. + ipv6_prefix_length: + type: int + description: + - IPV6 prefix length. + - I(ipv6_prefix_length) is required if I(ipv6_network_type) is C(Static). + ipv6_gateway: + type: str + description: + - IPv6 gateway. + - I(ipv6_gateway) is required if I(ipv6_network_type) is C(Static). + slots: + type: list + elements: dict + description: The slot configuration for the server or IOM. + suboptions: + slot_id: + type: int + required: True + description: The ID of the slot. + slot_ipv4_address: + type: str + description: The IPv4 address of the slot. + slot_ipv6_address: + type: str + description: The IPv6 address of the slot. + vlan_id: + type: int + description: The ID of the VLAN. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports C(check_mode). + - The module will always report change when I(password) option is added. 
+ - If the chassis is a member of a multi-chassis group and it is assigned as a backup + lead chassis, the operations performed on the chassis using this module may + conflict with the management operations performed on the chassis through the lead chassis. +""" + +EXAMPLES = """ +--- +- name: Configure server Quick Deploy settings of the chassis using device ID. + dellemc.openmanage.ome_device_quick_deploy: + hostname: "192.168.0.1" + username: "username" + password: "password" + device_id: 25011 + setting_type: ServerQuickDeploy + ca_path: "/path/to/ca_cert.pem" + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 + +- name: Configure server Quick Deploy settings of the chassis using device service tag. + dellemc.openmanage.ome_device_quick_deploy: + hostname: "192.168.0.1" + username: "username" + password: "password" + device_service_tag: GHRT2RL + setting_type: IOMQuickDeploy + ca_path: "/path/to/ca_cert.pem" + quick_deploy_options: + password: "password" + ipv4_enabled: True + ipv4_network_type: Static + ipv4_subnet_mask: 255.255.255.0 + ipv4_gateway: 192.168.0.1 + ipv6_enabled: True + ipv6_network_type: Static + ipv6_prefix_length: 1 + ipv6_gateway: "::" + slots: + - slot_id: 1 + slot_ipv4_address: 192.168.0.2 + slot_ipv6_address: "::" + vlan_id: 1 + - slot_id: 2 + slot_ipv4_address: 192.168.0.3 + slot_ipv6_address: "::" + vlan_id: 2 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device quick deploy settings. + returned: always + sample: "Successfully deployed the quick deploy settings." 
+job_id: + type: int + description: The job ID of the submitted quick deploy job. + returned: when quick deploy job is submitted. + sample: 1234 +quick_deploy_settings: + type: dict + description: returned when quick deploy settings are deployed successfully. + returned: success + sample: { + "DeviceId": 25011, + "SettingType": "ServerQuickDeploy", + "ProtocolTypeV4": true, + "NetworkTypeV4": "Static", + "IpV4Gateway": 192.168.0.1, + "IpV4SubnetMask": "255.255.255.0", + "ProtocolTypeV6": true, + "NetworkTypeV6": "Static", + "PrefixLength": "2", + "IpV6Gateway": "::", + "slots": [ + { + "DeviceId": 25011, + "DeviceCapabilities": [18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 41, 8, 7, 4, 3, 2, 1, 31, 30], + "DeviceIPV4Address": "192.168.0.2", + "DeviceIPV6Address": "::", + "Dhcpipv4": "Disabled", + "Dhcpipv6": "Disabled", + "Ipv4Enabled": "Enabled", + "Ipv6Enabled": "Enabled", + "Model": "PowerEdge MX840c", + "SlotIPV4Address": "192.168.0.2", + "SlotIPV6Address": "::", + "SlotId": 1, + "SlotSelected": true, + "SlotSettingsApplied": true, + "SlotType": "2000", + "Type": "1000", + "VlanId": "1" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 2, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 3, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 4, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 5, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": 
"0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 6, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 7, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + }, + { + "DeviceId": 0, + "Model": "", + "SlotIPV4Address": "0.0.0.0", + "SlotIPV6Address": "::", + "SlotId": 8, + "SlotSelected": false, + "SlotSettingsApplied": false, + "SlotType": "2000", + "Type": "0" + } + ] + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +import copy +import json +import socket +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +DOMAIN_URI = "ManagementDomainService/Domains" +DEVICE_URI = "DeviceService/Devices" +QUICK_DEPLOY_API = "DeviceService/Devices({0})/Settings('{1}')" + +DOMAIN_FAIL_MSG = "The operation to configure the Quick Deploy settings is supported only on " \ + "OpenManage Enterprise Modular." +IP_FAIL_MSG = "Invalid '{0}' address provided for the {1}." +FETCH_FAIL_MSG = "Unable to retrieve the device information." 
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid." +QUICK_DEPLOY_FAIL_MSG = "Unable to complete the operation because the {0} configuration settings " \ + "are not supported on the specified device." +INVALID_SLOT_MSG = "Unable to complete the operation because the entered slot(s) '{0}' does not exist." +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." +SUCCESS_MSG = "Successfully deployed the Quick Deploy settings." +FAIL_MSG = "Unable to deploy the Quick Deploy settings." +QUICK_DEPLOY_JOB_DESC = "The Quick Deploy job is initiated from the OpenManage Ansible Module collections." +JOB_MSG = "Successfully submitted the Quick Deploy job settings." + + +def validate_ip_address(address, flag): + value = True + try: + if flag == "IPV4": + socket.inet_aton(address) + value = address.count('.') == 3 + else: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + value = False + return value + + +def ip_address_field(module, field, deploy_options, slot=False): + module_params = deploy_options + if slot: + module_params = deploy_options + for val in field: + field_value = module_params.get(val[0]) + if field_value is not None: + valid = validate_ip_address(module_params.get(val[0]), val[1]) + if valid is False: + module.fail_json(msg=IP_FAIL_MSG.format(field_value, val[0])) + return + + +def check_domain_service(module, rest_obj): + try: + rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5) + except HTTPError as err: + err_message = json.load(err) + if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006": + module.fail_json(msg=DOMAIN_FAIL_MSG) + return + + +def get_ip_from_host(hostname): + ipaddr = hostname + try: + result = socket.getaddrinfo(hostname, None) + last_element = result[-1] + ip_address = last_element[-1][0] + if ip_address: + ipaddr = ip_address + except socket.gaierror: + ipaddr = hostname + except 
Exception: + ipaddr = hostname + return ipaddr + + +def get_chassis_device(module, rest_obj): + key, value = None, None + ipaddress = get_ip_from_host(module.params["hostname"]) + resp = rest_obj.invoke_request("GET", DOMAIN_URI) + for data in resp.json_data["value"]: + if ipaddress in data["PublicAddress"]: + key, value = ("Id", data["DeviceId"]) + break + else: + module.fail_json(msg=FETCH_FAIL_MSG) + return key, value + + +def check_mode_validation(module, deploy_data): + deploy_options = module.params.get("quick_deploy_options") + req_data, req_payload = {}, {} + if deploy_options.get("password") is not None: + req_data["rootCredential"] = deploy_options.get("password") + ipv4_enabled = deploy_options.get("ipv4_enabled") + ipv4_enabled_deploy = deploy_data["ProtocolTypeV4"] + ipv6_enabled_deploy = deploy_data["ProtocolTypeV6"] + ipv4_nt_deploy = deploy_data.get("NetworkTypeV4") + ipv6_nt_deploy = deploy_data.get("NetworkTypeV6") + if ipv4_enabled is not None and ipv4_enabled is True or \ + ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True: + req_data["ProtocolTypeV4"] = None + if ipv4_enabled is not None: + req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower() + ipv4_network_type = deploy_options.get("ipv4_network_type") + req_data["NetworkTypeV4"] = ipv4_network_type + if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static": + req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask") + req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway") + elif ipv4_enabled is not None and ipv4_enabled is False: + req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower() + ipv6_enabled = deploy_options.get("ipv6_enabled") + if ipv6_enabled is not None and ipv6_enabled is True or \ + ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True: + req_data["ProtocolTypeV6"] = None + if ipv6_enabled is not None: + req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower() + ipv6_network_type = 
deploy_options.get("ipv6_network_type") + req_data["NetworkTypeV6"] = ipv6_network_type + if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static": + req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length") + if deploy_options.get("ipv6_prefix_length") is not None: + req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length")) + req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway") + elif ipv6_enabled is not None and ipv6_enabled is False: + req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower() + resp_data = { + "ProtocolTypeV4": str(ipv4_enabled_deploy).lower(), "NetworkTypeV4": deploy_data.get("NetworkTypeV4"), + "IpV4SubnetMask": deploy_data.get("IpV4SubnetMask"), "IpV4Gateway": deploy_data.get("IpV4Gateway"), + "ProtocolTypeV6": str(ipv6_enabled_deploy).lower(), "NetworkTypeV6": deploy_data.get("NetworkTypeV6"), + "PrefixLength": deploy_data.get("PrefixLength"), "IpV6Gateway": deploy_data.get("IpV6Gateway")} + resp_filter_data = dict([(k, v) for k, v in resp_data.items() if v is not None]) + req_data_filter = dict([(k, v) for k, v in req_data.items() if v is not None]) + diff_changes = [bool(set(resp_filter_data.items()) ^ set(req_data_filter.items()))] + req_slot_payload, invalid_slot = [], [] + slots = deploy_options.get("slots") + if slots is not None: + exist_slot = deploy_data.get("Slots") + for each in slots: + exist_filter_slot = list(filter(lambda d: d["SlotId"] in [each["slot_id"]], exist_slot)) + if exist_filter_slot: + req_slot_1 = {"SlotId": each["slot_id"], "SlotIPV4Address": each.get("slot_ipv4_address"), + "SlotIPV6Address": each.get("slot_ipv6_address"), "VlanId": each.get("vlan_id")} + if each.get("vlan_id") is not None: + req_slot_1.update({"VlanId": str(each.get("vlan_id"))}) + req_filter_slot = dict([(k, v) for k, v in req_slot_1.items() if v is not None]) + exist_slot_1 = {"SlotId": exist_filter_slot[0]["SlotId"], + "SlotIPV4Address": 
exist_filter_slot[0]["SlotIPV4Address"], + "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"], + "VlanId": exist_filter_slot[0]["VlanId"]} + exist_filter_slot = dict([(k, v) for k, v in exist_slot_1.items() if v is not None]) + cp_exist_filter_slot = copy.deepcopy(exist_filter_slot) + cp_exist_filter_slot.update(req_filter_slot) + diff_changes.append(bool(set(cp_exist_filter_slot.items()) ^ set(exist_filter_slot.items()))) + req_slot_payload.append(cp_exist_filter_slot) + else: + invalid_slot.append(each["slot_id"]) + if invalid_slot: + module.fail_json(msg=INVALID_SLOT_MSG.format(", ".join(map(str, invalid_slot)))) + if module.check_mode and any(diff_changes) is True: + module.exit_json(msg=CHANGES_FOUND, changed=True, quick_deploy_settings=deploy_data) + elif (module.check_mode and any(diff_changes) is False) or \ + (not module.check_mode and any(diff_changes) is False): + module.exit_json(msg=NO_CHANGES_FOUND, quick_deploy_settings=deploy_data) + req_payload.update(resp_filter_data) + req_payload.update(req_data_filter) + return req_payload, req_slot_payload + + +def job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data): + job_params = [] + job_params.append({"Key": "protocolTypeV4", "Value": payload["ProtocolTypeV4"]}) + job_params.append({"Key": "protocolTypeV6", "Value": payload["ProtocolTypeV6"]}) + s_type = "SERVER_QUICK_DEPLOY" if settings_type == "ServerQuickDeploy" else "IOM_QUICK_DEPLOY" + job_params.append({"Key": "operationName", "Value": "{0}".format(s_type)}) + job_params.append({"Key": "deviceId", "Value": "{0}".format(device_id)}) + if payload.get("rootCredential") is not None: + job_params.append({"Key": "rootCredential", "Value": payload["rootCredential"]}) + if payload.get("NetworkTypeV4") is not None: + job_params.append({"Key": "networkTypeV4", "Value": payload["NetworkTypeV4"]}) + if payload.get("IpV4SubnetMask") is not None: + job_params.append({"Key": "subnetMaskV4", "Value": 
payload["IpV4SubnetMask"]}) + if payload.get("IpV4Gateway") is not None: + job_params.append({"Key": "gatewayV4", "Value": payload["IpV4Gateway"]}) + if payload.get("NetworkTypeV6") is not None: + job_params.append({"Key": "networkTypeV6", "Value": payload["NetworkTypeV6"]}) + if payload.get("PrefixLength") is not None: + job_params.append({"Key": "prefixLength", "Value": payload["PrefixLength"]}) + if payload.get("IpV6Gateway") is not None: + job_params.append({"Key": "gatewayV6", "Value": payload["IpV6Gateway"]}) + updated_slot = [] + if slot_payload: + for each in slot_payload: + updated_slot.append(each.get("SlotId")) + job_params.append( + {"Key": "slotId={0}".format(each.get("SlotId")), + "Value": "SlotSelected=true;IPV4Address={0};IPV6Address={1};VlanId={2}".format( + each.get("SlotIPV4Address"), each.get("SlotIPV6Address"), each.get("VlanId"))}) + slots = resp_data["Slots"] + if updated_slot is not None: + slots = list(filter(lambda d: d["SlotId"] not in updated_slot, slots)) + for each in slots: + key = "slot_id={0}".format(each["SlotId"]) + value = "SlotSelected={0};".format(each["SlotSelected"]) + if each.get("SlotIPV4Address") is not None: + value = value + "IPV4Address={0};".format(each["SlotIPV4Address"]) + if each.get("SlotIPV6Address") is not None: + value = value + "IPV6Address={0};".format(each["SlotIPV6Address"]) + if each.get("VlanId") is not None: + value = value + "VlanId={0}".format(each["VlanId"]) + job_params.append({"Key": key, "Value": value}) + job_sub_resp = rest_obj.job_submission("Quick Deploy", QUICK_DEPLOY_JOB_DESC, [], job_params, + {"Id": 42, "Name": "QuickDeploy_Task"}) + return job_sub_resp.json_data.get('Id') + + +def get_device_details(rest_obj, module): + job_success_data, job_id = None, None + device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag") + if device_id is None and tag is None: + key, value = get_chassis_device(module, rest_obj) + device_id = value + else: + key, value = ("Id", 
device_id) if device_id is not None else ("DeviceServiceTag", tag) + param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value) + resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value}) + resp_data = resp.json_data.get("value") + rename_key = "id" if key == "Id" else "service tag" + if not resp_data: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag: + device_id = resp_data[0]["Id"] + elif key == "Id" and resp_data[0]["Id"] == device_id: + device_id = resp_data[0]["Id"] + else: + module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value)) + settings_type, settings_key = "IOMQuickDeploy", "IOM Quick Deploy" + if module.params["setting_type"] == "ServerQuickDeploy": + settings_type, settings_key = "ServerQuickDeploy", "Server Quick Deploy" + try: + deploy_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type)) + except HTTPError as err: + err_message = json.load(err) + error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo') + if error_msg and error_msg[0].get("MessageId") == "CGEN1004": + module.fail_json(msg=QUICK_DEPLOY_FAIL_MSG.format(settings_key)) + else: + resp_data = rest_obj.strip_substr_dict(deploy_resp.json_data) + payload, slot_payload = check_mode_validation(module, resp_data) + job_id = job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data) + if module.params["job_wait"]: + job_failed, job_msg = rest_obj.job_tracking(job_id, job_wait_sec=module.params["job_wait_timeout"]) + if job_failed is True: + module.fail_json(msg=FAIL_MSG) + job_success_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type)) + job_success_data = rest_obj.strip_substr_dict(job_success_resp.json_data) + return job_id, job_success_data + + +def main(): + slots = { + "slot_id": {"required": True, "type": 
"int"}, + "slot_ipv4_address": {"type": "str"}, + "slot_ipv6_address": {"type": "str"}, + "vlan_id": {"type": "int"}, + } + quick_deploy = { + "password": {"type": "str", "no_log": True}, + "ipv4_enabled": {"type": "bool"}, + "ipv4_network_type": {"type": "str", "choices": ["Static", "DHCP"]}, + "ipv4_subnet_mask": {"type": "str"}, + "ipv4_gateway": {"type": "str"}, + "ipv6_enabled": {"type": "bool"}, + "ipv6_network_type": {"type": "str", "choices": ["Static", "DHCP"]}, + "ipv6_prefix_length": {"type": "int"}, + "ipv6_gateway": {"type": "str"}, + "slots": {"type": "list", "elements": "dict", "options": slots}, + } + specs = { + "device_id": {"required": False, "type": "int"}, + "device_service_tag": {"required": False, "type": "str"}, + "setting_type": {"required": True, "choices": ["ServerQuickDeploy", "IOMQuickDeploy"]}, + "quick_deploy_options": { + "type": "dict", "required": True, "options": quick_deploy, + "required_if": [ + ["ipv4_enabled", True, ["ipv4_network_type"]], + ["ipv4_network_type", "Static", ["ipv4_subnet_mask", "ipv4_gateway"]], + ["ipv6_enabled", True, ["ipv6_network_type"]], + ["ipv6_network_type", "Static", ["ipv6_prefix_length", "ipv6_gateway"]], + ], + }, + "job_wait": {"type": "bool", "default": True}, + "job_wait_timeout": {"type": "int", "default": 120}, + } + specs.update(ome_auth_params) + module = AnsibleModule(argument_spec=specs, + mutually_exclusive=[('device_id', 'device_service_tag')], + supports_check_mode=True,) + if module.params["quick_deploy_options"] is None: + module.fail_json(msg="missing required arguments: quick_deploy_options") + fields = [("ipv4_subnet_mask", "IPV4"), ("ipv4_gateway", "IPV4"), ("ipv6_gateway", "IPV6")] + ip_address_field(module, fields, module.params["quick_deploy_options"], slot=False) + slot_options = module.params["quick_deploy_options"].get("slots") + if slot_options is not None: + slot_field = [("slot_ipv4_address", "IPV4"), ("slot_ipv6_address", "IPV6")] + for dep_opt in slot_options: + 
ip_address_field(module, slot_field, dep_opt, slot=True) + try: + with RestOME(module.params, req_session=True) as rest_obj: + check_domain_service(module, rest_obj) + job_id, data = get_device_details(rest_obj, module) + if job_id is not None and data is not None: + module.exit_json(msg=SUCCESS_MSG, job_id=job_id, quick_deploy_settings=data, changed=True) + module.exit_json(msg=JOB_MSG, job_id=job_id) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, + AttributeError, IndexError, KeyError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py new file mode 100644 index 00000000..95439528 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_devices +short_description: Perform device-specific operations on target devices +description: Perform device-specific operations such as refresh inventory, clear iDRAC job queue, and reset iDRAC from OpenManage Enterprise. +version_added: 6.1.0 +author: + - Jagadeesh N V(@jagadeeshnv) +extends_documentation_fragment: + - dellemc.openmanage.oment_auth_options +options: + device_service_tags: + description: + - Service tag of the target devices. + - This is mutually exclusive with I(device_ids). 
+ type: list + elements: str + device_ids: + description: + - IDs of the target devices. + - This is mutually exclusive with I(device_service_tags). + type: list + elements: int + state: + description: + - C(present) Allows to perform the I(device_action) on the target devices. + - "C(absent) Removes the device from OpenManage Enterprise. Job is not triggered. I(job_wait), I(job_schedule), + I(job_name), and I(job_description) are not applicable to this operation." + type: str + choices: [present, absent] + default: present + device_action: + description: + - C(refresh_inventory) refreshes the inventory on the target devices. + - C(reset_idrac) Triggers a reset on the target iDRACs. + - C(clear_idrac_job_queue) Clears the job queue on the target iDRACs. + - A job is triggered for each action. + type: str + choices: [refresh_inventory, reset_idrac, clear_idrac_job_queue] + default: refresh_inventory + job_wait: + description: + - Provides an option to wait for the job completion. + - This option is applicable when I(state) is C(present). + - This is applicable when I(job_schedule) is C(startnow). + type: bool + default: true + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 1200 + job_schedule: + description: Provide the cron string to schedule the job. + type: str + default: startnow + job_name: + description: Optional name for the job. + type: str + job_description: + description: Optional description for the job. + type: str +requirements: + - "python >= 3.8.6" +notes: + - For C(idrac_reset), the job triggers only the iDRAC reset operation and does not track the complete reset cycle. + - Run this module from a system that has direct access to Dell OpenManage Enterprise. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Refresh Inventory + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: refresh_inventory + device_service_tags: + - SVCTAG1 + +- name: Clear iDRAC job queue + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: clear_idrac_job_queue + device_service_tags: + - SVCTAG1 + +- name: Reset iDRAC using the service tag + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_action: reset_idrac + device_service_tags: + - SVCTAG1 + +- name: Remove devices using servicetags + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_service_tags: + - SVCTAG1 + - SVCTAF2 + +- name: Remove devices using IDs + dellemc.openmanage.ome_devices: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + device_ids: + - 10235 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the devices operation. + returned: always + sample: "Successfully removed the device(s)." +job: + type: dict + description: Job details of the devices operation. 
+ returned: success + sample: { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": "The Refresh inventory task initiated from OpenManage Ansible Modules for devices with the ids '13216'.", + "Schedule": "startnow", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": null, + "Visible": true, + "Editable": true, + "Builtin": false, + "UserGenerated": true, + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "true" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + "Internal": false + }, + "JobStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2020, + "Name": "Scheduled" + }, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(14874)/ExecutionHistories", + "LastExecutionDetail": { + "@odata.id": "/api/JobService/Jobs(14874)/LastExecutionDetail" + } +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGEN1002", + "RelatedProperties": [], + "Message": "Unable to complete the operation because the requested URI is invalid.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Enter a valid URI and retry the operation." 
+ } + ] + } +} +""" + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \ + get_rest_items, strip_substr_dict, job_tracking, apply_diff_key +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import CHANGES_MSG, NO_CHANGES_MSG + +DEVICE_URI = "DeviceService/Devices" +JOBS_URI = "JobService/Jobs" +JOB_URI = "JobService/Jobs({job_id})" +RUN_JOB_URI = "JobService/Actions/JobService.RunJobs" +LAST_EXEC = "JobService/Jobs({job_id})/LastExecutionDetail" +DELETE_DEVICES_URI = "DeviceService/Actions/DeviceService.RemoveDevices" +DELETE_SUCCESS = "The devices(s) are removed successfully." +INVALID_DEV_ST = "Unable to complete the operation because the entered target device(s) '{0}' are invalid." +JOB_DESC = "The {0} task initiated from OpenManage Ansible Modules for devices with the ids '{1}'." +APPLY_TRIGGERED = "Successfully initiated the device action job." +JOB_SCHEDULED = "The job is scheduled successfully." +SUCCESS_MSG = "The device operation is performed successfully." 
+ +all_device_types = [1000, 2000, 4000, 5000, 7000, 8000, 9001] +device_type_map = {"refresh_inventory": all_device_types, "reset_idrac": [1000], "clear_idrac_job_queue": [1000]} +job_type_map = {"refresh_inventory": 8, "reset_idrac": 3, "clear_idrac_job_queue": 3} +jtype_map = {3: "DeviceAction_Task", 8: "Inventory_Task"} +job_params_map = {"refresh_inventory": {"action": "CONFIG_INVENTORY", + "isCollectDriverInventory": "true"}, + "reset_idrac": {"operationName": "RESET_IDRAC"}, + "clear_idrac_job_queue": {"operationName": "REMOTE_RACADM_EXEC", + "Command": "jobqueue delete -i JID_CLEARALL_FORCE", + "CommandTimeout": "60", "deviceTypes": "1000"}} +jobname_map = {"refresh_inventory": "Refresh inventory", "reset_idrac": "Reset iDRAC", + "clear_idrac_job_queue": "Clear iDRAC job queue"} + + +def get_dev_ids(module, rest_obj, types): + invalids = set() + sts = module.params.get('device_ids') + param = "{0} eq {1}" + srch = 'Id' + if not sts: + sts = module.params.get('device_service_tags') + param = "{0} eq '{1}'" + srch = 'Identifier' + devs = [] + for st in sts: + resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param.format(srch, st)}) + val = resp.json_data.get('value') + if not val: + invalids.add(st) + for v in val: + if v[srch] == st: + if v["Type"] in types: + devs.extend(val) + else: + invalids.add(st) + break + else: + invalids.add(st) + valids = [(dv.get('Id')) for dv in devs] + return valids, invalids + + +def delete_devices(module, rest_obj, valid_ids): + if module.check_mode: + module.exit_json(msg=CHANGES_MSG, changed=True) + payload = {"DeviceIds": list(valid_ids)} + rest_obj.invoke_request('POST', DELETE_DEVICES_URI, data=payload) + module.exit_json(msg=DELETE_SUCCESS, changed=True) + + +def update_common_job(module, payload, task, valid_ids): + payload["Schedule"] = module.params.get('job_schedule') + if module.params.get('job_name'): + payload["JobName"] = module.params.get('job_name') + else: + payload["JobName"] = 
jobname_map.get(task) + if module.params.get('job_description'): + payload["JobDescription"] = module.params.get('job_description') + else: + payload["JobDescription"] = JOB_DESC.format(jobname_map.get(task), ",".join(map(str, valid_ids))) + + +def check_similar_job(rest_obj, payload): + query_param = {"$filter": "JobType/Id eq {0}".format(payload['JobType'])} + job_resp = rest_obj.invoke_request("GET", JOBS_URI, query_param=query_param) + job_list = job_resp.json_data.get('value', []) + for jb in job_list: + if jb['JobName'] == payload['JobName'] and jb['JobDescription'] == payload['JobDescription'] and \ + jb['Schedule'] == payload['Schedule']: + jb_prm = dict((k.get('Key'), k.get('Value')) for k in jb.get('Params')) + if not jb_prm == payload.get('Params'): + continue + trgts = dict((t.get('Id'), t.get('TargetType').get('Name')) for t in jb.get('Targets')) + if not trgts == payload.get('Targets'): + continue + return jb + return {} + + +def job_wait(module, rest_obj, job): + mparams = module.params + if mparams.get('job_schedule') != 'startnow': + module.exit_json(changed=True, msg=JOB_SCHEDULED, job=strip_substr_dict(job)) + if not module.params.get("job_wait"): + module.exit_json(changed=True, msg=APPLY_TRIGGERED, job=strip_substr_dict(job)) + else: + job_msg = SUCCESS_MSG + job_failed, msg, job_dict, wait_time = job_tracking( + rest_obj, JOB_URI.format(job_id=job['Id']), max_job_wait_sec=module.params.get('job_wait_timeout'), + initial_wait=3) + if job_failed: + try: + job_resp = rest_obj.invoke_request('GET', LAST_EXEC.format(job_id=job['Id'])) + msg = job_resp.json_data.get("Value") + job_msg = msg.replace('\n', ' ') + except Exception: + job_msg = msg + module.exit_json(failed=job_failed, msg=job_msg, job=strip_substr_dict(job), changed=True) + + +def get_task_payload(task): + taskload = {} + taskload.update({"JobType": job_type_map.get(task, 8)}) + taskload.update({"Params": job_params_map.get(task, {})}) + return taskload + + +def 
def get_payload_method(task, valid_ids):
    """Build the job payload for *task* targeting *valid_ids*.

    Returns a (payload, HTTP method, URI) triple; jobs are always created
    via POST on the jobs collection.
    """
    payload = get_task_payload(task)
    # Targets is kept as a {device_id: target_type_name} dict here and is
    # flattened into the OME Targets list by formalize_job_payload().
    targets = dict((dv, "DEVICE") for dv in valid_ids)
    payload["Targets"] = targets
    return payload, "POST", JOBS_URI


def formalize_job_payload(payload):
    """Convert the internal payload dicts into the OME job JSON shape.

    Params and Targets are flattened from dicts into the list-of-dicts
    format the JobService expects; JobType is expanded into an object.
    Mutates *payload* in place.
    """
    payload["Id"] = 0
    payload["State"] = "Enabled"
    prms = payload['Params']
    payload['Params'] = [({"Key": k, "Value": v}) for k, v in prms.items()]
    trgts = payload['Targets']
    payload['Targets'] = [({"Id": k, "Data": "", "TargetType": {"Id": 1000, "Name": v}}) for k, v in trgts.items()]
    jtype = payload["JobType"]
    payload["JobType"] = {"Id": jtype, "Name": jtype_map.get(jtype)}


def perform_device_tasks(module, rest_obj, valid_ids):
    """Create (or re-run) the job for the requested device_action.

    When an identical job already exists it is re-run — but only for
    'startnow' schedules and when its last run status is not 2050
    (NOTE(review): 2050 presumably means 'Running' — confirm against the
    JobService status table). Otherwise a new job is created. Every path
    exits the module via exit_json.
    """
    task = module.params.get("device_action")
    # method/uri are returned for symmetry but the POST below is explicit.
    payload, method, uri = get_payload_method(task, valid_ids)
    update_common_job(module, payload, task, valid_ids)
    job = check_similar_job(rest_obj, payload)
    if not job:
        formalize_job_payload(payload)
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        resp = rest_obj.invoke_request("POST", JOBS_URI, data=payload, api_timeout=60)
        job_wait(module, rest_obj, resp.json_data)
    else:
        if module.params.get('job_schedule') == 'startnow' and job["LastRunStatus"]['Id'] != 2050:
            if module.check_mode:
                module.exit_json(msg=CHANGES_MSG, changed=True)
            resp = rest_obj.invoke_request("POST", RUN_JOB_URI, data={"JobIds": [job['Id']]})
            # job_wait() exits the module itself.
            job_wait(module, rest_obj, job)
        module.exit_json(msg=NO_CHANGES_MSG, job=strip_substr_dict(job))


def main():
    """Module entry point: validate targets, then apply or delete devices."""
    specs = {
        "device_service_tags": {"type": "list", "elements": 'str'},
        "device_ids": {"type": "list", "elements": 'int'},
        "state": {"type": "str", "choices": ["present", "absent"], "default": "present"},
        "device_action": {"type": "str", "choices": ["refresh_inventory", "reset_idrac", "clear_idrac_job_queue"],
                          "default": 'refresh_inventory'},
        "job_wait": {"type": "bool", "default": True},
        "job_wait_timeout": {"type": "int", "default": 1200},
        "job_schedule": {"type": "str", "default": 'startnow'},
        "job_name": {"type": "str"},
        "job_description": {"type": "str"},
        # "job_params": {"type": "dict"}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[],
        mutually_exclusive=[
            ("device_service_tags", "device_ids"),
        ],
        required_one_of=[("device_service_tags", "device_ids")],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params.get("state") == 'present':
                # Only device types valid for the chosen action are accepted.
                valids, invalids = get_dev_ids(module, rest_obj,
                                               device_type_map.get(module.params.get("device_action")))
                if invalids:
                    module.exit_json(failed=True, msg=INVALID_DEV_ST.format(",".join(map(str, invalids))))
                perform_device_tasks(module, rest_obj, valids)
            else:
                # state=absent: any known device type may be deleted.
                valids, invalids = get_dev_ids(module, rest_obj, all_device_types)
                if not valids:
                    module.exit_json(msg=NO_CHANGES_MSG)
                delete_devices(module, rest_obj, valids)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported, not failed, so fleets can continue.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
            OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ome_diagnostics +short_description: Export technical support logs(TSR) to network share location +version_added: "3.6.0" +description: This module allows to export SupportAssist collection logs from OpenManage Enterprise and + OpenManage Enterprise Modular and application logs from OpenManage Enterprise Modular to a CIFS or NFS share. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + device_ids: + type: list + description: + - List of target device IDs. + - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs. + - This option is mutually exclusive with I(device_service_tags) and I(device_group_name). + elements: int + device_service_tags: + type: list + description: + - List of target identifier. + - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs. + - This option is mutually exclusive with I(device_ids) and I(device_group_name). + elements: str + device_group_name: + type: str + description: + - Name of the device group to export C(support_assist_collection) or C(supportassist_collection) logs of all devices within the group. + - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs. + - This option is not applicable for OpenManage Enterprise Modular. + - This option is mutually exclusive with I(device_ids) and I(device_service_tags). + log_type: + type: str + description: + - C(application) is applicable for OpenManage Enterprise Modular to export the application log bundle. + - C(support_assist_collection) and C(supportassist_collection) is applicable for one or more devices to export SupportAssist logs. 
+ - C(support_assist_collection) and C(supportassist_collection) supports both OpenManage Enterprise and OpenManage Enterprise Modular. + - C(support_assist_collection) and C(supportassist_collection) does not support export of C(OS_LOGS) from OpenManage Enterprise. + If tried to export, the tasks will complete with errors, and the module fails. + choices: [application, support_assist_collection, supportassist_collection] + default: support_assist_collection + mask_sensitive_info: + type: bool + description: + - Select this option to mask the personal identification information such as IPAddress, + DNS, alert destination, email, gateway, inet6, MacAddress, netmask etc. + - This option is applicable for C(application) of I(log_type). + default: False + log_selectors: + type: list + description: + - By default, the SupportAssist logs contain only hardware logs. To collect additional logs + such as OS logs, RAID logs or Debug logs, specify the log types to be collected in the choices list. + - If the log types are not specified, only the hardware logs are exported. + - C(OS_LOGS) to collect OS Logs. + - C(RAID_LOGS) to collect RAID controller logs. + - C(DEBUG_LOGS) to collect Debug logs. + - This option is applicable only for C(support_assist_collection) and C(supportassist_collection) of I(log_type). + choices: [OS_LOGS, RAID_LOGS, DEBUG_LOGS] + elements: str + share_address: + type: str + required: True + description: Network share IP address. + share_name: + type: str + required: True + description: + - Network share path. + - Filename is auto generated and should not be provided as part of I(share_name). + share_type: + type: str + required: True + description: Network share type + choices: [NFS, CIFS] + share_user: + type: str + description: + - Network share username. + - This option is applicable for C(CIFS) of I(share_type). + share_password: + type: str + description: + - Network share password + - This option is applicable for C(CIFS) of I(share_type). 
+  share_domain:
+    type: str
+    description:
+      - Network share domain name.
+      - This option is applicable for C(CIFS) of I(share_type).
+  job_wait:
+    type: bool
+    description:
+      - Whether to wait for the Job completion or not.
+      - The maximum wait time is I(job_wait_timeout).
+    default: True
+  job_wait_timeout:
+    type: int
+    description:
+      - The maximum wait time of I(job_wait) in minutes.
+      - This option is applicable when I(job_wait) is true.
+    default: 60
+  test_connection:
+    type: bool
+    description:
+      - Test the availability of the network share location.
+      - I(job_wait) and I(job_wait_timeout) options are not applicable for I(test_connection).
+    default: False
+  lead_chassis_only:
+    type: bool
+    description:
+      - Extract the logs from Lead chassis only.
+      - I(lead_chassis_only) is only applicable when I(log_type) is C(application) on OpenManage Enterprise Modular.
+    default: False
+requirements:
+  - "python >= 3.8.6"
+author:
+  - "Felix Stephen (@felixs88)"
+  - "Sachin Apagundi (@sachin-apa)"
+notes:
+  - Run this module from a system that has direct access to OpenManage Enterprise.
+  - This module performs the test connection and device validations. It does not create a job for copying the
+    logs in check mode and always reports as changes found.
+  - This module supports C(check_mode).
+""" + + +EXAMPLES = r""" +--- +- name: Export application log using CIFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_type: CIFS + share_address: "192.168.0.2" + share_user: share_username + share_password: share_password + share_name: cifs_share + log_type: application + mask_sensitive_info: false + test_connection: true + +- name: Export application log using NFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_type: NFS + share_name: nfs_share + log_type: application + mask_sensitive_info: true + test_connection: true + +- name: Export SupportAssist log using CIFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_user: share_username + share_password: share_password + share_name: cifs_share + share_type: CIFS + log_type: support_assist_collection + device_ids: [10011, 10022] + log_selectors: [OS_LOGS] + test_connection: true + +- name: Export SupportAssist log using NFS share location + dellemc.openmanage.ome_diagnostics: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + share_address: "192.168.0.3" + share_type: NFS + share_name: nfs_share + log_type: support_assist_collection + device_group_name: group_name + test_connection: true +""" + +RETURN = r""" +--- +msg: + type: str + description: "Overall status of the export log." + returned: always + sample: "Export log job completed successfully." +jog_status: + type: dict + description: Details of the export log operation status. 
+ returned: success + sample: { + "Builtin": false, + "CreatedBy": "root", + "Editable": true, + "EndTime": None, + "Id": 12778, + "JobDescription": "Export device log", + "JobName": "Export Log", + "JobStatus": {"Id": 2080, "Name": "New"}, + "JobType": {"Id": 18, "Internal": false, "Name": "DebugLogs_Task"}, + "LastRun": "2021-07-06 10:52:50.519", + "LastRunStatus": {"Id": 2060, "Name": "Completed"}, + "NextRun": None, + "Schedule": "startnow", + "StartTime": None, + "State": "Enabled", + "UpdatedBy": None, + "UserGenerated": true, + "Visible": true, + "Params": [ + {"JobId": 12778, "Key": "maskSensitiveInfo", "Value": "FALSE"}, + {"JobId": 12778, "Key": "password", "Value": "tY86w7q92u0QzvykuF0gQQ"}, + {"JobId": 12778, "Key": "userName", "Value": "administrator"}, + {"JobId": 12778, "Key": "shareName", "Value": "iso"}, + {"JobId": 12778, "Key": "OPERATION_NAME", "Value": "EXTRACT_LOGS"}, + {"JobId": 12778, "Key": "shareType", "Value": "CIFS"}, + {"JobId": 12778, "Key": "shareAddress", "Value": "100.96.32.142"} + ], + "Targets": [{"Data": "", "Id": 10053, "JobId": 12778, "TargetType": {"Id": 1000, "Name": "DEVICE"}}] + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
import json
import re
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
# Log selector names -> OME numeric selector ids.
LOG_SELECTOR = {"OS_LOGS": 1, "RAID_LOGS": 2, "DEBUG_LOGS": 3}
JOB_URI = "JobService/Jobs"
GROUP_URI = "GroupService/Groups"
GROUP_DEVICE_URI = "GroupService/Groups({0})/Devices"
DEVICE_URI = "DeviceService/Devices"
DOMAIN_URI = "ManagementDomainService/Domains"
EXE_HISTORY_URI = "JobService/Jobs({0})/ExecutionHistories"
CHANGES_FOUND = "Changes found to be applied."


def group_validation(module, rest_obj):
    """Resolve device_group_name to the ids of its Type-1000 (server) devices.

    Fails the module when the group does not exist, is empty, or contains
    no devices that support export log.

    :return: list of device ids belonging to the group.
    """
    group_name, group_device = module.params.get('device_group_name'), []
    query_param = {"$filter": "Name eq '{0}'".format(group_name)}
    group_resp = rest_obj.invoke_request("GET", GROUP_URI, query_param=query_param)
    group = group_resp.json_data["value"]
    if group:
        group_id = group[0]["Id"]
        resp = rest_obj.invoke_request("GET", GROUP_DEVICE_URI.format(group_id))
        device_group_resp = resp.json_data["value"]
        if device_group_resp:
            for device in device_group_resp:
                # Only Type 1000 (server) devices support SupportAssist export.
                if device["Type"] == 1000:
                    group_device.append(device["Id"])
        else:
            module.fail_json(msg="There are no device(s) present in this group.")
    else:
        module.fail_json(msg="Unable to complete the operation because the entered target "
                             "device group name '{0}' is invalid.".format(group_name))
    if not group_device:
        module.fail_json(msg="The requested group '{0}' does not contain devices that "
                             "support export log.".format(group_name))
    return group_device


def device_validation(module, rest_obj):
    """Validate device_ids / device_service_tags against the device inventory.

    Fails the module when any requested identifier is unknown, or when the
    requested devices exist but none is a Type-1000 (server) device.

    :return: list of validated device ids.
    """
    device_lst, invalid_lst, other_types = [], [], []
    devices, tags = module.params.get("device_ids"), module.params.get("device_service_tags")
    all_device = rest_obj.get_all_report_details(DEVICE_URI)
    # Match on Id when device_ids were given, otherwise on service tag.
    key = "Id" if devices is not None else "DeviceServiceTag"
    value = "id" if key == "Id" else "service tag"
    req_device = devices if devices is not None else tags
    for each in req_device:
        device = list(filter(lambda d: d[key] in [each], all_device["report_list"]))
        if device and device[0]["Type"] == 1000:
            device_lst.append(device[0]["Id"])
        elif device and not device[0]["Type"] == 1000:
            other_types.append(str(each))
        else:
            invalid_lst.append(str(each))
    if invalid_lst:
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target device {0}(s) '{1}' are invalid.".format(value, ",".join(set(invalid_lst))))
    if not device_lst and other_types:
        module.fail_json(msg="The requested device {0}(s) '{1}' are "
                             "not applicable for export log.".format(value, ",".join(set(other_types))))
    return device_lst


def extract_log_operation(module, rest_obj, device_lst=None):
    """Submit the export-log job and return the job submission response.

    For log_type 'application' the chassis targets are discovered from the
    appliance (only the LEAD/STANDALONE chassis when lead_chassis_only);
    for SupportAssist collections the given *device_lst* ids are targeted.
    Share and selector parameters are passed through as job Params.
    """
    payload_params, target_params = [], []
    log_type = module.params["log_type"]
    if log_type == "application":
        lead_only = module.params["lead_chassis_only"]
        resp_data = None
        if lead_only:
            domain_details = rest_obj.get_all_items_with_pagination(DOMAIN_URI)
            key = "Id"
            ch_device_id = None
            for each_domain in domain_details["value"]:
                if each_domain["DomainRoleTypeValue"] in ["LEAD", "STANDALONE"]:
                    ch_device_id = each_domain["DeviceId"]
            if ch_device_id:
                resp = rest_obj.invoke_request("GET", DEVICE_URI,
                                               query_param={"$filter": "{0} eq {1}".format(key, ch_device_id)})
                resp_data = resp.json_data["value"]
        else:
            # Type 2000 devices are chassis.
            resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": "Type eq 2000"})
            resp_data = resp.json_data["value"]
        if resp_data:
            for dev in resp_data:
                target_params.append({"Id": dev["Id"], "Data": "",
                                      "TargetType": {"Id": dev["Type"], "Name": "CHASSIS"}})
        else:
            module.fail_json(msg="There is no device(s) available to export application log.")
    else:
        for device in device_lst:
            target_params.append({"Id": device, "Data": "",
                                  "TargetType": {"Id": 1000, "Name": "DEVICE"}})
    payload_params.append({"Key": "shareAddress", "Value": module.params["share_address"]})
    payload_params.append({"Key": "shareType", "Value": module.params["share_type"]})
    payload_params.append({"Key": "OPERATION_NAME", "Value": "EXTRACT_LOGS"})
    if module.params.get("share_name") is not None:
        payload_params.append({"Key": "shareName", "Value": module.params["share_name"]})
    if module.params.get("share_user") is not None:
        payload_params.append({"Key": "userName", "Value": module.params["share_user"]})
    if module.params.get("share_password") is not None:
        payload_params.append({"Key": "password", "Value": module.params["share_password"]})
    if module.params.get("share_domain") is not None:
        payload_params.append({"Key": "domainName", "Value": module.params["share_domain"]})
    if module.params.get("mask_sensitive_info") is not None and log_type == "application":
        payload_params.append({"Key": "maskSensitiveInfo", "Value": str(module.params["mask_sensitive_info"]).upper()})
    if module.params.get("log_selectors") is not None and (log_type == "support_assist_collection" or log_type == "supportassist_collection"):
        # Selector ids are sorted and prefixed with 0 (hardware logs are
        # always included).
        log_lst = [LOG_SELECTOR[i] for i in module.params["log_selectors"]]
        log_lst.sort()
        log_selector = ",".join(map(str, log_lst))
        payload_params.append({"Key": "logSelector", "Value": "0,{0}".format(log_selector)})
    response = rest_obj.job_submission("Export Log", "Export device log", target_params,
                                       payload_params, {"Id": 18, "Name": "DebugLogs_Task"})
    return response


def check_domain_service(module, rest_obj):
    """Fail when the appliance does not expose the domain service.

    Probing DOMAIN_URI on a plain OpenManage Enterprise (non-Modular)
    appliance returns HTTP error CGEN1006, which means application-log
    export is not supported there.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        err_message = json.load(err)
        if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
            module.fail_json(msg="Export log operation is not supported on the specified system.")
    return
def find_failed_jobs(resp, rest_obj):
    """Inspect the execution history of a failed job for partial failures.

    A history detail whose Value matches "Job status for JID_... is
    Completed with Errors." indicates a partial (tolerable) failure; any
    detail that does NOT match marks the job as genuinely failed.

    :param resp: job dict (needs 'Id') of the tracked export job.
    :return: tuple (message, fail) where fail is True for a real failure.
    """
    msg, fail = "Export log job completed with errors.", False
    history = rest_obj.invoke_request("GET", EXE_HISTORY_URI.format(resp["Id"]))
    if history.json_data["value"]:
        # Only the most recent execution history entry is examined.
        hist = history.json_data["value"][0]
        history_details = rest_obj.invoke_request(
            "GET",
            "{0}({1})/ExecutionHistoryDetails".format(EXE_HISTORY_URI.format(resp["Id"]), hist["Id"])
        )
        for hd in history_details.json_data["value"]:
            if not re.findall(r"Job status for JID_\d+ is Completed with Errors.", hd["Value"]):
                fail = True
                break
            else:
                fail = False
    return msg, fail


def main():
    """Module entry point: validate inputs, submit and track the export job."""
    specs = {
        "device_ids": {"required": False, "type": "list", "elements": "int"},
        "device_service_tags": {"required": False, "type": "list", "elements": "str"},
        "device_group_name": {"required": False, "type": "str"},
        "log_type": {"required": False, "type": "str", "default": "support_assist_collection",
                     "choices": ["support_assist_collection", "application", "supportassist_collection"]},
        "mask_sensitive_info": {"required": False, "type": "bool", "default": False},
        "log_selectors": {"required": False, "type": "list",
                          "choices": ["RAID_LOGS", "OS_LOGS", "DEBUG_LOGS"], "elements": "str"},
        "share_address": {"required": True, "type": "str"},
        "share_name": {"required": True, "type": "str"},
        "share_type": {"required": True, "type": "str", "choices": ["NFS", "CIFS"]},
        "share_user": {"required": False, "type": "str"},
        "share_password": {"required": False, "type": "str", "no_log": True},
        "share_domain": {"required": False, "type": "str"},
        "job_wait": {"required": False, "type": "bool", "default": True},
        "job_wait_timeout": {"required": False, "type": "int", "default": 60},
        "test_connection": {"required": False, "type": "bool", "default": False},
        "lead_chassis_only": {"required": False, "type": "bool", "default": False},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['log_type', 'application', ['mask_sensitive_info']],
            ['log_type', 'support_assist_collection',
             ['device_ids', 'device_service_tags', 'device_group_name'], True],
            ['log_type', 'supportassist_collection',
             ['device_ids', 'device_service_tags', 'device_group_name'], True],
            ['share_type', 'CIFS', ['share_user', 'share_password']]
        ],
        mutually_exclusive=[('device_ids', 'device_service_tags', 'device_group_name')],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # checking the domain service
            if module.params["log_type"] == "application":
                check_domain_service(module, rest_obj)

            # checking any existing running job
            job_allowed, job_lst = rest_obj.check_existing_job_state("DebugLogs_Task")
            if not job_allowed:
                module.fail_json(msg="An export log job is already running. Wait for the job to finish.")

            # test network connection
            if module.params["test_connection"]:
                conn_resp = rest_obj.test_network_connection(module.params["share_address"],
                                                             module.params["share_name"],
                                                             module.params["share_type"],
                                                             module.params["share_user"],
                                                             module.params["share_password"],
                                                             module.params["share_domain"])
                job_failed, job_message = rest_obj.job_tracking(conn_resp.json_data["Id"], job_wait_sec=5,
                                                                sleep_time=5)
                if job_failed:
                    module.fail_json(msg="Unable to access the share. Ensure that the share address, share name, "
                                         "share domain, and share credentials provided are correct.")

            # validation for device id/tag/group
            valid_device = []
            if (module.params["log_type"] == "support_assist_collection" or module.params["log_type"] == "supportassist_collection") and \
                    module.params.get("device_group_name") is not None:
                valid_device = group_validation(module, rest_obj)
            elif (module.params["log_type"] == "support_assist_collection" or module.params["log_type"] == "supportassist_collection") and \
                    module.params.get("device_group_name") is None:
                valid_device = device_validation(module, rest_obj)

            # exit if running in check mode; no job is created in check mode.
            if module.check_mode:
                module.exit_json(msg=CHANGES_FOUND, changed=True)

            # extract log job operation
            response = extract_log_operation(module, rest_obj, device_lst=valid_device)
            message = "Export log job submitted successfully."
            if module.params["job_wait"]:
                # job_wait_timeout is in minutes; job_tracking expects seconds.
                seconds = module.params["job_wait_timeout"] * 60
                job_failed, job_message = rest_obj.job_tracking(response.json_data["Id"],
                                                                job_wait_sec=seconds,
                                                                sleep_time=5)
                message = "Export log job completed successfully."
                if job_message == "The job is not complete after {0} seconds.".format(seconds):
                    module.fail_json(
                        msg="The export job is not complete because it has exceeded the configured timeout period.",
                        job_status=response.json_data
                    )
                if job_failed:
                    # Distinguish a real failure from 'Completed with Errors'.
                    message, failed_job = find_failed_jobs(response.json_data, rest_obj)
                    if failed_job:
                        module.fail_json(msg=message, job_status=response.json_data)
            # Fetch the final job record for reporting.
            response = rest_obj.invoke_request("GET", "{0}({1})".format(JOB_URI, response.json_data["Id"]))
            resp = response.json_data
            if resp:
                resp = rest_obj.strip_substr_dict(resp)
            module.exit_json(msg=message, job_status=resp)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+extends_documentation_fragment:
+  - dellemc.openmanage.ome_auth_options
+options:
+  state:
+    description:
+      - C(present) creates a discovery job or modifies an existing discovery job.
+      - I(discovery_job_name) is mandatory for the creation of a new discovery job.
+      - If multiple discoveries of the same I(discovery_job_name) exist, then the new discovery job will not be created.
+      - C(absent) deletes an existing discovery job(s) with the specified I(discovery_job_name).
+    choices: [present, absent]
+    default: present
+    type: str
+  discovery_job_name:
+    description:
+      - Name of the discovery configuration job.
+      - It is mutually exclusive with I(discovery_id).
+    type: str
+  discovery_id:
+    description:
+      - ID of the discovery configuration group.
+      - This value is DiscoveryConfigGroupId in the return values under discovery_status.
+      - It is mutually exclusive with I(discovery_job_name).
+    type: int
+  new_name:
+    description: New name of the discovery configuration job.
+    type: str
+  schedule:
+    description:
+      - Provides the option to schedule the discovery job.
+      - If C(RunLater) is selected, then I(cron) must be specified.
+    choices: [RunNow, RunLater]
+    default: RunNow
+    type: str
+  cron:
+    description:
+      - Provide a cron expression based on Quartz cron format.
+    type: str
+  trap_destination:
+    description:
+      - Enable OpenManage Enterprise to receive the incoming SNMP traps from the discovered devices.
+      - This is effective only for servers discovered by using their iDRAC interface.
+    type: bool
+    default: false
+  community_string:
+    description: "Enable the use of SNMP community strings to receive SNMP traps using Application Settings in
+      OpenManage Enterprise. This option is available only for the discovered iDRAC servers and MX7000 chassis."
+    type: bool
+    default: false
+  email_recipient:
+    description: "Enter the email address to which notifications are to be sent about the discovery job status.
+ Configure the SMTP settings to allow sending notifications to an email address." + type: str + job_wait: + description: + - Provides the option to wait for job completion. + - This option is applicable when I(state) is C(present). + type: bool + default: true + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 10800 + ignore_partial_failure: + description: + - "Provides the option to ignore partial failures. Partial failures occur when there is a combination of both + discovered and undiscovered IPs." + - If C(False), then the partial failure is not ignored, and the module will error out. + - If C(True), then the partial failure is ignored. + - This option is only applicable if I(job_wait) is C(True). + type: bool + default: false + discovery_config_targets: + description: + - Provide the list of discovery targets. + - "Each discovery target is a set of I(network_address_detail), I(device_types), and one or more protocol + credentials." + - This is mandatory when I(state) is C(present). + - "C(WARNING) Modification of this field is not supported, this field is overwritten every time. Ensure to provide + all the required details for this field." + type: list + elements: dict + suboptions: + network_address_detail: + description: + - "Provide the list of IP addresses, host names, or the range of IP addresses of the devices to be discovered + or included." 
+ - "Sample Valid IP Range Formats" + - " 192.35.0.0" + - " 192.36.0.0-10.36.0.255" + - " 192.37.0.0/24" + - " 2345:f2b1:f083:135::5500/118" + - " 2345:f2b1:f083:135::a500-2607:f2b1:f083:135::a600" + - " hostname.domain.tld" + - " hostname" + - " 2345:f2b1:f083:139::22a" + - "Sample Invalid IP Range Formats" + - " 192.35.0.*" + - " 192.36.0.0-255" + - " 192.35.0.0/255.255.255.0" + - C(NOTE) The range size for the number of IP addresses is limited to 16,385 (0x4001). + - C(NOTE) Both IPv6 and IPv6 CIDR formats are supported. + type: list + elements: str + required: true + device_types: + description: + - Provide the type of devices to be discovered. + - The accepted types are SERVER, CHASSIS, NETWORK SWITCH, and STORAGE. + - A combination or all of the above can be provided. + - "Supported protocols for each device type are:" + - SERVER - I(wsman), I(redfish), I(snmp), I(ipmi), I(ssh), and I(vmware). + - CHASSIS - I(wsman), and I(redfish). + - NETWORK SWITCH - I(snmp). + - STORAGE - I(storage), and I(snmp). + type: list + elements: str + required: true + wsman: + description: Web Services-Management (WS-Man). + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + domain: + description: Provide a domain for the protocol. + type: str + port: + description: Enter the port number that the job must use to discover the devices. + type: int + default: 443 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + cn_check: + description: Enable the Common Name (CN) check. + type: bool + default: false + ca_check: + description: Enable the Certificate Authority (CA) check. 
+ type: bool + default: false + certificate_data: + description: Provide certificate data for the CA check. + type: str + redfish: + description: REDFISH protocol. + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + domain: + description: Provide a domain for the protocol. + type: str + port: + description: Enter the port number that the job must use to discover the devices. + type: int + default: 443 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + cn_check: + description: Enable the Common Name (CN) check. + type: bool + default: false + ca_check: + description: Enable the Certificate Authority (CA) check. + type: bool + default: false + certificate_data: + description: Provide certificate data for the CA check. + type: str + snmp: + description: Simple Network Management Protocol (SNMP). + type: dict + suboptions: + community: + description: Community string for the SNMP protocol. + type: str + required: true + port: + description: Enter the port number that the job must use to discover the devices. + type: int + default: 161 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 3 + storage: + description: HTTPS Storage protocol. + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + domain: + description: Provide a domain for the protocol. 
+ type: str + port: + description: Enter the port number that the job must use to discover the devices. + type: int + default: 443 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + cn_check: + description: Enable the Common Name (CN) check. + type: bool + default: false + ca_check: + description: Enable the Certificate Authority (CA) check. + type: bool + default: false + certificate_data: + description: Provide certificate data for the CA check. + type: str + vmware: + description: VMWARE protocol. + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + domain: + description: Provide a domain for the protocol. + type: str + port: + description: Enter the port number that the job must use to discover the devices. + type: int + default: 443 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + cn_check: + description: Enable the Common Name (CN) check. + type: bool + default: false + ca_check: + description: Enable the Certificate Authority (CA) check. + type: bool + default: false + certificate_data: + description: Provide certificate data for the CA check. + type: str + ssh: + description: Secure Shell (SSH). + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + port: + description: Enter the port number that the job must use to discover the devices. 
+ type: int + default: 22 + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + check_known_hosts: + description: Verify the known host key. + type: bool + default: false + is_sudo_user: + description: Use the SUDO option. + type: bool + default: false + ipmi: + description: Intelligent Platform Management Interface (IPMI). + type: dict + suboptions: + username: + description: Provide a username for the protocol. + type: str + required: true + password: + description: Provide a password for the protocol. + type: str + required: true + retries: + description: Enter the number of repeated attempts required to discover a device. + type: int + default: 3 + timeout: + description: Enter the time in seconds after which a job must stop running. + type: int + default: 60 + kgkey: + description: KgKey for the IPMI protocol. + type: str +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V (@jagadeeshnv)" + - "Sajna Shetty (@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module does not support C(check_mode). + - If I(state) is C(present), then idempotency is not supported.
+''' + +EXAMPLES = r''' +--- +- name: Discover servers in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - SERVER + wsman: + username: user + password: password + +- name: Discover chassis in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - CHASSIS + wsman: + username: user + password: password + +- name: Discover switches in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_switch_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - NETWORK SWITCH + snmp: + community: snmp_creds + +- name: Discover storage in a range + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discover_storage_1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + device_types: + - STORAGE + storage: + username: user + password: password + snmp: + community: snmp_creds + +- name: Delete a discovery job + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + discovery_job_name: "Discovery-123" + +- name: Schedule the discovery of multiple devices ignoring partial failure and enable trap to receive alerts + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" 
+ username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + discovery_job_name: "Discovery-123" + discovery_config_targets: + - network_address_detail: + - 192.96.24.1-192.96.24.255 + - 192.96.0.0/24 + - 192.96.26.108 + device_types: + - SERVER + - CHASSIS + - STORAGE + - NETWORK SWITCH + wsman: + username: wsman_user + password: wsman_pwd + redfish: + username: redfish_user + password: redfish_pwd + snmp: + community: snmp_community + - network_address_detail: + - 192.96.25.1-192.96.25.255 + - ipmihost + - esxiserver + - sshserver + device_types: + - SERVER + ssh: + username: ssh_user + password: ssh_pwd + vmware: + username: vm_user + password: vmware_pwd + ipmi: + username: ipmi_user + password: ipmi_pwd + schedule: RunLater + cron: "0 0 9 ? * MON,WED,FRI *" + ignore_partial_failure: True + trap_destination: True + community_string: True + email_recipient: test_email@company.com + +- name: Discover servers with ca check enabled + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_server_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - SERVER + wsman: + username: user + password: password + ca_check: True + certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}" + +- name: Discover chassis with ca check enabled data + dellemc.openmanage.ome_discovery: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + discovery_job_name: "Discovery_chassis_ca1" + discovery_config_targets: + - network_address_detail: + - 192.96.24.108 + device_types: + - CHASSIS + redfish: + username: user + password: password + ca_check: True + certificate_data: "-----BEGIN CERTIFICATE-----\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + 
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n + aqwertyuiopasdfghjklzxcvbnmasdasagasvv=\r\n + -----END CERTIFICATE-----" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the discovery operation. + returned: always + type: str + sample: "Successfully deleted 1 discovery job(s)." +discovery_status: + description: + - Details of the discovery job created or modified. + - If I(job_wait) is true, Completed and Failed IPs are also listed. + returned: when I(state) is C(present) + type: dict + sample: { + "Completed": [ + "192.168.24.17", + "192.168.24.20", + "192.168.24.22" + ], + "Failed": [ + "192.168.24.15", + "192.168.24.16", + "192.168.24.18", + "192.168.24.19", + "192.168.24.21", + "host123" + ], + "DiscoveredDevicesByType": [ + { + "Count": 3, + "DeviceType": "SERVER" + } + ], + "DiscoveryConfigDiscoveredDeviceCount": 3, + "DiscoveryConfigEmailRecipient": "myemail@dell.com", + "DiscoveryConfigExpectedDeviceCount": 9, + "DiscoveryConfigGroupId": 125, + "JobDescription": "D1", + "JobEnabled": true, + "JobEndTime": "2021-01-01 06:27:29.99", + "JobId": 12666, + "JobName": "D1", + "JobNextRun": null, + "JobProgress": "100", + "JobSchedule": "startnow", + "JobStartTime": "2021-01-01 06:24:10.071", + "JobStatusId": 2090, + "LastUpdateTime": "2021-01-01 06:27:30.001", + "UpdatedBy": "admin" + } +discovery_ids: + description: IDs of the discoveries with duplicate names. + returned: when discoveries with duplicate name exist for I(state) is C(present) + type: list + sample: [1234, 5678] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +import time +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +CONFIG_GROUPS_URI = "DiscoveryConfigService/DiscoveryConfigGroups" +DISCOVERY_JOBS_URI = "DiscoveryConfigService/Jobs" +DELETE_JOB_URI = "DiscoveryConfigService/Actions/DiscoveryConfigService.RemoveDiscoveryGroup" +PROTOCOL_DEVICE = "DiscoveryConfigService/ProtocolToDeviceType" +JOB_EXEC_HISTORY = "JobService/Jobs({job_id})/ExecutionHistories" +CONFIG_GROUPS_ID_URI = "DiscoveryConfigService/DiscoveryConfigGroups({group_id})" +NO_CHANGES_MSG = "No changes found to be applied." +DISC_JOB_RUNNING = "Discovery job '{name}' with ID {id} is running. Please retry after job completion." +DISC_DEL_JOBS_SUCCESS = "Successfully deleted {n} discovery job(s)." +MULTI_DISCOVERY = "Multiple discoveries present. Run the job again using a specific ID." +DISCOVERY_SCHEDULED = "Successfully scheduled the Discovery job." +DISCOVER_JOB_COMPLETE = "Successfully completed the Discovery job." +JOB_TRACK_SUCCESS = "Discovery job has {0}." +JOB_TRACK_FAIL = "No devices discovered, job is in {0} state." +JOB_TRACK_UNABLE = "Unable to track discovery job status of {0}." +JOB_TRACK_INCOMPLETE = "Discovery job {0} incomplete after polling {1} times." 
+INVALID_DEVICES = "Invalid device types found - {0}." +DISCOVERY_PARTIAL = "Some IPs are not discovered." +ATLEAST_ONE_PROTOCOL = "Protocol not applicable for given device types." +INVALID_DISCOVERY_ID = "Invalid discovery ID provided." +SETTLING_TIME = 5 + + +def check_existing_discovery(module, rest_obj): + discovery_cfgs = [] + discovery_id = module.params.get("discovery_id") + srch_key = "DiscoveryConfigGroupName" + srch_val = module.params.get("discovery_job_name") + if discovery_id: + srch_key = "DiscoveryConfigGroupId" + srch_val = module.params.get("discovery_id") + resp = rest_obj.invoke_request('GET', CONFIG_GROUPS_URI + "?$top=9999") + discovs = resp.json_data.get("value") + for d in discovs: + if d[srch_key] == srch_val: + discovery_cfgs.append(d) + if discovery_id: + break + return discovery_cfgs + + +def get_discovery_states(rest_obj, key="JobStatusId"): + resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI) + disc_jobs = resp.json_data.get("value") + job_state_dict = dict([(item["DiscoveryConfigGroupId"], item[key]) for item in disc_jobs]) + return job_state_dict + + +def get_protocol_device_map(rest_obj): + prot_dev_map = {} + dev_id_map = {} + resp = rest_obj.invoke_request('GET', PROTOCOL_DEVICE) + prot_dev = resp.json_data.get('value') + for item in prot_dev: + dname = item["DeviceTypeName"] + dlist = prot_dev_map.get(dname, []) + dlist.append(item["ProtocolName"]) + prot_dev_map[dname] = dlist + dev_id_map[dname] = item["DeviceTypeId"] + if dname == "DELL STORAGE": + prot_dev_map['STORAGE'] = dlist + dev_id_map['STORAGE'] = item["DeviceTypeId"] + return prot_dev_map, dev_id_map + + +def get_other_discovery_payload(module): + trans_dict = {'discovery_job_name': "DiscoveryConfigGroupName", + 'trap_destination': "TrapDestination", + 'community_string': "CommunityString", + 'email_recipient': "DiscoveryStatusEmailRecipient"} + other_dict = {} + for key, val in trans_dict.items(): + if module.params.get(key) is not None: + other_dict[val] = 
module.params.get(key) + return other_dict + + +def get_schedule(module): + schedule_payload = {} + schedule = module.params.get('schedule') + if not schedule or schedule == 'RunNow': + schedule_payload['RunNow'] = True + schedule_payload['RunLater'] = False + schedule_payload['Cron'] = 'startnow' + else: + schedule_payload['RunNow'] = False + schedule_payload['RunLater'] = True + schedule_payload['Cron'] = module.params.get('cron') + return schedule_payload + + +def get_execution_details(module, rest_obj, job_id): + try: + resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id)) + ex_hist = resp.json_data.get('value') + # Sorting based on startTime and to get latest execution instance. + tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist) + sorted_dates = sorted(tmp_dict.keys()) + ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]]) + ips = {"Completed": [], "Failed": []} + all_exec = rest_obj.get_all_items_with_pagination(ex_url) + for jb_ip in all_exec.get('value'): + jobstatus = jb_ip.get('JobStatus', {}).get('Name', 'Unknown') + jlist = ips.get(jobstatus, []) + jlist.append(jb_ip.get('Key')) + ips[jobstatus] = jlist + except Exception: + ips = {"Completed": [], "Failed": []} + return ips + + +def discovery_job_tracking(rest_obj, job_id, job_wait_sec): + job_status_map = { + 2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully", + 2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped", + 2103: "Canceled" + } + sleep_interval = 30 + max_retries = job_wait_sec // sleep_interval + failed_job_status = [2070, 2100, 2101, 2102, 2103] + success_job_status = [2060, 2020, 2090] + job_url = (DISCOVERY_JOBS_URI + "({job_id})").format(job_id=job_id) + loop_ctr = 0 + job_failed = True + time.sleep(SETTLING_TIME) + while loop_ctr < max_retries: + loop_ctr += 1 + try: + job_resp = 
rest_obj.invoke_request('GET', job_url) + job_dict = job_resp.json_data + job_status = job_dict['JobStatusId'] + if job_status in success_job_status: + job_failed = False + return job_failed, JOB_TRACK_SUCCESS.format(job_status_map[job_status]) + elif job_status in failed_job_status: + job_failed = True + return job_failed, JOB_TRACK_FAIL.format(job_status_map[job_status]) + time.sleep(sleep_interval) + except HTTPError: + return job_failed, JOB_TRACK_UNABLE.format(job_id) + except Exception as err: + return job_failed, str(err) + return job_failed, JOB_TRACK_INCOMPLETE.format(job_id, max_retries) + + +def get_job_data(discovery_json, rest_obj): + job_list = discovery_json['DiscoveryConfigTaskParam'] + if len(job_list) == 1: + job_id = job_list[0].get('TaskId') + else: + srch_key = 'DiscoveryConfigGroupId' + srch_val = discovery_json[srch_key] + resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI + "?$top=9999") + discovs = resp.json_data.get("value") + for d in discovs: + if d[srch_key] == srch_val: + job_id = d['JobId'] + break + return job_id + + +def get_connection_profile(disc_config): + proto_add_dict = { + 'wsman': { + 'certificateDetail': None, + 'isHttp': False, + 'keepAlive': True, + # 'version': None + }, + 'redfish': {'certificateDetail': None, 'isHttp': False, 'keepAlive': True}, + 'snmp': { + # 'authenticationPassphrase': None, + # 'authenticationProtocol': None, + 'enableV1V2': True, + 'enableV3': False, + # 'localizationEngineID': None, + # 'privacyPassphrase': None, + # 'privacyProtocol': None, + # 'securityName': None + }, + 'vmware': {'certificateDetail': None, 'isHttp': False, 'keepAlive': False}, + 'ssh': {'useKey': False, 'key': None, 'knownHostKey': None, 'passphrase': None}, + 'ipmi': {'privilege': 2}, + 'storage': { + 'certificateDetail': None, + 'isHttp': False, + 'keepAlive': True, + # 'version': None + } + } + proto_list = ['wsman', 'snmp', 'vmware', 'ssh', 'ipmi', 'redfish', 'storage'] + conn_profile = {"profileId": 0, 
"profileName": "", "profileDescription": "", "type": "DISCOVERY"} + creds_dict = {} + for p in proto_list: + if disc_config.get(p): + xproto = {"type": p.upper(), + "authType": "Basic", + "modified": False} + xproto['credentials'] = snake_dict_to_camel_dict(disc_config[p]) + (xproto['credentials']).update(proto_add_dict.get(p, {})) + creds_dict[p] = xproto + # Special handling, duplicating wsman to redfish as in GUI + if p == 'wsman': + rf = xproto.copy() + rf['type'] = 'REDFISH' + creds_dict['redfish'] = rf + conn_profile['credentials'] = list(creds_dict.values()) + return conn_profile + + +def get_discovery_config(module, rest_obj): + disc_cfg_list = [] + proto_dev_map, dev_id_map = get_protocol_device_map(rest_obj) + discovery_config_list = module.params.get("discovery_config_targets") + for disc_config in discovery_config_list: + disc_cfg = {} + disc_cfg['DeviceType'] = list( + dev_id_map[dev] for dev in disc_config.get('device_types') if dev in dev_id_map.keys()) + devices = list(set(disc_config.get('device_types'))) + if len(devices) != len(disc_cfg['DeviceType']): + invalid_dev = set(devices) - set(dev_id_map.keys()) + module.fail_json(msg=INVALID_DEVICES.format(','.join(invalid_dev))) + disc_cfg["DiscoveryConfigTargets"] = list({"NetworkAddressDetail": ip} for ip in disc_config["network_address_detail"]) + conn_profile = get_connection_profile(disc_config) + given_protos = list(x["type"] for x in conn_profile['credentials']) + req_protos = [] + for dev in disc_config.get('device_types'): + proto_dev_value = proto_dev_map.get(dev, []) + req_protos.extend(proto_dev_value) + if not (set(req_protos) & set(given_protos)): + module.fail_json(msg=ATLEAST_ONE_PROTOCOL, discovery_status=proto_dev_map) + disc_cfg["ConnectionProfile"] = json.dumps(conn_profile) + disc_cfg_list.append(disc_cfg) + return disc_cfg_list + + +def get_discovery_job(rest_obj, job_id): + resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI + "({0})".format(job_id)) + djob = resp.json_data 
+ nlist = list(djob) + for k in nlist: + if str(k).lower().startswith('@odata'): + djob.pop(k) + return djob + + +def exit_discovery(module, rest_obj, job_id): + msg = DISCOVERY_SCHEDULED + time.sleep(SETTLING_TIME) + djob = get_discovery_job(rest_obj, job_id) + if module.params.get("job_wait") and module.params.get('schedule') == 'RunNow': + job_failed, job_message = discovery_job_tracking(rest_obj, job_id, + job_wait_sec=module.params["job_wait_timeout"]) + if job_failed is True: + djob.update({"Completed": [], "Failed": []}) + module.fail_json(msg=job_message, discovery_status=djob) + msg = job_message + ip_details = get_execution_details(module, rest_obj, job_id) + djob = get_discovery_job(rest_obj, job_id) + djob.update(ip_details) + if ip_details.get("Failed") and module.params.get("ignore_partial_failure") is False: + module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob) + module.exit_json(msg=msg, discovery_status=djob, changed=True) + + +def create_discovery(module, rest_obj): + discovery_payload = {} + discovery_payload['DiscoveryConfigModels'] = get_discovery_config(module, rest_obj) + discovery_payload['Schedule'] = get_schedule(module) + other_params = get_other_discovery_payload(module) + discovery_payload.update(other_params) + resp = rest_obj.invoke_request("POST", CONFIG_GROUPS_URI, data=discovery_payload) + job_id = get_job_data(resp.json_data, rest_obj) + exit_discovery(module, rest_obj, job_id) + + +def update_modify_payload(discovery_modify_payload, current_payload, new_name=None): + parent_items = ["DiscoveryConfigGroupName", + "TrapDestination", + "CommunityString", + "DiscoveryStatusEmailRecipient", + "CreateGroup", + "UseAllProfiles"] + for key in parent_items: + if key not in discovery_modify_payload and key in current_payload: + discovery_modify_payload[key] = current_payload[key] + if not discovery_modify_payload.get("Schedule"): + exist_schedule = current_payload.get("Schedule", {}) + schedule_payload = {} + if 
exist_schedule.get('Cron') == 'startnow': + schedule_payload['RunNow'] = True + schedule_payload['RunLater'] = False + schedule_payload['Cron'] = 'startnow' + else: + schedule_payload['RunNow'] = False + schedule_payload['RunLater'] = True + schedule_payload['Cron'] = exist_schedule.get('Cron') + discovery_modify_payload['Schedule'] = schedule_payload + discovery_modify_payload["DiscoveryConfigGroupId"] = current_payload["DiscoveryConfigGroupId"] + if new_name: + discovery_modify_payload["DiscoveryConfigGroupName"] = new_name + + +def modify_discovery(module, rest_obj, discov_list): + if len(discov_list) > 1: + dup_discovery = list(item["DiscoveryConfigGroupId"] for item in discov_list) + module.fail_json(msg=MULTI_DISCOVERY, discovery_ids=dup_discovery) + job_state_dict = get_discovery_states(rest_obj) + for d in discov_list: + if job_state_dict.get(d["DiscoveryConfigGroupId"]) == 2050: + module.fail_json( + msg=DISC_JOB_RUNNING.format(name=d["DiscoveryConfigGroupName"], id=d["DiscoveryConfigGroupId"])) + discovery_payload = {'DiscoveryConfigModels': get_discovery_config(module, rest_obj), + 'Schedule': get_schedule(module)} + other_params = get_other_discovery_payload(module) + discovery_payload.update(other_params) + update_modify_payload(discovery_payload, discov_list[0], module.params.get("new_name")) + resp = rest_obj.invoke_request("PUT", + CONFIG_GROUPS_ID_URI.format(group_id=discovery_payload["DiscoveryConfigGroupId"]), + data=discovery_payload) + job_id = get_job_data(resp.json_data, rest_obj) + exit_discovery(module, rest_obj, job_id) + + +def delete_discovery(module, rest_obj, discov_list): + job_state_dict = get_discovery_states(rest_obj) + delete_ids = [] + for d in discov_list: + if job_state_dict.get(d["DiscoveryConfigGroupId"]) == 2050: + module.fail_json(msg=DISC_JOB_RUNNING.format(name=d["DiscoveryConfigGroupName"], + id=d["DiscoveryConfigGroupId"])) + else: + delete_ids.append(d["DiscoveryConfigGroupId"]) + delete_payload = {"DiscoveryGroupIds": 
delete_ids} + rest_obj.invoke_request('POST', DELETE_JOB_URI, data=delete_payload) + module.exit_json(msg=DISC_DEL_JOBS_SUCCESS.format(n=len(delete_ids)), changed=True) + + +def main(): + http_creds = {"username": {"type": 'str', "required": True}, + "password": {"type": 'str', "required": True, "no_log": True}, + "domain": {"type": 'str'}, + "retries": {"type": 'int', "default": 3}, + "timeout": {"type": 'int', "default": 60}, + "port": {"type": 'int', "default": 443}, + "cn_check": {"type": 'bool', "default": False}, + "ca_check": {"type": 'bool', "default": False}, + "certificate_data": {"type": 'str', "no_log": True} + } + snmp_creds = {"community": {"type": 'str', "required": True}, + "retries": {"type": 'int', "default": 3}, + "timeout": {"type": 'int', "default": 3}, + "port": {"type": 'int', "default": 161}, + } + ssh_creds = {"username": {"type": 'str', "required": True}, + "password": {"type": 'str', "required": True, "no_log": True}, + "retries": {"type": 'int', "default": 3}, + "timeout": {"type": 'int', "default": 60}, + "port": {"type": 'int', "default": 22}, + "check_known_hosts": {"type": 'bool', "default": False}, + "is_sudo_user": {"type": 'bool', "default": False} + } + ipmi_creds = {"username": {"type": 'str', "required": True}, + "password": {"type": 'str', "required": True, "no_log": True}, + "retries": {"type": 'int', "default": 3}, + "timeout": {"type": 'int', "default": 60}, + "kgkey": {"type": 'str', "no_log": True} + } + DiscoveryConfigModel = {"device_types": {"required": True, 'type': 'list', "elements": 'str'}, + "network_address_detail": {"required": True, "type": 'list', "elements": 'str'}, + "wsman": {"type": 'dict', "options": http_creds, + "required_if": [['ca_check', True, ('certificate_data',)]]}, + "storage": {"type": 'dict', "options": http_creds, + "required_if": [['ca_check', True, ('certificate_data',)]]}, + "redfish": {"type": 'dict', "options": http_creds, + "required_if": [['ca_check', True, ('certificate_data',)]]}, + 
"vmware": {"type": 'dict', "options": http_creds, + "required_if": [['ca_check', True, ('certificate_data',)]]}, + "snmp": {"type": 'dict', "options": snmp_creds}, + "ssh": {"type": 'dict', "options": ssh_creds}, + "ipmi": {"type": 'dict', "options": ipmi_creds}, + } + specs = { + "discovery_job_name": {"type": 'str'}, + "discovery_id": {"type": 'int'}, + "state": {"default": "present", "choices": ['present', 'absent']}, + "new_name": {"type": 'str'}, + "discovery_config_targets": + {"type": 'list', "elements": 'dict', "options": DiscoveryConfigModel, + "required_one_of": [ + ('wsman', 'storage', 'redfish', 'vmware', 'snmp', 'ssh', 'ipmi') + ]}, + "schedule": {"default": 'RunNow', "choices": ['RunNow', 'RunLater']}, + "cron": {"type": 'str'}, + "job_wait": {"type": 'bool', "default": True}, + "job_wait_timeout": {"type": 'int', "default": 10800}, + "trap_destination": {"type": 'bool', "default": False}, + "community_string": {"type": 'bool', "default": False}, + "email_recipient": {"type": 'str'}, + "ignore_partial_failure": {"type": 'bool', "default": False} + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[ + ['state', 'present', ('discovery_config_targets',)], + ['schedule', 'RunLater', ('cron',)] + ], + required_one_of=[('discovery_job_name', 'discovery_id')], + mutually_exclusive=[('discovery_job_name', 'discovery_id')], + supports_check_mode=False + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + discov_list = check_existing_discovery(module, rest_obj) + if module.params.get('state') == 'absent': + if discov_list: + delete_discovery(module, rest_obj, discov_list) + module.exit_json(msg=NO_CHANGES_MSG) + else: + if discov_list: + modify_discovery(module, rest_obj, discov_list) + else: + if module.params.get('discovery_id'): + module.fail_json(msg=INVALID_DISCOVERY_ID) + create_discovery(module, rest_obj) + except HTTPError as err: + module.fail_json(msg=str(err), 
error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py new file mode 100644 index 00000000..7b74c306 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ome_domain_user_groups +short_description: Create, modify, or delete an Active Directory user group on + OpenManage Enterprise and OpenManage Enterprise Modular +version_added: "4.0.0" +description: This module allows to create, modify, or delete an Active Directory user group on + OpenManage Enterprise and OpenManage Enterprise Modular. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + state: + type: str + description: + - C(present) imports or modifies the Active Directory user group. + - C(absent) deletes an existing Active Directory user group. + choices: [present, absent] + default: present + group_name: + type: str + required: True + description: + - The desired Active Directory user group name to be imported or removed. + - "Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator." + - I(group_name) value is case insensitive. 
+ role: + type: str + description: + - The desired roles and privilege for the imported Active Directory user group. + - "OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER, + FABRIC MANAGER, VIEWER." + - "OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER." + - I(role) value is case insensitive. + directory_name: + type: str + description: + - The directory name set while adding the Active Directory. + - I(directory_name) is mutually exclusive with I(directory_id). + directory_id: + type: int + description: + - The ID of the Active Directory. + - I(directory_id) is mutually exclusive with I(directory_name). + domain_username: + type: str + description: + - Active directory domain username. + - "Example: username@domain or domain\\username." + domain_password: + type: str + description: + - Active directory domain password. +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - This module supports C(check_mode) and idempotency. + - Run this module from a system that has direct access to OpenManage Enterprise + or OpenManage Enterprise Modular. 
+""" + +EXAMPLES = r""" +--- +- name: Create Active Directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + directory_name: directory_name + role: administrator + domain_username: username@domain + domain_password: domain_password + +- name: Update Active Directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + group_name: account operators + role: viewer + +- name: Delete active directory user group + dellemc.openmanage.ome_domain_user_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + group_name: administrators +""" + +RETURN = r""" +--- +msg: + type: str + description: Overall status of the Active Directory user group operation. + returned: always + sample: Successfully imported the active directory user group. +domain_user_status: + description: Details of the domain user operation, when I(state) is C(present). + returned: When I(state) is C(present). + type: dict + sample: { + "Description": null, + "DirectoryServiceId": 16097, + "Enabled": true, + "Id": "16617", + "IsBuiltin": false, + "IsVisible": true, + "Locked": false, + "Name": "Account Operators", + "ObjectGuid": "a491859c-031e-42a3-ae5e-0ab148ecf1d6", + "ObjectSid": null, + "Oem": null, + "Password": null, + "PlainTextPassword": null, + "RoleId": "16", + "UserName": "Account Operators", + "UserTypeId": 2 + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +ROLE_URI = "AccountService/Roles" +ACCOUNT_URI = "AccountService/Accounts" +GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider" +IMPORT_ACC_PRV = "AccountService/Actions/AccountService.ImportExternalAccountProvider" +SEARCH_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups" +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+ + +def get_directory(module, rest_obj): + user_dir_name = module.params.get("directory_name") + user_dir_id = module.params.get("directory_id") + key = "name" if user_dir_name is not None else "id" + value = user_dir_name if user_dir_name is not None else user_dir_id + dir_id = None + if user_dir_name is None and user_dir_id is None: + module.fail_json(msg="missing required arguments: directory_name or directory_id") + directory_resp = rest_obj.invoke_request("GET", GET_AD_ACC) + for dire in directory_resp.json_data["value"]: + if user_dir_name is not None and dire["Name"] == user_dir_name: + dir_id = dire["Id"] + break + if user_dir_id is not None and dire["Id"] == user_dir_id: + dir_id = dire["Id"] + break + else: + module.fail_json(msg="Unable to complete the operation because the entered " + "directory {0} '{1}' does not exist.".format(key, value)) + return dir_id + + +def search_directory(module, rest_obj, dir_id): + group_name, obj_gui_id, common_name = module.params["group_name"], None, None + payload = {"DirectoryServerId": dir_id, "Type": "AD", + "UserName": module.params["domain_username"], + "Password": module.params["domain_password"], + "CommonName": group_name} + try: + resp = rest_obj.invoke_request("POST", SEARCH_AD, data=payload) + for ad in resp.json_data: + if ad["CommonName"].lower() == group_name.lower(): + obj_gui_id = ad["ObjectGuid"] + common_name = ad["CommonName"] + break + else: + module.fail_json(msg="Unable to complete the operation because the entered " + "group name '{0}' does not exist.".format(group_name)) + except HTTPError as err: + error = json.load(err) + if error['error']['@Message.ExtendedInfo'][0]['MessageId'] in ["CGEN1004", "CSEC5022"]: + module.fail_json(msg="Unable to complete the operation because the entered " + "domain username or domain password are invalid.") + return obj_gui_id, common_name + + +def directory_user(module, rest_obj): + user = get_directory_user(module, rest_obj) + new_role_id = get_role(module, 
rest_obj) + dir_id = get_directory(module, rest_obj) + domain_resp, msg = None, '' + if user is None: + obj_gui_id, common_name = search_directory(module, rest_obj, dir_id) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + payload = [ + {"UserTypeId": 2, "DirectoryServiceId": dir_id, "Description": None, + "Name": common_name, "Password": "", "UserName": common_name, "RoleId": new_role_id, "Locked": False, + "IsBuiltin": False, "Enabled": True, "ObjectGuid": obj_gui_id} + ] + domain_resp = rest_obj.invoke_request("POST", IMPORT_ACC_PRV, data=payload) + msg = 'imported' + else: + if (int(user["RoleId"]) == new_role_id): + user = rest_obj.strip_substr_dict(user) + module.exit_json(msg=NO_CHANGES_MSG, domain_user_status=user) + else: + payload = {"Id": str(user["Id"]), "UserTypeId": 2, "DirectoryServiceId": dir_id, + "UserName": user["UserName"], "RoleId": str(new_role_id), "Enabled": user["Enabled"]} + update_uri = "{0}('{1}')".format(ACCOUNT_URI, user['Id']) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=payload) + domain_resp = rest_obj.invoke_request("PUT", update_uri, data=payload) + msg = 'updated' + if domain_resp is None: + module.fail_json(msg="Unable to complete the Active Directory user account.") + return domain_resp.json_data, msg + + +def get_role(module, rest_obj): + role_name, role_id = module.params.get("role"), None + if role_name is None: + module.fail_json(msg="missing required arguments: role") + resp_role = rest_obj.invoke_request("GET", ROLE_URI) + role_list = resp_role.json_data["value"] + for role in role_list: + if role["Name"] == role_name.upper().replace(" ", "_"): + role_id = int(role["Id"]) + break + else: + module.fail_json(msg="Unable to complete the operation because the entered " + "role name '{0}' does not exist.".format(role_name)) + return role_id + + +def get_directory_user(module, rest_obj): + user_group_name, user = module.params.get("group_name"), None 
+ state = module.params["state"] + if user_group_name is None: + module.fail_json(msg="missing required arguments: group_name") + user_resp = rest_obj.invoke_request('GET', ACCOUNT_URI) + for usr in user_resp.json_data["value"]: + if usr["UserName"].lower() == user_group_name.lower() and usr["UserTypeId"] == 2: + user = usr + if module.check_mode and state == "absent": + user = rest_obj.strip_substr_dict(usr) + module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=user) + break + else: + if state == "absent": + module.exit_json(msg=NO_CHANGES_MSG) + return user + + +def delete_directory_user(rest_obj, user_id): + delete_uri, changed = "{0}('{1}')".format(ACCOUNT_URI, user_id), False + msg = "Invalid active directory user group name provided." + resp = rest_obj.invoke_request('DELETE', delete_uri) + if resp.status_code == 204: + changed = True + msg = "Successfully deleted the active directory user group." + return msg, changed + + +def main(): + specs = { + "state": {"required": False, "type": 'str', "default": "present", + "choices": ['present', 'absent']}, + "group_name": {"required": True, "type": 'str'}, + "role": {"required": False, "type": 'str'}, + "directory_name": {"required": False, "type": 'str'}, + "directory_id": {"required": False, "type": 'int'}, + "domain_username": {"required": False, "type": 'str'}, + "domain_password": {"required": False, "type": 'str', "no_log": True}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[['directory_name', 'directory_id'], ], + supports_check_mode=True) + try: + with RestOME(module.params, req_session=True) as rest_obj: + if module.params["state"] == "present": + resp, msg = directory_user(module, rest_obj) + if isinstance(resp, list): + resp = resp[0] + module.exit_json( + msg="Successfully {0} the active directory user group.".format(msg), + domain_user_status=resp, changed=True + ) + if module.params["state"] == "absent": + user = 
get_directory_user(module, rest_obj) + msg, changed = delete_directory_user(rest_obj, int(user["Id"])) + user = rest_obj.strip_substr_dict(user) + module.exit_json(msg=msg, changed=changed, domain_user_status=user) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py new file mode 100644 index 00000000..a3bfff95 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py @@ -0,0 +1,653 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_firmware +short_description: Update firmware on PowerEdge devices and its components through OpenManage Enterprise +version_added: "2.0.0" +description: This module updates the firmware of PowerEdge devices and all its components through OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + device_service_tag: + description: + - List of service tags of the targeted devices. + - Either I(device_id) or I(device_service_tag) can be used individually or together. + - This option is mutually exclusive with I(device_group_names) and I(devices). + type: list + elements: str + device_id: + description: + - List of ids of the targeted device. 
+      - Either I(device_id) or I(device_service_tag) can be used individually or together.
+      - This option is mutually exclusive with I(device_group_names) and I(devices).
+    type: list
+    elements: int
+  device_group_names:
+    description:
+      - Enter the name of the device group that contains the devices on which firmware needs to be updated.
+      - This option is mutually exclusive with I(device_id) and I(device_service_tag).
+    type: list
+    elements: str
+  dup_file:
+    description:
+      - "The path of the Dell Update Package (DUP) file that contains the firmware or drivers required to update the
+        target system device or individual device components."
+      - This is mutually exclusive with I(baseline_name), I(components), and I(devices).
+    type: path
+  baseline_name:
+    description:
+      - Enter the baseline name to update the firmware of all devices or list of devices that are not compliant.
+      - This option is mutually exclusive with I(dup_file) and I(device_group_names).
+    type: str
+  components:
+    description:
+      - List of components to be updated.
+      - If not provided, all components applicable are considered.
+      - This option is case sensitive.
+      - This is applicable to I(device_service_tag), I(device_id), and I(baseline_name).
+    type: list
+    elements: str
+  devices:
+    description:
+      - This option allows to select components on each device for firmware update.
+      - This option is mutually exclusive with I(dup_file), I(device_group_names), I(device_id), and I(device_service_tag).
+    type: list
+    elements: dict
+    suboptions:
+      id:
+        type: int
+        description:
+          - The id of the target device to be updated.
+          - This option is mutually exclusive with I(service_tag).
+      service_tag:
+        type: str
+        description:
+          - The service tag of the target device to be updated.
+          - This option is mutually exclusive with I(id).
+      components:
+        description: The target components to be updated. If not specified, all applicable device components are considered. 
+ type: list + elements: str + schedule: + type: str + description: + - Select the schedule for the firmware update. + - if C(StageForNextReboot) is chosen, the firmware will be staged and updated during the next reboot + of the target device. + - if C(RebootNow) will apply the firmware updates immediately. + choices: + - RebootNow + - StageForNextReboot + default: RebootNow +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" + - "Jagadeesh N V (@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Update firmware from DUP file using device ids + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 11111 + - 22222 + dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE" + +- name: Update firmware from a DUP file using a device service tags + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - KLBR111 + - KLBR222 + dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE" + +- name: Update firmware from a DUP file using a device group names + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - servers + dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE" + +- name: Update firmware using baseline name + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + +- name: Stage firmware for the next reboot using baseline name + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: 
"/path/to/ca_cert.pem" + baseline_name: baseline_devices + schedule: StageForNextReboot + +- name: "Update firmware using baseline name and components." + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + components: + - BIOS + +- name: Update firmware of device components from a DUP file using a device ids in a baseline + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_id: + - 11111 + - 22222 + components: + - iDRAC with Lifecycle Controller + +- name: Update firmware of device components from a baseline using a device service tags under a baseline + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + device_service_tag: + - KLBR111 + - KLBR222 + components: + - IOM-SAS + +- name: Update firmware using baseline name with a device id and required components + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - id: 12345 + components: + - Lifecycle Controller + - id: 12346 + components: + - Enterprise UEFI Diagnostics + - BIOS + +- name: "Update firmware using baseline name with a device service tag and required components." + dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - PERC H740P Adapter + - BIOS + - service_tag: GHIJK34 + components: + - OS Drivers Pack + +- name: "Update firmware using baseline name with a device service tag or device id and required components." 
+ dellemc.openmanage.ome_firmware: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: baseline_devices + devices: + - service_tag: ABCDE12 + components: + - BOSS-S1 Adapter + - PowerEdge Server BIOS + - id: 12345 + components: + - iDRAC with Lifecycle Controller +''' + +RETURN = r''' +--- +msg: + type: str + description: "Overall firmware update status." + returned: always + sample: Successfully submitted the firmware update job. +update_status: + type: dict + description: The firmware update job and progress details from the OME. + returned: success + sample: { + 'LastRun': None, + 'CreatedBy': 'user', + 'Schedule': 'startnow', + 'LastRunStatus': { + 'Id': 1111, + 'Name': 'NotRun' + }, + 'Builtin': False, + 'Editable': True, + 'NextRun': None, + 'JobStatus': { + 'Id': 1111, + 'Name': 'New' + }, + 'JobName': 'Firmware Update Task', + 'Visible': True, + 'State': 'Enabled', + 'JobDescription': 'dup test', + 'Params': [{ + 'Value': 'true', + 'Key': 'signVerify', + 'JobId': 11111}, { + 'Value': 'false', + 'Key': 'stagingValue', + 'JobId': 11112}, { + 'Value': 'false', + 'Key': 'complianceUpdate', + 'JobId': 11113}, { + 'Value': 'INSTALL_FIRMWARE', + 'Key': 'operationName', + 'JobId': 11114}], + 'Targets': [{ + 'TargetType': { + 'Id': 1000, + 'Name': 'DEVICE'}, + 'Data': 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1234567654321', + 'Id': 11115, + 'JobId': 11116}], + 'StartTime': None, + 'UpdatedBy': None, + 'EndTime': None, + 'Id': 11117, + 'JobType': { + 'Internal': False, + 'Id': 5, + 'Name': 'Update_Task'} +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + + +COMPLIANCE_URI = "UpdateService/Baselines({0})/DeviceComplianceReports" +BASELINE_URI = "UpdateService/Baselines" +FW_JOB_DESC = "Firmware update task initiated from OpenManage Ansible Module collections" +NO_CHANGES_MSG = "No changes found to be applied. Either there are no updates present or components specified are not" \ + " found in the baseline." +COMPLIANCE_READ_FAIL = "Failed to read compliance report." +DUP_REQ_MSG = "Parameter 'dup_file' to be provided along with 'device_id'|'device_service_tag'|'device_group_names'" +APPLICABLE_DUP = "Unable to get applicable components DUP." +CHANGES_FOUND = "Changes found to be applied." 
+ + +def spawn_update_job(rest_obj, job_payload): + """Spawns an update job and tracks it to completion.""" + job_uri, job_details = "JobService/Jobs", {} + job_resp = rest_obj.invoke_request("POST", job_uri, data=job_payload) + if job_resp.status_code == 201: + job_details = job_resp.json_data + return job_details + + +def job_payload_for_update(rest_obj, module, target_data, baseline=None): + """Formulate the payload to initiate a firmware update job.""" + resp = rest_obj.get_job_type_id("Update_Task") + if resp is None: + module.fail_json(msg="Unable to fetch the job type Id.") + stage_dict = {"StageForNextReboot": 'true', "RebootNow": 'false'} + schedule = module.params["schedule"] + params = [{"Key": "operationName", "Value": "INSTALL_FIRMWARE"}, + {"Key": "stagingValue", "Value": stage_dict[schedule]}, + {"Key": "signVerify", "Value": "true"}] + # reboot applicable only if staging false + if schedule == "RebootNow": + params.append({"Key": "rebootType", "Value": "3"}) + # reboot_dict = {"GracefulReboot": "2", "GracefulRebootForce": "3", "PowerCycle": "1"} + payload = { + "Id": 0, "JobName": "Firmware Update Task", + "JobDescription": FW_JOB_DESC, "Schedule": "startnow", + "State": "Enabled", "JobType": {"Id": resp, "Name": "Update_Task"}, + "Targets": target_data, + "Params": params + } + + if baseline is not None: + payload["Params"].append({"Key": "complianceReportId", "Value": "{0}".format(baseline["baseline_id"])}) + payload["Params"].append({"Key": "repositoryId", "Value": "{0}".format(baseline["repo_id"])}) + payload["Params"].append({"Key": "catalogId", "Value": "{0}".format(baseline["catalog_id"])}) + payload["Params"].append({"Key": "complianceUpdate", "Value": "true"}) + else: + payload["Params"].append({"JobId": 0, "Key": "complianceUpdate", "Value": "false"}) + return payload + + +def get_applicable_components(rest_obj, dup_payload, module): + """Get the target array to be used in spawning jobs for update.""" + target_data = [] + dup_url = 
"UpdateService/Actions/UpdateService.GetSingleDupReport" + headers = {"Content-Type": "application/json", "Accept": "application/json"} + dup_resp = rest_obj.invoke_request("POST", dup_url, data=dup_payload, + headers=headers, api_timeout=60) + if dup_resp.status_code == 200: + dup_data = dup_resp.json_data + file_token = str(dup_payload['SingleUpdateReportFileToken']) + for device in dup_data: + for component in device['DeviceReport']['Components']: + temp_map = {} + temp_map['Id'] = device['DeviceId'] + temp_map['Data'] = "{0}={1}".format(component['ComponentSourceName'], file_token) + temp_map['TargetType'] = {} + temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId']) + temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName']) + target_data.append(temp_map) + else: + module.fail_json(msg=APPLICABLE_DUP) + return target_data + + +def get_dup_applicability_payload(file_token, device_ids=None, group_ids=None, baseline_ids=None): + """Returns the DUP applicability JSON payload.""" + dup_applicability_payload = {'SingleUpdateReportBaseline': [], + 'SingleUpdateReportGroup': [], + 'SingleUpdateReportTargets': [], + 'SingleUpdateReportFileToken': file_token} + if device_ids is not None: + dup_applicability_payload.update({"SingleUpdateReportTargets": list(map(int, device_ids))}) + elif group_ids is not None: + dup_applicability_payload.update({"SingleUpdateReportGroup": list(map(int, group_ids))}) + elif baseline_ids is not None: + dup_applicability_payload.update({"SingleUpdateReportBaseline": list(map(int, baseline_ids))}) + return dup_applicability_payload + + +def upload_dup_file(rest_obj, module): + """Upload DUP file to OME and get a file token.""" + upload_uri = "UpdateService/Actions/UpdateService.UploadFile" + headers = {"Content-Type": "application/octet-stream", "Accept": "application/octet-stream"} + upload_success, token = False, None + dup_file = module.params['dup_file'] + with open(dup_file, 'rb') as payload: + 
payload = payload.read() + response = rest_obj.invoke_request("POST", upload_uri, data=payload, headers=headers, + api_timeout=100, dump=False) + if response.status_code == 200: + upload_success = True + token = str(response.json_data) + else: + module.fail_json(msg="Unable to upload {0} to {1}".format(dup_file, module.params['hostname'])) + return upload_success, token + + +def get_device_ids(rest_obj, module, device_id_tags): + """Getting the list of device ids filtered from the device inventory.""" + device_id = [] + resp = rest_obj.get_all_report_details("DeviceService/Devices") + if resp.get("report_list"): + device_resp = dict([(str(device['Id']), device['DeviceServiceTag']) for device in resp["report_list"]]) + device_tags = map(str, device_id_tags) + invalid_tags = [] + for tag in device_tags: + if tag in device_resp.keys(): + device_id.append(tag) + elif tag in device_resp.values(): + ids = list(device_resp.keys())[list(device_resp.values()).index(tag)] + device_id.append(ids) + else: + invalid_tags.append(tag) + if invalid_tags: + module.fail_json( + msg="Unable to complete the operation because the entered target device service" + " tag(s) or device id(s) '{0}' are invalid.".format(",".join(set(invalid_tags)))) + else: + module.fail_json(msg="Failed to fetch the device facts.") + return device_id, device_resp + + +def get_group_ids(rest_obj, module): + """Getting the list of group ids filtered from the groups.""" + resp = rest_obj.get_all_report_details("GroupService/Groups") + group_name = module.params.get('device_group_names') + if resp["report_list"]: + grp_ids = [grp['Id'] for grp in resp["report_list"] for grpname in group_name if grp['Name'] == grpname] + if len(set(group_name)) != len(set(grp_ids)): + module.fail_json( + msg="Unable to complete the operation because the entered target device group name(s)" + " '{0}' are invalid.".format(",".join(set(group_name)))) + return grp_ids + + +def get_baseline_ids(rest_obj, module): + """Getting the list 
of group ids filtered from the groups.""" + resp = rest_obj.get_all_report_details(BASELINE_URI) + baseline, baseline_details = module.params.get('baseline_name'), {} + if resp["report_list"]: + for bse in resp["report_list"]: + if bse['Name'] == baseline: + baseline_details["baseline_id"] = bse["Id"] + baseline_details["repo_id"] = bse["RepositoryId"] + baseline_details["catalog_id"] = bse["CatalogId"] + if not baseline_details: + module.fail_json( + msg="Unable to complete the operation because the entered target baseline name" + " '{0}' is invalid.".format(baseline)) + else: + module.fail_json(msg="Unable to complete the operation because the entered " + "target baseline name does not exist.") + return baseline_details + + +def single_dup_update(rest_obj, module): + target_data, device_ids, group_ids, baseline_ids = None, None, None, None + if module.params.get("device_group_names") is not None: + group_ids = get_group_ids(rest_obj, module) + else: + device_id_tags = _validate_device_attributes(module) + device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND) + upload_status, token = upload_dup_file(rest_obj, module) + if upload_status: + report_payload = get_dup_applicability_payload(token, device_ids=device_ids, group_ids=group_ids, + baseline_ids=baseline_ids) + if report_payload: + target_data = get_applicable_components(rest_obj, report_payload, module) + return target_data + + +def baseline_based_update(rest_obj, module, baseline, dev_comp_map): + compliance_uri = COMPLIANCE_URI.format(baseline["baseline_id"]) + resp = rest_obj.get_all_report_details(compliance_uri) + compliance_report_list = [] + update_actions = ["UPGRADE", "DOWNGRADE"] + if resp["report_list"]: + comps = [] + if not dev_comp_map: + comps = module.params.get('components') + dev_comp_map = dict([(str(dev["DeviceId"]), comps) for dev in resp["report_list"]]) + for dvc in resp["report_list"]: + dev_id = 
dvc["DeviceId"] + if str(dev_id) in dev_comp_map: + comps = dev_comp_map.get(str(dev_id), []) + compliance_report = dvc.get("ComponentComplianceReports") + if compliance_report is not None: + data_dict = {} + comp_list = [] + if not comps: + comp_list = list(icomp["SourceName"] for icomp in compliance_report + if icomp["UpdateAction"] in update_actions) + else: + comp_list = list(icomp["SourceName"] for icomp in compliance_report + if ((icomp["UpdateAction"] in update_actions) and + (icomp.get('Name') in comps))) # regex filtering ++ + if comp_list: + data_dict["Id"] = dev_id + data_dict["Data"] = str(";").join(comp_list) + data_dict["TargetType"] = {"Id": dvc['DeviceTypeId'], "Name": dvc["DeviceTypeName"]} + compliance_report_list.append(data_dict) + else: + module.fail_json(msg=COMPLIANCE_READ_FAIL) + if not compliance_report_list: + module.exit_json(msg=NO_CHANGES_MSG) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND) + return compliance_report_list + + +def _validate_device_attributes(module): + device_id_tags = [] + service_tag = module.params.get('device_service_tag') + device_id = module.params.get('device_id') + devices = module.params.get('devices') + if devices: + for dev in devices: + if dev.get('id'): + device_id_tags.append(dev.get('id')) + else: + device_id_tags.append(dev.get('service_tag')) + if device_id is not None: + device_id_tags.extend(device_id) + if service_tag is not None: + device_id_tags.extend(service_tag) + return device_id_tags + + +def get_device_component_map(rest_obj, module): + device_id_tags = _validate_device_attributes(module) + device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags) + comps = module.params.get('components') + dev_comp_map = {} + if device_ids: + dev_comp_map = dict([(dev, comps) for dev in device_ids]) + devices = module.params.get('devices') + if devices: + for dev in devices: + if dev.get('id'): + dev_comp_map[str(dev.get('id'))] = dev.get('components') + else: + id = 
list(id_tag_map.keys())[list(id_tag_map.values()).index(dev.get('service_tag'))] + dev_comp_map[str(id)] = dev.get('components') + return dev_comp_map + + +def validate_inputs(module): + param = module.params + if param.get("dup_file"): + if not any([param.get("device_id"), param.get("device_service_tag"), param.get("device_group_names")]): + module.fail_json(msg=DUP_REQ_MSG) + + +def main(): + specs = { + "device_service_tag": {"type": "list", "elements": 'str'}, + "device_id": {"type": "list", "elements": 'int'}, + "dup_file": {"type": "path"}, + "device_group_names": {"type": "list", "elements": 'str'}, + "components": {"type": "list", "elements": 'str', "default": []}, + "baseline_name": {"type": "str"}, + "schedule": {"type": 'str', "choices": ['RebootNow', 'StageForNextReboot'], "default": 'RebootNow'}, + "devices": { + "type": 'list', "elements": 'dict', + "options": { + "id": {'type': 'int'}, + "service_tag": {"type": 'str'}, + "components": {"type": "list", "elements": 'str', "default": []}, + }, + "mutually_exclusive": [('id', 'service_tag')], + "required_one_of": [('id', 'service_tag')] + }, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_one_of=[["dup_file", "baseline_name"]], + mutually_exclusive=[ + ["baseline_name", "dup_file"], + ["device_group_names", "device_id", "devices"], + ["device_group_names", "device_service_tag", "devices"], + ["baseline_name", "device_group_names"], + ["dup_file", "components", "devices"]], + supports_check_mode=True + ) + validate_inputs(module) + update_status, baseline_details = {}, None + try: + with RestOME(module.params, req_session=True) as rest_obj: + if module.params.get("baseline_name"): + baseline_details = get_baseline_ids(rest_obj, module) + device_comp_map = get_device_component_map(rest_obj, module) + target_data = baseline_based_update(rest_obj, module, baseline_details, device_comp_map) + else: + target_data = single_dup_update(rest_obj, module) + 
job_payload = job_payload_for_update(rest_obj, module, target_data, baseline=baseline_details) + update_status = spawn_update_job(rest_obj, job_payload) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, OSError) as err: + module.fail_json(msg=str(err)) + module.exit_json(msg="Successfully submitted the firmware update job.", update_status=update_status, changed=True) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py new file mode 100644 index 00000000..d6282db3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py @@ -0,0 +1,550 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_firmware_baseline +short_description: Create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular +description: This module allows to create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular. +version_added: "2.0.0" +author: + - Jagadeesh N V(@jagadeeshnv) +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + state: + description: + - C(present) creates or modifies a baseline. + - C(absent) deletes an existing baseline. 
+ choices: + - present + - absent + default: present + type: str + version_added: 3.4.0 + baseline_name: + type: str + description: + - Name of the the baseline. + - This option is mutually exclusive with I(baseline_id). + baseline_id: + type: int + description: + - ID of the existing baseline. + - This option is mutually exclusive with I(baseline_name). + version_added: 3.4.0 + new_baseline_name: + description: New name of the baseline. + type: str + version_added: 3.4.0 + baseline_description: + type: str + description: + - Description for the baseline being created. + catalog_name: + type: str + description: + - Name of the catalog to be associated with the baseline. + downgrade_enabled: + type: bool + description: + - Indicates whether firmware downgrade is allowed for the devices in the baseline. + - This value will be set to C(True) by default, if not provided during baseline creation. + is_64_bit: + type: bool + description: + - Indicates if the repository contains 64-bit DUPs. + - This value will be set to C(True) by default, if not provided during baseline creation. + device_ids: + type: list + elements: int + description: + - List of device IDs. + - This option is mutually exclusive with I(device_service_tags) and I(device_group_names). + device_service_tags: + type: list + elements: str + description: + - List of device service tags. + - This option is mutually exclusive with I(device_ids) and I(device_group_names). + device_group_names: + type: list + elements: str + description: + - List of group names. + - This option is mutually exclusive with I(device_ids) and I(device_service_tags). + job_wait: + description: + - Provides the option to wait for job completion. + - This option is applicable when I(state) is C(present). + type: bool + default: true + version_added: 3.4.0 + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. 
+ - This option is applicable when I(job_wait) is C(True). + type: int + default: 600 + version_added: 3.4.0 +requirements: + - "python >= 3.8.6" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - I(device_group_names) option is not applicable for OpenManage Enterprise Modular. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Create baseline for device IDs + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_ids: + - 1010 + - 2020 + +- name: Create baseline for servicetags + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_service_tags: + - "SVCTAG1" + - "SVCTAG2" + +- name: Create baseline for device groups without job tracking + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" + baseline_description: "baseline_description" + catalog_name: "catalog_name" + device_group_names: + - "Group1" + - "Group2" + job_wait: no + +- name: Modify an existing baseline + dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "existing_baseline_name" + new_baseline_name: "new_baseline_name" + baseline_description: "new baseline_description" + catalog_name: "catalog_other" + device_group_names: + - "Group3" + - "Group4" + - "Group5" + downgrade_enabled: no + is_64_bit: yes + +- name: Delete a baseline + 
dellemc.openmanage.ome_firmware_baseline: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + baseline_name: "baseline_name" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the firmware baseline operation. + returned: always + type: str + sample: "Successfully created the firmware baseline." +baseline_status: + description: Details of the baseline status. + returned: success + type: dict + sample: { + "CatalogId": 123, + "Description": "BASELINE DESCRIPTION", + "DeviceComplianceReports": [], + "DowngradeEnabled": true, + "Id": 23, + "Is64Bit": true, + "Name": "my_baseline", + "RepositoryId": 123, + "RepositoryName": "catalog123", + "RepositoryType": "HTTP", + "Targets": [ + { + "Id": 10083, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + }, + { + "Id": 10076, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "TaskId": 11235, + "TaskStatusId": 2060 + } +job_id: + description: Job ID of the baseline task. + returned: When baseline job is in running state + type: int + sample: 10123 +baseline_id: + description: ID of the deleted baseline. + returned: When I(state) is C(absent) + type: int + sample: 10123 +error_info: + type: dict + description: Details of http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to retrieve baseline list either because the device ID(s) entered are invalid", + "Resolution": "Make sure the entered device ID(s) are valid and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." 
+ } + } +''' + +BASELINE_URI = "UpdateService/Baselines" +ID_BASELINE_URI = "UpdateService/Baselines({Id})" +DELETE_BASELINE_URI = "UpdateService/Actions/UpdateService.RemoveBaselines" +CATALOG_URI = "UpdateService/Catalogs" +BASELINE_JOB_RUNNING = "Firmware baseline '{name}' with ID {id} is running. Please retry after job completion." +BASELINE_DEL_SUCCESS = "Successfully deleted the firmware baseline." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +INVALID_BASELINE_ID = "Invalid baseline ID provided." +BASELINE_TRIGGERED = "Successfully triggered the firmware baseline task." +NO_CATALOG_MESSAGE = "Catalog name not provided for baseline creation." +NO_TARGETS_MESSAGE = "Targets not specified for baseline creation." +CATALOG_STATUS_MESSAGE = "Unable to create the firmware baseline as the catalog is in {status} status." +BASELINE_UPDATED = "Successfully {op} the firmware baseline." +SETTLING_TIME = 3 +JOB_POLL_INTERVAL = 10 +GROUP_ID = 6000 + + +import json +import time +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.common.dict_transformations import recursive_diff + + +def get_baseline_from_name(rest_obj, baseline): + resp = rest_obj.get_all_items_with_pagination(BASELINE_URI) + baselines_list = resp.get("value") + bsln = baseline + for d in baselines_list: + if d['Name'] == baseline.get('Name'): + bsln = d + break + nlist = list(bsln) + for k in nlist: + if str(k).lower().startswith('@odata'): + bsln.pop(k) + return bsln + + +def check_existing_baseline(module, rest_obj): + baseline_id = module.params.get("baseline_id") + srch_key = "Name" + srch_val = module.params.get("baseline_name") + if 
baseline_id: + srch_key = "Id" + srch_val = module.params.get("baseline_id") + baseline_cfgs = [] + resp = rest_obj.get_all_items_with_pagination(BASELINE_URI) + baselines = resp.get("value") + for d in baselines: + if d[srch_key] == srch_val: + baseline_cfgs.append(d) + if baseline_id: + break + return baseline_cfgs + + +def get_catrepo_ids(module, cat_name, rest_obj): + if cat_name is not None: + resp_data = rest_obj.get_all_items_with_pagination(CATALOG_URI) + values = resp_data["value"] + if values: + for catalog in values: + repo = catalog.get("Repository") + if repo.get("Name") == cat_name: + if catalog.get('Status') != 'Completed': + module.fail_json(msg=CATALOG_STATUS_MESSAGE.format(status=catalog.get('Status'))) + return catalog.get("Id"), repo.get("Id") + return None, None + + +def get_dev_ids(module, rest_obj, param, devkey): + paramlist = module.params[param] + resp_data = rest_obj.get_all_items_with_pagination("DeviceService/Devices") + values = resp_data["value"] + targets = [] + if values: + devlist = values + device_resp = dict([(device[devkey], device) for device in devlist]) + for st in paramlist: + if st in device_resp: + djson = device_resp[st] + target = {} + device_type = {} + device_type['Id'] = djson['Type'] + device_type['Name'] = "DEVICE" + target['Id'] = djson['Id'] + target['Type'] = device_type + targets.append(target) + else: + module.fail_json(msg="Unable to complete the operation because the entered target" + " {0} '{1}' is invalid.".format(devkey, st)) + return targets + + +def get_group_ids(module, rest_obj): + grp_name_list = module.params.get("device_group_names") + resp_data = rest_obj.get_all_items_with_pagination("GroupService/Groups") + values = resp_data["value"] + targets = [] + if values: + grplist = values + device_resp = dict([(str(grp['Name']), grp) for grp in grplist]) + for st in grp_name_list: + if st in device_resp: + djson = device_resp[st] + target = {} + device_type = {} + device_type['Id'] = GROUP_ID + 
device_type['Name'] = "GROUP" + target['Id'] = djson['Id'] + target['Type'] = device_type + targets.append(target) + else: + module.fail_json(msg="Unable to complete the operation because the entered target" + " Group Name '{0}' is invalid.".format(st)) + return targets + + +def get_target_list(module, rest_obj): + target_list = None + if module.params.get("device_service_tags"): + target_list = get_dev_ids(module, rest_obj, "device_service_tags", "DeviceServiceTag") + elif module.params.get("device_group_names"): + target_list = get_group_ids(module, rest_obj) + elif module.params.get("device_ids"): + target_list = get_dev_ids(module, rest_obj, "device_ids", "Id") + return target_list + + +def exit_baseline(module, rest_obj, baseline, op): + msg = BASELINE_TRIGGERED + time.sleep(SETTLING_TIME) + try: + bsln = get_baseline_from_name(rest_obj, baseline) + except Exception: + bsln = baseline + if module.params.get("job_wait"): + job_failed, job_message = rest_obj.job_tracking( + baseline.get('TaskId'), job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL) + if job_failed is True: + module.fail_json(msg=job_message, baseline_status=bsln) + msg = BASELINE_UPDATED.format(op=op) + module.exit_json(msg=msg, baseline_status=bsln, changed=True) + + +def _get_baseline_payload(module, rest_obj): + cat_name = module.params.get("catalog_name") + cat_id, repo_id = get_catrepo_ids(module, cat_name, rest_obj) + if cat_id is None or repo_id is None: + module.fail_json(msg="No Catalog with name {0} found".format(cat_name)) + targets = get_target_list(module, rest_obj) + if targets is None: + module.fail_json(msg=NO_TARGETS_MESSAGE) + baseline_name = module.params.get("baseline_name") + baseline_payload = { + "Name": baseline_name, + "CatalogId": cat_id, + "RepositoryId": repo_id, + "Targets": targets + } + baseline_payload['Description'] = module.params.get("baseline_description") + de = module.params.get("downgrade_enabled") + 
baseline_payload['DowngradeEnabled'] = de if de is not None else True + sfb = module.params.get("is_64_bit") + baseline_payload['Is64Bit'] = sfb if sfb is not None else True + return baseline_payload + + +def create_baseline(module, rest_obj): + myparams = module.params + if not any([myparams.get("device_ids"), myparams.get("device_service_tags"), myparams.get("device_group_names")]): + module.fail_json(msg=NO_TARGETS_MESSAGE) + if not myparams.get("catalog_name"): + module.fail_json(msg=NO_CATALOG_MESSAGE) + payload = _get_baseline_payload(module, rest_obj) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + resp = rest_obj.invoke_request("POST", BASELINE_URI, data=payload) + exit_baseline(module, rest_obj, resp.json_data, 'created') + + +def update_modify_payload(module, rest_obj, modify_payload, current_baseline): + paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit'] + diff_tuple = recursive_diff(modify_payload, current_baseline) + diff = 0 + payload = dict([(item, current_baseline.get(item)) for item in paylist]) + if diff_tuple: + if diff_tuple[0]: + diff += 1 + payload.update(diff_tuple[0]) + payload['Targets'] = current_baseline.get('Targets', []) + inp_targets_list = get_target_list(module, rest_obj) + if inp_targets_list: + inp_target_dict = dict([(item['Id'], item['Type']['Id']) for item in inp_targets_list]) + cur_target_dict = dict([(item['Id'], item['Type']['Id']) for item in current_baseline.get('Targets', [])]) + diff_tuple = recursive_diff(inp_target_dict, cur_target_dict) + if diff_tuple: + diff += 1 + payload['Targets'] = inp_targets_list + if diff == 0: + module.exit_json(msg=NO_CHANGES_MSG) + payload['Id'] = current_baseline['Id'] + return payload + + +def modify_baseline(module, rest_obj, baseline_list): + d = baseline_list[0] + if d["TaskStatusId"] == 2050: + module.fail_json(msg=BASELINE_JOB_RUNNING.format(name=d["Name"], id=d["Id"]), job_id=d['TaskId']) + mparam = 
module.params + current_baseline = baseline_list[0] + modify_payload = {} + if mparam.get('catalog_name'): + cat_id, repo_id = get_catrepo_ids(module, mparam.get('catalog_name'), rest_obj) + if cat_id is None or repo_id is None: + module.fail_json(msg="No Catalog with name {0} found".format(mparam.get('catalog_name'))) + modify_payload["CatalogId"] = cat_id + modify_payload["RepositoryId"] = repo_id + if mparam.get('new_baseline_name'): + modify_payload['Name'] = mparam.get('new_baseline_name') + if mparam.get("baseline_description"): + modify_payload['Description'] = mparam.get("baseline_description") + if module.params.get("downgrade_enabled") is not None: + modify_payload['DowngradeEnabled'] = module.params.get("downgrade_enabled") + if module.params.get("is_64_bit") is not None: + modify_payload['Is64Bit'] = module.params.get("is_64_bit") + payload = update_modify_payload(module, rest_obj, modify_payload, current_baseline) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + resp = rest_obj.invoke_request("PUT", ID_BASELINE_URI.format(Id=str(payload["Id"])), data=payload) + exit_baseline(module, rest_obj, resp.json_data, 'modified') + + +def delete_baseline(module, rest_obj, baseline_list): + delete_ids = [] + d = baseline_list[0] + if d["TaskStatusId"] == 2050: + module.fail_json(msg=BASELINE_JOB_RUNNING.format(name=d["Name"], id=d["Id"]), job_id=d['TaskId']) + delete_ids.append(d["Id"]) + delete_payload = {"BaselineIds": delete_ids} + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + rest_obj.invoke_request('POST', DELETE_BASELINE_URI, data=delete_payload) + module.exit_json(msg=BASELINE_DEL_SUCCESS, changed=True, baseline_id=delete_ids[0]) + + +def main(): + specs = { + "state": {"default": "present", "choices": ['present', 'absent']}, + "baseline_name": {"type": 'str'}, + "baseline_id": {"type": 'int'}, + "baseline_description": {"type": 'str'}, + "new_baseline_name": {"type": 'str'}, + "catalog_name": 
{"type": 'str'}, + "downgrade_enabled": {"type": 'bool'}, + "is_64_bit": {"type": 'bool'}, + "device_ids": {"type": 'list', "elements": 'int'}, + "device_service_tags": {"type": 'list', "elements": 'str'}, + "device_group_names": {"type": 'list', "elements": 'str'}, + "job_wait": {"type": 'bool', "default": True}, + "job_wait_timeout": {"type": 'int', "default": 600} + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[ + ('device_ids', 'device_service_tags', 'device_group_names'), + ('baseline_name', 'baseline_id') + ], + required_one_of=[('baseline_name', 'baseline_id')], + supports_check_mode=True) + + try: + with RestOME(module.params, req_session=True) as rest_obj: + baseline_list = check_existing_baseline(module, rest_obj) + if module.params.get('state') == 'absent': + if baseline_list: + delete_baseline(module, rest_obj, baseline_list) + module.exit_json(msg=NO_CHANGES_MSG) + else: + if baseline_list: + modify_baseline(module, rest_obj, baseline_list) + else: + if module.params.get('baseline_id'): + module.fail_json(msg=INVALID_BASELINE_ID) + create_baseline(module, rest_obj) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py new file mode 100644 index 00000000..9e138a00 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py @@ -0,0 +1,420 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# 
Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_firmware_baseline_compliance_info +short_description: Retrieves baseline compliance details on OpenManage Enterprise +version_added: "2.0.0" +description: + - This module allows to retrieve firmware compliance for a list of devices, + or against a specified baseline on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + baseline_name: + description: + - Name of the baseline, for which the device compliance report is generated. + - This option is mandatory for generating baseline based device compliance report. + - I(baseline_name) is mutually exclusive with I(device_ids), I(device_service_tags) and I(device_group_names). + type: str + device_ids: + description: + - A list of unique identifier for device based compliance report. + - Either I(device_ids), I(device_service_tags) or I(device_group_names) + is required to generate device based compliance report. + - I(device_ids) is mutually exclusive with I(device_service_tags), + I(device_group_names) and I(baseline_name). + - Devices without reports are ignored. + type: list + elements: int + device_service_tags: + description: + - A list of service tags for device based compliance report. + - Either I(device_ids), I(device_service_tags) or I(device_group_names) + is required to generate device based compliance report. + - I(device_service_tags) is mutually exclusive with I(device_ids), + I(device_group_names) and I(baseline_name). + - Devices without reports are ignored. + type: list + elements: str + device_group_names: + description: + - A list of group names for device based compliance report. 
+ - Either I(device_ids), I(device_service_tags) or I(device_group_names) + is required to generate device based compliance report. + - I(device_group_names) is mutually exclusive with I(device_ids), + I(device_service_tags) and I(baseline_name). + - Devices without reports are ignored. + type: list + elements: str +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Retrieves device based compliance report for specified device IDs + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_ids: + - 11111 + - 22222 + +- name: Retrieves device based compliance report for specified service Tags + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tags: + - MXL1234 + - MXL4567 + +- name: Retrieves device based compliance report for specified group names + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_group_names: + - "group1" + - "group2" + +- name: Retrieves device compliance report for a specified baseline + dellemc.openmanage.ome_firmware_baseline_compliance_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall baseline compliance report status. + returned: on error + sample: "Failed to fetch the compliance baseline information." +baseline_compliance_info: + type: dict + description: Details of the baseline compliance report. 
+ returned: success + sample: [ + { + "CatalogId": 53, + "ComplianceSummary": { + "ComplianceStatus": "CRITICAL", + "NumberOfCritical": 2, + "NumberOfDowngrade": 0, + "NumberOfNormal": 0, + "NumberOfWarning": 0 + }, + "Description": "", + "DeviceComplianceReports": [ + { + "ComplianceStatus": "CRITICAL", + "ComponentComplianceReports": [ + { + "ComplianceDependencies": [], + "ComplianceStatus": "DOWNGRADE", + "Criticality": "Ok", + "CurrentVersion": "OSC_1.1", + "Id": 1258, + "ImpactAssessment": "", + "Name": "OS COLLECTOR 2.1", + "Path": "FOLDER04118304M/2/Diagnostics_Application_JCCH7_WN64_4.0_A00_01.EXE", + "PrerequisiteInfo": "", + "RebootRequired": false, + "SourceName": "DCIM:INSTALLED#802__OSCollector.Embedded.1", + "TargetIdentifier": "101734", + "UniqueIdentifier": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "UpdateAction": "DOWNGRADE", + "Uri": "http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX", + "Version": "4.0" + }, + { + "ComplianceDependencies": [], + "ComplianceStatus": "CRITICAL", + "Criticality": "Recommended", + "CurrentVersion": "DN02", + "Id": 1259, + "ImpactAssessment": "", + "Name": "TOSHIBA AL14SE 1.8 TB 2.5 12Gb 10K 512n SAS HDD Drive", + "Path": "FOLDER04086111M/1/SAS-Drive_Firmware_VDGFM_WN64_DN03_A00.EXE", + "PrerequisiteInfo": "", + "RebootRequired": true, + "SourceName": "DCIM:INSTALLED#304_C_Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1", + "TargetIdentifier": "103730", + "UniqueIdentifier": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "UpdateAction": "UPGRADE", + "Uri": "http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX", + "Version": "DN03" + } + ], + "DeviceId": 11603, + "DeviceModel": "PowerEdge R630", + "DeviceName": null, + "DeviceTypeId": 1000, + "DeviceTypeName": "CPGCGS", + "FirmwareStatus": "Non-Compliant", + "Id": 194, + "RebootRequired": true, + "ServiceTag": "MXL1234" + } + ], + "DowngradeEnabled": true, + "Id": 53, + "Is64Bit": false, + "LastRun": 
"2019-09-27 05:08:16.301", + "Name": "baseline1", + "RepositoryId": 43, + "RepositoryName": "catalog2", + "RepositoryType": "CIFS", + "Targets": [ + { + "Id": 11603, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "TaskId": 11710, + "TaskStatusId": 0 + } + ] +error_info: + type: dict + description: Details of http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to retrieve baseline list either because the device ID(s) entered are invalid", + "Resolution": "Make sure the entered device ID(s) are valid and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +base_line_path = "UpdateService/Baselines" +baselines_report_by_device_ids_path = "UpdateService/Actions/UpdateService.GetBaselinesReportByDeviceids" +device_is_list_path = "DeviceService/Devices" +baselines_compliance_report_path = "UpdateService/Baselines({Id})/DeviceComplianceReports" +group_service_path = "GroupService/Groups" +EXIT_MESSAGE = "Unable to retrieve baseline list either because the device ID(s) entered are invalid, " \ + "the ID(s) provided are not associated with a baseline or a group is used as a target for a baseline." +MSG_ID = "CUPD3090" + + +def _get_device_id_from_service_tags(service_tags, rest_obj, module): + """ + Get device ids from device service tag + Returns :dict : device_id to service_tag map + :arg service_tags: service tag + :arg rest_obj: RestOME class object in case of request with session. 
+ :returns: dict eg: {1345:"MXL1245"} + """ + try: + resp = rest_obj.get_all_report_details("DeviceService/Devices") + devices_list = resp["report_list"] + if devices_list: + service_tag_dict = {} + for item in devices_list: + if item["DeviceServiceTag"] in service_tags: + service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]}) + return service_tag_dict + else: + module.exit_json(msg="Unable to fetch the device information.", baseline_compliance_info=[]) + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def get_device_ids_from_group_ids(module, grou_id_list, rest_obj): + try: + device_id_list = [] + for group_id in grou_id_list: + group_id_path = group_service_path + "({group_id})/Devices".format(group_id=group_id) + resp_val = rest_obj.get_all_items_with_pagination(group_id_path) + grp_list_value = resp_val["value"] + if grp_list_value: + for device_item in grp_list_value: + device_id_list.append(device_item["Id"]) + if len(device_id_list) == 0: + module.exit_json(msg="Unable to fetch the device ids from specified device_group_names.", + baseline_compliance_info=[]) + return device_id_list + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def get_device_ids_from_group_names(module, rest_obj): + try: + grp_name_list = module.params.get("device_group_names") + resp = rest_obj.get_all_report_details(group_service_path) + group_id_list = [] + grp_list_resp = resp["report_list"] + if grp_list_resp: + for name in grp_name_list: + for group in grp_list_resp: + if group["Name"] == name: + group_id_list.append(group['Id']) + break + else: + module.exit_json(msg="Unable to fetch the specified device_group_names.", + baseline_compliance_info=[]) + return get_device_ids_from_group_ids(module, group_id_list, rest_obj) + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def 
get_identifiers(rest_obj, module): + if module.params.get("device_ids") is not None: + return module.params.get("device_ids"), "device_ids" + elif module.params.get("device_group_names") is not None: + return get_device_ids_from_group_names(module, rest_obj), "device_group_names" + else: + service_tags = module.params.get("device_service_tags") + service_tags_mapper = _get_device_id_from_service_tags(service_tags, rest_obj, module) + return list(service_tags_mapper.keys()), "device_service_tags" + + +def get_baseline_id_from_name(rest_obj, module): + try: + baseline_name = module.params.get("baseline_name") + baseline_id = 0 + if baseline_name is not None: + resp_val = rest_obj.get_all_items_with_pagination(base_line_path) + baseline_list = resp_val["value"] + if baseline_list: + for baseline in baseline_list: + if baseline["Name"] == baseline_name: + baseline_id = baseline["Id"] + break + else: + module.exit_json(msg="Specified baseline_name does not exist in the system.", + baseline_compliance_info=[]) + else: + module.exit_json(msg="No baseline exists in the system.", baseline_compliance_info=[]) + else: + module.fail_json(msg="baseline_name is a mandatory option.") + return baseline_id + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def get_baselines_report_by_device_ids(rest_obj, module): + try: + device_ids, identifier = get_identifiers(rest_obj, module) + if device_ids or identifier == "device_ids": + resp = rest_obj.invoke_request('POST', baselines_report_by_device_ids_path, data={"Ids": device_ids}) + return resp.json_data + else: + identifier_map = { + "device_group_names": "Device details not available as the group name(s) provided are invalid.", + "device_service_tags": "Device details not available as the service tag(s) provided are invalid." 
+ } + message = identifier_map[identifier] + module.exit_json(msg=message) + except HTTPError as err: + err_message = json.load(err) + err_list = err_message.get('error', {}).get('@Message.ExtendedInfo', [{"Message": EXIT_MESSAGE}]) + if err_list: + err_reason = err_list[0].get("Message", EXIT_MESSAGE) + if MSG_ID in err_list[0].get('MessageId'): + module.exit_json(msg=err_reason) + raise err + except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def get_baseline_compliance_reports(rest_obj, module): + try: + baseline_id = get_baseline_id_from_name(rest_obj, module) + path = baselines_compliance_report_path.format(Id=baseline_id) + resp_val = rest_obj.get_all_items_with_pagination(path) + resp_data = resp_val["value"] + return resp_data + except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def validate_inputs(module): + module_params = module.params + device_service_tags = module_params.get("device_service_tags") + device_group_names = module_params.get("device_group_names") + device_ids = module_params.get("device_ids") + baseline_name = module_params.get("baseline_name") + if all(not identifer for identifer in [device_ids, device_service_tags, device_group_names, baseline_name]): + module.fail_json(msg="one of the following is required: device_ids, device_service_tags, " + "device_group_names, baseline_name to generate device based compliance report.") + + +def main(): + specs = { + "baseline_name": {"type": 'str', "required": False}, + "device_service_tags": {"required": False, "type": "list", "elements": 'str'}, + "device_ids": {"required": False, "type": "list", "elements": 'int'}, + "device_group_names": {"required": False, "type": "list", "elements": 'str'}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[['baseline_name', 'device_service_tags', 'device_ids', 'device_group_names']], + 
required_one_of=[['device_ids', 'device_service_tags', 'device_group_names', 'baseline_name']], + supports_check_mode=True + ) + try: + validate_inputs(module) + with RestOME(module.params, req_session=True) as rest_obj: + baseline_name = module.params.get("baseline_name") + if baseline_name is not None: + data = get_baseline_compliance_reports(rest_obj, module) + else: + data = get_baselines_report_by_device_ids(rest_obj, module) + if data: + module.exit_json(baseline_compliance_info=data) + else: + module.exit_json(msg="Unable to fetch the compliance baseline information.") + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py new file mode 100644 index 00000000..a9835916 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_firmware_baseline_info +short_description: Retrieves baseline details from OpenManage Enterprise +version_added: "2.0.0" +description: + - This module retrieves the list and details of all the baselines on OpenManage Enterprise. 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + baseline_name: + description: Name of the baseline.If I(baseline_name) is not provided, + all the available firmware baselines are returned. + type: str +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Retrieve details of all the available firmware baselines + dellemc.openmanage.ome_firmware_baseline_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Retrieve details of a specific firmware baseline identified by its baseline name + dellemc.openmanage.ome_firmware_baseline_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + baseline_name: "baseline_name" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall baseline information. + returned: on error + sample: "Successfully fetched firmware baseline information." +baseline_info: + type: dict + description: Details of the baselines. 
+ returned: success + sample: { + "@odata.id": "/api/UpdateService/Baselines(239)", + "@odata.type": "#UpdateService.Baselines", + "CatalogId": 22, + "ComplianceSummary": { + "ComplianceStatus": "CRITICAL", + "NumberOfCritical": 1, + "NumberOfDowngrade": 0, + "NumberOfNormal": 0, + "NumberOfWarning": 0 + }, + "Description": "baseline_description", + "DeviceComplianceReports@odata.navigationLink": "/api/UpdateService/Baselines(239)/DeviceComplianceReports", + "DowngradeEnabled": true, + "Id": 239, + "Is64Bit": true, + "LastRun": "2020-05-22 16:42:40.307", + "Name": "baseline_name", + "RepositoryId": 12, + "RepositoryName": "HTTP DELL", + "RepositoryType": "DELL_ONLINE", + "Targets": [ + { + "Id": 10342, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "TaskId": 41415, + "TaskStatusId": 2060 + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +def get_specific_baseline(module, baseline_name, resp_data): + """Get specific baseline.""" + baseline = None + for each in resp_data["value"]: + if each['Name'] == baseline_name: + baseline = each + break + else: + module.exit_json(msg="Unable to complete the operation because the requested baseline" + " with name '{0}' does not exist.".format(baseline_name), baseline_info=[]) + return baseline + + +def main(): + specs = { + "baseline_name": {"type": 'str', "required": False}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=False) as rest_obj: + baseline_name = module.params.get("baseline_name") + resp = rest_obj.invoke_request('GET', "UpdateService/Baselines") + data = resp.json_data + if 
len(data["value"]) == 0 and not baseline_name: + module.exit_json(msg="No baselines present.", baseline_info=[]) + if baseline_name is not None: + data = get_specific_baseline(module, baseline_name, data) + module.exit_json(msg="Successfully fetched firmware baseline information.", baseline_info=data) + except HTTPError as err: + if err.getcode() == 404: + module.fail_json(msg="404 Not Found.The requested resource is not available.") + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py new file mode 100644 index 00000000..29b7ed90 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py @@ -0,0 +1,644 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_firmware_catalog +short_description: Create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular +version_added: "2.0.0" +description: This module allows to create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular. 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + state: + description: + - C(present) creates or modifies a catalog. + - C(absent) deletes an existing catalog. + choices: [present, absent] + default: present + type: str + version_added: 3.4.0 + catalog_name: + type: list + elements: str + description: + - Name of the firmware catalog to be created. + - This option is mutually exclusive with I(catalog_id). + - Provide the list of firmware catalog names that are supported when I(state) is C(absent). + new_catalog_name: + type: str + description: + - New name of the firmware catalog. + version_added: 3.4.0 + catalog_id: + type: list + elements: int + description: + - ID of the catalog. + - This option is mutually exclusive with I(catalog_name). + - Provide the list of firmware catalog IDs that are supported when I(state) is C(absent). + version_added: 3.4.0 + catalog_description: + type: str + description: + - Description for the catalog. + source: + type: str + description: + - The IP address of the system where the firmware catalog is stored on the local network. + - By default, this option is set to downloads.dell.com when I(repository_type) is C(DELL_ONLINE). + source_path: + type: str + description: + - Specify the complete path of the catalog file location without the file name. + - This is option ignored when I(repository_type) is C(DELL_ONLINE). + file_name: + type: str + description: + - Catalog file name associated with the I(source_path). + - This option is ignored when I(repository_type) is C(DELL_ONLINE). + repository_type: + type: str + description: + - Type of repository. The supported types are NFS, CIFS, HTTP, HTTPS,and DELL_ONLINE. + choices: ["NFS", "CIFS", "HTTP", "HTTPS", "DELL_ONLINE"] + repository_username: + type: str + description: + - User name of the repository where the catalog is stored. + - This option is mandatory when I(repository_type) is CIFS. 
+ - This option is ignored when I(repository_type) is C(DELL_ONLINE). + repository_password: + type: str + description: + - Password to access the repository. + - This option is mandatory when I(repository_type) is CIFS. + - This option is ignored when I(repository_type) is C(DELL_ONLINE). + - C(NOTE) The module always reports the changed status, when this is provided. + repository_domain: + type: str + description: + - Domain name of the repository. + - This option is ignored when I(repository_type) is C(DELL_ONLINE). + check_certificate: + type: bool + description: + - The certificate warnings are ignored when I(repository_type) is HTTPS. If C(True). If not, certificate warnings + are not ignored. + default: False + job_wait: + description: + - Provides the option to wait for job completion. + - This option is applicable when I(state) is C(present). + type: bool + default: true + version_added: 3.4.0 + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 600 + version_added: 3.4.0 +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - If I(repository_password) is provided, then the module always reports the changed status. + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Create a catalog from HTTPS repository + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "HTTPS" + source: "downloads.dell.com" + source_path: "catalog" + file_name: "catalog.gz" + check_certificate: True + +- name: Create a catalog from HTTP repository + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "HTTP" + source: "downloads.dell.com" + source_path: "catalog" + file_name: "catalog.gz" + +- name: Create a catalog using CIFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "CIFS" + source: "192.167.0.1" + source_path: "cifs/R940" + file_name: "catalog.gz" + repository_username: "repository_username" + repository_password: "repository_password" + repository_domain: "repository_domain" + +- name: Create a catalog using NFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: "NFS" + source: "192.166.0.2" + source_path: "/nfs/R940" + file_name: "catalog.xml" + +- name: Create a catalog using repository from Dell.com + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "catalog_description" + repository_type: 
"DELL_ONLINE" + check_certificate: True + +- name: Modify a catalog using a repository from CIFS share + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_name: "catalog_name" + catalog_description: "new catalog_description" + repository_type: "CIFS" + source: "192.167.0.2" + source_path: "cifs/R941" + file_name: "catalog1.gz" + repository_username: "repository_username" + repository_password: "repository_password" + repository_domain: "repository_domain" + +- name: Modify a catalog using a repository from Dell.com + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + catalog_id: 10 + new_catalog_name: "new_catalog_name" + repository_type: "DELL_ONLINE" + catalog_description: "catalog_description" + +- name: Delete catalog using catalog name + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_name: ["catalog_name1", "catalog_name2"] + +- name: Delete catalog using catalog id + dellemc.openmanage.ome_firmware_catalog: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + catalog_id: [11, 34] +''' + +RETURN = r''' +--- +msg: + description: Overall status of the firmware catalog operation. + returned: always + type: str + sample: "Successfully triggered the job to create a catalog with Task ID : 10094" +catalog_status: + description: Details of the catalog operation. 
+ returned: When I(state) is C(present) + type: dict + sample: { + "AssociatedBaselines": [], + "BaseLocation": null, + "BundlesCount": 0, + "Filename": "catalog.gz", + "Id": 0, + "LastUpdated": null, + "ManifestIdentifier": null, + "ManifestVersion": null, + "NextUpdate": null, + "PredecessorIdentifier": null, + "ReleaseDate": null, + "ReleaseIdentifier": null, + "Repository": { + "CheckCertificate": true, + "Description": "HTTPS Desc", + "DomainName": null, + "Id": null, + "Name": "catalog4", + "Password": null, + "RepositoryType": "HTTPS", + "Source": "company.com", + "Username": null + }, + "Schedule": null, + "SourcePath": "catalog", + "Status": null, + "TaskId": 10094 + } +job_id: + description: Job ID of the catalog task. + returned: When catalog job is in a running state + type: int + sample: 10123 +catalog_id: + description: IDs of the deleted catalog. + returned: When I(state) is C(absent) + type: int + sample: 10123 +associated_baselines: + description: IDs of the baselines associated with catalog. + returned: When I(state) is C(absent) + type: list + elements: dict + sample: [ + { + "BaselineId": 24, + "BaselineName": "new" + }, + { + "BaselineId": 25, + "BaselineName": "c7" + }, + { + "BaselineId": 27, + "BaselineName": "c4" + } + ] +error_info: + type: dict + description: Details of the http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to create or update the catalog because a + repository with the same name already exists.", + "Resolution": "Enter a different name and retry the operation.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." 
+ } + } + +''' + +JOB_URI = "JobService/Jobs({TaskId})" +BASELINE_URI = "UpdateService/Baselines" +CATALOG_URI = "UpdateService/Catalogs" +CATALOG_URI_ID = "UpdateService/Catalogs({Id})" +DELETE_CATALOG_URI = "UpdateService/Actions/UpdateService.RemoveCatalogs" +CATALOG_JOB_RUNNING = "Catalog job '{name}' with ID {id} is running.Retry after job completion." +CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied." +CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No changes found to be applied." +INVALID_CATALOG_ID = "Invalid catalog ID provided." +CATALOG_DEL_SUCCESS = "Successfully deleted the firmware catalog(s)." +CATALOG_BASELINE_ATTACHED = "Unable to delete the catalog as it is with baseline(s)." +CATALOG_EXISTS = "The catalog with the name '{new_name}' already exists in the system." +DELL_ONLINE_EXISTS = "Catalog with 'DELL_ONLINE' repository already exists with the name '{catalog_name}'." +NAMES_ERROR = "Only delete operations accept multiple catalog names or IDs." +CATALOG_ID_NOT_FOUND = "Catalog with ID '{catalog_id}' not found." +CATALOG_NAME_NOT_FOUND = "Catalog '{catalog_name}' not found." +CATALOG_UPDATED = "Successfully {operation} the firmware catalog." 
JOB_POLL_INTERVAL = 10  # seconds between job-tracking polls
SETTLING_TIME = 3  # seconds to wait before the final catalog re-read

import json
import time
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError


def check_existing_catalog(module, rest_obj, state, name=None):
    """Look up catalogs matching the requested names/IDs.

    :param module: AnsibleModule; supplies catalog_name/catalog_id when
        ``name`` is not given.
    :param rest_obj: OME REST session object.
    :param state: 'present' or 'absent'; for 'present' the scan stops at the
        first match and a {repository name: repository type} map of ALL
        catalogs is also built.
    :param name: optional single catalog name overriding module params.
    :return: (list of matching catalog dicts, name->repository-type dict).
    """
    catalog_cfgs = []
    if name:
        catalog_id = None
        catalog_name = [name]
    else:
        catalog_id = module.params.get("catalog_id")
        catalog_name = module.params.get("catalog_name")
    resp = rest_obj.get_all_items_with_pagination(CATALOG_URI)
    catalogs_detail = resp.get("value")
    all_catalog = {}
    if state == "present":
        # Map of every repository name to its type; used later for duplicate
        # and DELL_ONLINE-uniqueness checks.
        all_catalog = dict(
            [(each_catalog["Repository"]["Name"], each_catalog["Repository"]["RepositoryType"]) for each_catalog in
             catalogs_detail])
    for each_catalog in catalogs_detail:
        if catalog_name:
            # Name matching takes precedence; catalog_id is only consulted
            # when no names were provided.
            if each_catalog['Repository']['Name'] in catalog_name:
                catalog_cfgs.append(each_catalog)
                if state == "present":
                    break
            continue
        if catalog_id:
            if each_catalog['Id'] in catalog_id:
                catalog_cfgs.append(each_catalog)
                if state == "present":
                    break
            continue
    return catalog_cfgs, all_catalog


def get_updated_catalog_info(module, rest_obj, catalog_resp):
    """Re-fetch the catalog entry after an operation.

    Falls back to the previously returned payload if the lookup fails for
    any reason (best-effort refresh).
    """
    try:
        catalog, all_catalog = check_existing_catalog(module, rest_obj, "present", name=catalog_resp["Repository"]["Name"])
    except Exception:
        catalog = catalog_resp
    return catalog[0]


def exit_catalog(module, rest_obj, catalog_resp, operation, msg):
    """Optionally track the catalog job, then exit the module (continues below).

    When job_wait is set, polls the task until completion and fails the
    module with the refreshed catalog state if the job failed.
    """
    if module.params.get("job_wait"):
        job_failed, job_message = rest_obj.job_tracking(
            catalog_resp.get('TaskId'), job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL)
        catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
        if job_failed is True:
            module.fail_json(msg=job_message, catalog_status=catalog)
# --- continuation: tail of exit_catalog (job_wait branch), cut above ---
        catalog_resp = catalog
        msg = CATALOG_UPDATED.format(operation=operation)
    # Short settling delay so the final read reflects the completed operation.
    time.sleep(SETTLING_TIME)
    catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
    module.exit_json(msg=msg, catalog_status=catalog, changed=True)


def _get_catalog_payload(params, name):
    """Build the POST/PUT payload for a catalog from module parameters.

    Only keys the user actually supplied are included. For DELL_ONLINE
    repositories the credential/domain fields are omitted and the source
    defaults to downloads.dell.com.

    :param params: module.params dict.
    :param name: repository name to embed in the payload.
    :return: dict payload with optional Filename/SourcePath and a Repository
        sub-dict stripped of None values.
    """
    catalog_payload = {}
    repository_type = params.get("repository_type")
    if params.get("file_name") is not None:
        catalog_payload["Filename"] = params["file_name"]
    if params.get("source_path") is not None:
        catalog_payload["SourcePath"] = params["source_path"]
    repository_dict = {
        "Name": name,
        "Description": params.get("catalog_description"),
        "RepositoryType": repository_type,
        "Source": params.get("source"),
        "CheckCertificate": params.get("check_certificate"),
    }
    if repository_type != "DELL_ONLINE":
        # Credentials/domain only apply to network-share repositories.
        repository_dict.update({"DomainName": params.get("repository_domain"),
                                "Username": params.get("repository_username"),
                                "Password": params.get("repository_password")
                                })
    if repository_type == "DELL_ONLINE" and not params.get("source"):
        repository_dict["Source"] = "downloads.dell.com"
    # Drop unset (None) keys so the API only sees provided values.
    repository_payload = dict([(k, v) for k, v in repository_dict.items() if v is not None])
    if repository_payload:
        catalog_payload["Repository"] = repository_payload
    return catalog_payload


def validate_dell_online(all_catalog, module):
    """Fail if a different DELL_ONLINE catalog already exists.

    OME permits only one catalog with the DELL_ONLINE repository type, so
    creation is rejected when one exists under another name.
    """
    catalog_name = module.params["catalog_name"][0]
    for name, repo_type in all_catalog.items():
        if repo_type == "DELL_ONLINE" and name != catalog_name:
            module.fail_json(
                msg=DELL_ONLINE_EXISTS.format(
                    catalog_name=name))


def create_catalog(module, rest_obj):
    """Create a new catalog and exit via exit_catalog (continues below)."""
    if module.check_mode:
        # A create is always a change in check mode.
        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
    payload = _get_catalog_payload(module.params, module.params.get("catalog_name")[0])
    resp = rest_obj.invoke_request("POST", CATALOG_URI, data=payload)
    resp_data = resp.json_data
    job_id =
# --- continuation: tail of create_catalog ('job_id =' assignment cut above) ---
resp_data.get("TaskId")
    msg = "Successfully triggered the job to create a catalog with Task Id : {0}".format(job_id)
    exit_catalog(module, rest_obj, resp_data, "created", msg)


def get_current_catalog_settings(current_payload):
    """Project an existing catalog API response into payload shape.

    Mirrors _get_catalog_payload's structure so the two can be diffed:
    optional Filename/SourcePath plus a Repository sub-dict with None
    values removed.
    """
    catalog_payload = {}
    if current_payload.get("Filename") is not None:
        catalog_payload["Filename"] = current_payload["Filename"]
    if current_payload.get("SourcePath") is not None:
        catalog_payload["SourcePath"] = current_payload["SourcePath"]
    repository_dict = {
        "Name": current_payload["Repository"].get("Name"),
        "Id": current_payload["Repository"].get("Id"),
        "Description": current_payload["Repository"].get("Description"),
        "RepositoryType": current_payload["Repository"].get("RepositoryType"),
        "Source": current_payload["Repository"].get("Source"),
        "DomainName": current_payload["Repository"].get("DomainName"),
        "Username": current_payload["Repository"].get("Username"),
        "Password": current_payload["Repository"].get("Password"),
        "CheckCertificate": current_payload["Repository"].get("CheckCertificate"),
    }
    repository_payload = dict([(k, v) for k, v in repository_dict.items() if v is not None])
    if repository_payload:
        catalog_payload["Repository"] = repository_payload
    return catalog_payload


def compare_payloads(modify_payload, current_payload):
    """Recursively compare the requested payload against the current one.

    :param modify_payload: payload created to update the existing setting.
    :param current_payload: payload already existing for the specified catalog.
    :return: bool - True if any requested key is missing from or differs in
        the current payload (i.e. a modification is needed); False otherwise.
        Keys present only in current_payload are ignored.
    """
    diff = False
    for key, val in modify_payload.items():
        if current_payload is None or current_payload.get(key) is None:
            return True
        elif isinstance(val, dict):
            # Recurse into nested structures such as 'Repository'.
            if compare_payloads(val, current_payload.get(key)):
                return True
        elif val != current_payload.get(key):
            return True
    return diff


def modify_catalog(module, rest_obj, catalog_list, all_catalog):
    """Modify an existing catalog (continues below)."""
    params
# --- continuation: body of modify_catalog ('params =' assignment cut above) ---
= module.params
    catalog_id = catalog_list[0]["Id"]
    name = catalog_list[0]["Repository"]["Name"]
    modify_payload = _get_catalog_payload(module.params, name)
    new_catalog_name = params.get("new_catalog_name")
    if new_catalog_name:
        # Renaming: reject if the target name is already taken by another catalog.
        if new_catalog_name != name and new_catalog_name in all_catalog:
            module.fail_json(msg=CATALOG_EXISTS.format(new_name=new_catalog_name))
        modify_payload["Repository"]["Name"] = new_catalog_name
    catalog_payload = get_current_catalog_settings(catalog_list[0])
    # The repository type of an existing catalog cannot be switched.
    if modify_payload.get("Repository") and \
            modify_payload.get("Repository").get("RepositoryType") and \
            modify_payload.get("Repository").get("RepositoryType") != catalog_payload["Repository"]["RepositoryType"]:
        module.fail_json(msg="Repository type cannot be changed to another repository type.")
    # Exclude the repository Id from the diff, but restore it in the final
    # PUT payload afterwards.
    new_catalog_current_setting = catalog_payload.copy()
    repo_id = new_catalog_current_setting["Repository"]["Id"]
    del new_catalog_current_setting["Repository"]["Id"]
    diff = compare_payloads(modify_payload, new_catalog_current_setting)
    if module.check_mode and diff:
        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
    if not diff:
        # Idempotent: nothing to change.
        module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
    # Merge requested changes over the current settings for the PUT body.
    new_catalog_current_setting["Repository"].update(modify_payload["Repository"])
    catalog_payload.update(modify_payload)
    catalog_payload["Repository"] = new_catalog_current_setting["Repository"]
    catalog_payload["Repository"]["Id"] = repo_id
    catalog_payload["Id"] = catalog_id
    catalog_put_uri = CATALOG_URI_ID.format(Id=catalog_id)
    resp = rest_obj.invoke_request('PUT', catalog_put_uri, data=catalog_payload)
    resp_data = resp.json_data
    job_id = resp_data.get("TaskId")
    msg = "Successfully triggered the job to update a catalog with Task Id : {0}".format(job_id)
    exit_catalog(module, rest_obj, resp_data, "modified", msg)


def validate_delete_operation(rest_obj, module, catalog_list, delete_ids):
    """Guard the delete path: baselines attached or jobs running (continues below)."""
    associated_baselines = []
    for catalog in
# --- continuation: loop header of validate_delete_operation cut above ---
catalog_list:
        if catalog.get('AssociatedBaselines'):
            associated_baselines.append({"catalog_id": catalog["Id"],
                                         "associated_baselines": catalog.get("AssociatedBaselines")})
        if catalog.get('Status') != "Completed":
            resp = rest_obj.invoke_request("GET", JOB_URI.format(TaskId=catalog['TaskId']))
            job_data = resp.json_data
            # NOTE(review): 2050 appears to be the OME "running" job status id
            # — confirm against the OME job status code reference.
            if job_data['LastRunStatus']['Id'] == 2050:
                module.fail_json(msg=CATALOG_JOB_RUNNING.format(name=catalog["Name"], id=catalog["Id"]),
                                 job_id=catalog['TaskId'])
    if associated_baselines:
        # Catalogs with attached baselines cannot be deleted.
        module.fail_json(msg=CATALOG_BASELINE_ATTACHED, associated_baselines=associated_baselines)
    if module.check_mode and len(catalog_list) > 0:
        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True, catalog_id=delete_ids)
    if len(catalog_list) == 0:
        # Nothing matched: idempotent no-op.
        module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)


def delete_catalog(module, rest_obj, catalog_list):
    """Delete the given catalogs after validation and exit the module."""
    delete_ids = [d["Id"] for d in catalog_list]
    validate_delete_operation(rest_obj, module, catalog_list, delete_ids)
    delete_payload = {"CatalogIds": delete_ids}
    rest_obj.invoke_request('POST', DELETE_CATALOG_URI, data=delete_payload)
    module.exit_json(msg=CATALOG_DEL_SUCCESS, changed=True, catalog_id=delete_ids)


def validate_names(state, module):
    """Fail when state C(present) is given more than one catalog name or ID.

    Only delete operations (state C(absent)) accept multiple names/IDs.
    """
    catalog_name = module.params.get("catalog_name", [])
    catalog_id = module.params.get("catalog_id", [])
    if state != "absent" and ((catalog_name and len(catalog_name) > 1) or (catalog_id and len(catalog_id) > 1)):
        module.fail_json(msg=NAMES_ERROR)


def perform_present_action(module, rest_obj, requested_catalog_list, all_catalog):
    """Dispatch state=present to modify (catalog found) or create (continues below)."""
    if requested_catalog_list:
        modify_catalog(module, rest_obj, requested_catalog_list, all_catalog)
    else:
        # A catalog_id that matched nothing cannot be created — IDs are
        # server-assigned, so this is an error rather than a create.
        if module.params.get('catalog_id'):
            module.fail_json(msg=INVALID_CATALOG_ID)
        repository_type = module.params.get("repository_type")
        if repository_type and repository_type == "DELL_ONLINE":
# --- continuation: tail of perform_present_action (DELL_ONLINE branch) cut above ---
            validate_dell_online(all_catalog, module)
        create_catalog(module, rest_obj)


def main():
    """Module entry point for ome_firmware_catalog (tail continues below)."""
    specs = {
        "state": {"default": "present", "choices": ['present', 'absent']},
        "catalog_name": {"type": 'list', "elements": 'str'},
        "new_catalog_name": {"type": 'str'},
        "catalog_id": {"type": 'list', "elements": 'int'},
        "catalog_description": {"required": False, "type": 'str'},
        "source": {"required": False, "type": 'str'},
        "source_path": {"required": False, "type": 'str'},
        "file_name": {"required": False, "type": 'str'},
        "repository_type": {"required": False,
                            "choices": ["NFS", "CIFS", "HTTP", "HTTPS", "DELL_ONLINE"]},
        "repository_username": {"required": False, "type": 'str'},
        "repository_password": {"required": False, "type": 'str', "no_log": True},
        "repository_domain": {"required": False, "type": 'str'},
        "check_certificate": {"required": False, "type": 'bool', "default": False},
        "job_wait": {"type": 'bool', "default": True},
        "job_wait_timeout": {"type": 'int', "default": 600}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        # state=present requires repository_type, and at least one of the
        # catalog attributes must be supplied.
        required_if=[
            ['state', 'present',
             ['repository_type'], False],
            ['state', 'present',
             ['new_catalog_name', 'catalog_description', 'catalog_name', 'catalog_id', 'source', 'source_path',
              'file_name', 'repository_type', 'repository_username', 'repository_password',
              'repository_domain', 'check_certificate'], True],
        ],
        mutually_exclusive=[('catalog_name', 'catalog_id')],
        required_one_of=[('catalog_name', 'catalog_id')],
        supports_check_mode=True)

    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            state = module.params['state']
            validate_names(state, module)
            requested_catalog_list, all_catalog = check_existing_catalog(module, rest_obj, state)
            if state == 'absent':
                delete_catalog(module, rest_obj, requested_catalog_list)
            else:
                perform_present_action(module, rest_obj, requested_catalog_list, all_catalog)
    except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py new file mode 100644 index 00000000..411a6221 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py @@ -0,0 +1,452 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_groups +short_description: Manages static device groups on OpenManage Enterprise +description: This module allows to create, modify, and delete static device groups on OpenManage Enterprise. +version_added: "3.5.0" +author: + - Jagadeesh N V(@jagadeeshnv) +extends_documentation_fragment: + - dellemc.openmanage.oment_auth_options +options: + state: + type: str + description: + - C(present) allows to create or modify a device group. + - C(absent) allows to delete a device group. + choices: [present, absent] + default: present + name: + type: list + elements: str + description: + - Name of the device group to be created, modified, or deleted. + - If I(state) is absent, multiple names can be provided. + - This option is case insensitive. + - This option is mutually exclusive with I(group_id). + group_id: + type: list + elements: int + description: + - ID of the device group to be created, modified, or deleted. 
+ - If I(state) is absent, multiple IDs can be provided. + - This option is mutually exclusive with I(name). + new_name: + type: str + description: + - New name for the existing device group. + - This is applicable only when I(state) is C(present). + description: + type: str + description: + - Description for the device group. + - This is applicable only when I(state) is C(present). + parent_group_name: + type: str + default: "Static Groups" + description: + - Name of the parent device group under which the device group to be created or modified. + - This is applicable only when I(state) is C(present). + - C(NOTE) If device group with such a name does not exist, device group with I(parent_group_name) is created. + - This option is case insensitive. + - This option is mutually exclusive with I(parent_group_id). + parent_group_id: + type: int + description: + - ID of the parent device group under which the device group to be created or modified. + - This is applicable only when I(state) is C(present). + - This option is mutually exclusive with I(parent_group_name). +requirements: + - "python >= 3.8.6" +notes: + - This module manages only static device groups on Dell EMC OpenManage Enterprise. + - If a device group with the name I(parent_group_name) does not exist, a new device group with the same name is created. + - Make sure the entered parent group is not the descendant of the provided group. + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise. + - This module supports C(check_mode). 
+""" + +EXAMPLES = """ +--- +- name: Create a new device group + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "group 1" + description: "Group 1 description" + parent_group_name: "group parent 1" + +- name: Modify a device group using the group ID + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + group_id: 1234 + description: "Group description updated" + parent_group_name: "group parent 2" + +- name: Delete a device group using the device group name + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + name: "group 1" + +- name: Delete multiple device groups using the group IDs + dellemc.openmanage.ome_groups: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: absent + group_id: + - 1234 + - 5678 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the device group operation. + returned: always + sample: "Successfully deleted the device group(s)." +group_status: + description: Details of the device group operation status. + returned: success + type: dict + sample: { + "Description": "my group description", + "Id": 12123, + "MembershipTypeId": 12, + "Name": "group 1", + "ParentId": 12345, + "TypeId": 3000, + "IdOwner": 30, + "CreatedBy": "admin", + "CreationTime": "2021-01-01 10:10:10.100", + "DefinitionDescription": "UserDefined", + "DefinitionId": 400, + "GlobalStatus": 5000, + "HasAttributes": false, + "UpdatedBy": "", + "UpdatedTime": "2021-01-01 11:11:10.100", + "Visible": true + } +group_ids: + type: list + elements: int + description: List of the deleted device group IDs. 
+ returned: when I(state) is C(absent) + sample: [1234, 5678] +invalid_groups: + type: list + elements: str + description: List of the invalid device group IDs or names. + returned: when I(state) is C(absent) + sample: [1234, 5678] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGRP9013", + "RelatedProperties": [], + "Message": "Unable to update group 12345 with the provided parent 54321 because a group/parent + relationship already exists.", + "MessageArgs": [ + "12345", + "54321" + ], + "Severity": "Warning", + "Resolution": "Make sure the entered parent ID does not create a bidirectional relationship and retry + the operation." + } + ] + } +} +""" + +import json +import time +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +GROUP_URI = "GroupService/Groups" +OP_URI = "GroupService/Actions/GroupService.{op}Group" +# GROUPS_HIERARCHY = "GroupService/AllGroupsHierarchy" +MULTIPLE_GROUPS_MSG = "Provide only one unique device group when state is present." +NONEXIST_GROUP_ID = "A device group with the provided ID does not exist." +NONEXIST_PARENT_ID = "A parent device group with the provided ID does not exist." +INVALID_PARENT = "The provided parent device group is not a valid user-defined static device group." +INVALID_GROUPS_DELETE = "Provide valid static device group(s) for deletion." +INVALID_GROUPS_MODIFY = "Provide valid static device group for modification." +PARENT_CREATION_FAILED = "Unable to create a parent device group with the name {pname}." 
CREATE_SUCCESS = "Successfully {op}d the device group."
GROUP_PARENT_SAME = "Provided parent and the device group cannot be the same."
GROUP_NAME_EXISTS = "Unable to rename the group because a group with the provided name '{gname}' already exists."
DELETE_SUCCESS = "Successfully deleted the device group(s)."
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
STATIC_ROOT = 'Static Groups'
SETTLING_TIME = 2  # seconds to let OME persist a create/update before reading it back


def get_valid_groups(module, rest_obj, group_arg, group_set):
    """Scan all OME groups and resolve the user's target group(s), the
    requested parent group and the built-in 'Static Groups' root.

    :param module: AnsibleModule; ``state`` and ``parent_group_*`` params are consulted.
    :param rest_obj: open RestOME session.
    :param group_arg: 'Name' or 'Id' - how the user addressed the group(s).
    :param group_set: set of lower-cased names/ids supplied by the user.
    :return: tuple ``(group_dict, parent, static_root)``. For ``state=absent``
        ``group_dict`` maps each matched lower-cased name/id to its group dict;
        otherwise it is the single matched group dict (or ``{}`` when not found).
    """
    parent = {}
    static_root = {}
    group_dict = {}
    group_resp = rest_obj.get_all_items_with_pagination(GROUP_URI)
    if module.params.get('state') == 'absent':
        group_dict = {str(g[group_arg]).lower(): g for g in group_resp.get("value")
                      if str(g[group_arg]).lower() in group_set}
    else:
        parg = module.params.get('parent_group_id')
        if parg:  # check id first: parent_group_name always carries a default
            pkey = 'Id'
        else:
            pkey = 'Name'
            parg = module.params.get('parent_group_name')
        found = 0
        for grp in group_resp.get("value"):
            if str(grp[group_arg]).lower() in group_set:
                group_dict = grp
                found += 1
            if str(grp[pkey]).lower() == str(parg).lower():
                parent = grp
                found += 1
            if grp['Name'] == STATIC_ROOT:
                static_root = grp
                found += 1
            if found == 3:  # target, parent and static root all located
                break
    return group_dict, parent, static_root


def is_valid_static_group(grp):
    """Return True when *grp* is a user-manageable static group
    (TypeId 3000 with static membership; query groups use MembershipTypeId 24)."""
    return grp['TypeId'] == 3000 and grp['MembershipTypeId'] == 12


def create_parent(rest_obj, module, static_root):
    """Create the group named in ``parent_group_name`` under the static root
    and return its id.

    On any failure the static root id is returned as a silent fallback.
    NOTE(review): PARENT_CREATION_FAILED is defined at module level but never
    raised - confirm whether a hard failure was intended here instead of the
    silent fallback (behavior kept for backward compatibility).
    """
    try:
        payload = {
            'MembershipTypeId': 12,  # Static members
            'Name': module.params.get('parent_group_name'),
            'ParentId': static_root['Id'],
        }
        prt_resp = rest_obj.invoke_request('POST', OP_URI.format(op='Create'), data={"GroupModel": payload})
        return int(prt_resp.json_data)
    except Exception:
        return static_root['Id']


def _validated_parent_id(module, parent):
    """Fail the module unless *parent* is the static root or a valid
    user-defined static group; return its id."""
    if parent['Name'] != STATIC_ROOT:
        if not is_valid_static_group(parent):
            module.fail_json(msg=INVALID_PARENT)
    return parent['Id']


def get_parent_id(rest_obj, module, parent, static_root):
    """Resolve the parent group id for a create/modify operation.

    Precedence: an explicit ``parent_group_id`` (must exist), then an existing
    group matching ``parent_group_name`` (created on the fly when missing),
    else the static root. In check mode a dummy id 0 stands in for a parent
    that would have been created.
    """
    parent_id = module.params.get("parent_group_id")
    if parent_id:  # check id first: parent_group_name always carries a default
        if not parent:
            module.fail_json(msg=NONEXIST_PARENT_ID)
        return _validated_parent_id(module, parent)
    if parent:
        return _validated_parent_id(module, parent)
    if module.check_mode:
        return 0  # nothing is created in check mode
    prtid = create_parent(rest_obj, module, static_root)
    time.sleep(SETTLING_TIME)  # let OME persist the new parent before use
    return prtid


def get_ome_group_by_name(rest_obj, name):
    """Return the group dict whose Name equals *name* exactly, or {} when not
    found or on any request error (best-effort lookup)."""
    try:
        resp = rest_obj.invoke_request("GET", GROUP_URI, query_param={"$filter": "Name eq '{0}'".format(name)})
        group_resp = resp.json_data.get('value')
        if group_resp:
            return group_resp[0]
    except Exception:
        pass
    return {}


def get_ome_group_by_id(rest_obj, group_id):
    """Return the group dict for *group_id*, or {} on any request error.

    Parameter renamed from ``id`` to avoid shadowing the builtin; all callers
    in this module pass it positionally.
    """
    try:
        resp = rest_obj.invoke_request('GET', GROUP_URI + "({0})".format(group_id))
        return resp.json_data
    except Exception:
        return {}


def exit_group_operation(module, rest_obj, payload, operation):
    """POST the Create/Update group action, read the group back and exit the
    module with the result. Never returns."""
    group_resp = rest_obj.invoke_request('POST', OP_URI.format(op=operation), data={"GroupModel": payload})
    cid = int(group_resp.json_data)
    time.sleep(SETTLING_TIME)  # give OME time to persist before reading back
    try:
        grp = get_ome_group_by_id(rest_obj, cid)
        group = rest_obj.strip_substr_dict(grp)
    except Exception:
        # Read-back failed: echo the request payload (with the new id) instead.
        payload['Id'] = cid
        group = payload
    module.exit_json(changed=True, msg=CREATE_SUCCESS.format(op=operation.lower()), group_status=group)


def create_group(rest_obj, module, parent, static_root):
    """Create a new static device group. Never returns (exit_json/fail_json).

    Bugfix: the name-vs-parent_group_name comparison is now skipped when the
    parent is addressed by ``parent_group_id`` - in that case
    ``parent_group_name`` merely holds its default ("Static Groups") and a
    coincidental match with the new group's name must not be rejected as a
    self-parent error.
    """
    mparams = module.params
    payload = {'MembershipTypeId': 12,  # Static members
               'Name': mparams.get('name')[0]}
    if not mparams.get('parent_group_id') and \
            mparams.get('parent_group_name').lower() == payload['Name'].lower():
        module.fail_json(msg=GROUP_PARENT_SAME)
    payload['ParentId'] = get_parent_id(rest_obj, module, parent, static_root)
    if mparams.get('description'):
        payload['Description'] = mparams.get('description')
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    exit_group_operation(module, rest_obj, payload, 'Create')


def modify_group(rest_obj, module, valid_group_dict, parent, static_root):
    """Apply name/description/parent changes to an existing static group.

    Exits with NO_CHANGES_MSG when nothing differs, CHANGES_FOUND in check
    mode, or the update result. Never returns.
    """
    if not is_valid_static_group(valid_group_dict):
        module.fail_json(msg=INVALID_GROUPS_MODIFY)
    grp = valid_group_dict
    payload = {k: grp.get(k) for k in ("Name", "Description", "MembershipTypeId", "ParentId", "Id")}
    diff = 0
    new_name = module.params.get('new_name')
    if new_name and new_name != payload['Name']:
        # Refuse to rename onto an existing group's name.
        if get_ome_group_by_name(rest_obj, new_name):
            module.fail_json(msg=GROUP_NAME_EXISTS.format(gname=new_name))
        payload['Name'] = new_name
        diff += 1
    desc = module.params.get('description')
    if desc and desc != payload['Description']:
        payload['Description'] = desc
        diff += 1
    parent_id = get_parent_id(rest_obj, module, parent, static_root)
    if parent_id == payload['Id']:
        module.fail_json(msg=GROUP_PARENT_SAME)
    if parent_id != payload['ParentId']:
        payload['ParentId'] = parent_id
        diff += 1
    if diff == 0:
        module.exit_json(msg=NO_CHANGES_MSG, group_status=rest_obj.strip_substr_dict(grp))
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    exit_group_operation(module, rest_obj, payload, 'Update')


def delete_groups(rest_obj, module, group_set, group_dict):
    """Delete the static groups addressed in *group_set*; fail first, listing
    any entries that are not valid static groups. Never returns."""
    deletables = []
    invalids = []
    for g in group_set:
        grp = group_dict.get(str(g).lower())
        if grp:
            if is_valid_static_group(grp):  # For Query Groups MembershipTypeId = 24
                deletables.append(grp['Id'])
            else:
                invalids.append(g)
    if invalids:
        module.fail_json(msg=INVALID_GROUPS_DELETE, invalid_groups=invalids)
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND, group_ids=deletables)
    rest_obj.invoke_request("POST", OP_URI.format(op='Delete'), data={"GroupIds": deletables})
    module.exit_json(changed=True, msg=DELETE_SUCCESS, group_ids=deletables)


def main():
    """Module entry point: parse arguments and dispatch to
    delete/modify/create based on state and whether the group exists."""
    specs = {
        "name": {"type": "list", "elements": 'str'},
        "group_id": {"type": "list", "elements": 'int'},
        "state": {"type": "str", "choices": ["present", "absent"], "default": "present"},
        "description": {"type": "str"},
        "new_name": {"type": "str"},
        "parent_group_name": {"type": "str", "default": STATIC_ROOT},
        "parent_group_id": {"type": "int"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ["state", "present", ("new_name", "description", "parent_group_name", "parent_group_id"), True],
        ],
        mutually_exclusive=[
            ("name", "group_id"), ("parent_group_name", "parent_group_id"),
        ],
        required_one_of=[("name", "group_id")],
        supports_check_mode=True
    )

    try:
        if module.params.get('name'):
            group_arg = 'Name'
            group_set = set(v.lower() for v in module.params.get('name'))
        else:
            group_arg = 'Id'
            group_set = set(str(v).lower() for v in module.params.get('group_id'))
        # Creation/modification addresses exactly one group; deletion may list many.
        if len(group_set) != 1 and module.params['state'] == 'present':
            module.fail_json(msg=MULTIPLE_GROUPS_MSG)
        with RestOME(module.params, req_session=True) as rest_obj:
            valid_group_dict, parent, static_root = get_valid_groups(module, rest_obj, group_arg, group_set)
            if module.params["state"] == "absent":
                if valid_group_dict:
                    delete_groups(rest_obj, module, group_set, valid_group_dict)
                module.exit_json(msg=NO_CHANGES_MSG)
            else:
                if valid_group_dict:
                    modify_group(rest_obj, module, valid_group_dict, parent, static_root)
                elif group_arg == 'Id':
                    # A non-existent numeric id cannot be created - ids are server-assigned.
                    module.fail_json(msg=NONEXIST_GROUP_ID)
                create_group(rest_obj, module, parent, static_root)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
# NOTE(review): the remainder of this span is patch residue beginning the next
# file of the diff (ome_identity_pool.py header and the start of its
# DOCUMENTATION). Preserved verbatim below, commented so this block parses.
# diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
# new file mode 100644
# index 00000000..4906dcf5
# --- /dev/null
# +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
# @@ -0,0 +1,603 @@
# +#!/usr/bin/python
# +# -*- coding: utf-8 -*-
# +
# +#
# +# Dell EMC OpenManage Ansible Modules
# +# Version 5.1.0
# +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# +
# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# +#
# +
# +
# +from __future__ import (absolute_import, division, print_function)
# +__metaclass__ = type
# +
# +DOCUMENTATION = r'''
# +---
# +module: ome_identity_pool
# +short_description: Manages identity pool settings on OpenManage Enterprise
# +version_added: "2.1.0"
# +description: This module allows to create, modify, or delete a single identity pool on OpenManage Enterprise.
# +extends_documentation_fragment:
# +  - dellemc.openmanage.ome_auth_options
# +options:
# +  state:
# +    description:
# +      - C(present) modifies an existing identity pool. If the provided I (pool_name) does not exist,
# +        it creates an identity pool.
# +      - C(absent) deletes an existing identity pool.
# +    type: str
# +    default: present
# +    choices: [present, absent]
# +  pool_name:
# +    type: str
# +    required: True
# +    description:
# +      - This option is mandatory for I(state) when creating, modifying and deleting an identity pool.
# +  new_pool_name:
# +    type: str
# +    description:
# +      - After creating an identity pool, I(pool_name) can be changed to I(new_pool_name).
# +      - This option is ignored when creating an identity pool.
# +  pool_description:
# +    type: str
# +    description:
# +      - Description of the identity pool.
+ ethernet_settings: + type: dict + description: + - Applicable for creating and modifying an identity pool using Ethernet settings. + - I(starting_mac_address) and I(identity_count) are required to create an identity pool. + suboptions: + starting_mac_address: + description: Starting MAC address of the ethernet setting. + type: str + identity_count: + description: Number of MAC addresses. + type: int + fcoe_settings: + type: dict + description: + - Applicable for creating and modifying an identity pool using FCoE settings. + - I(starting_mac_address) and I(identity_count) are required to create an identity pool. + suboptions: + starting_mac_address: + description: Starting MAC Address of the FCoE setting. + type: str + identity_count: + description: Number of MAC addresses. + type: int + iscsi_settings: + type: dict + description: + - Applicable for creating and modifying an identity pool using ISCSI settings. + - I(starting_mac_address), I(identity_count), I(iqn_prefix), I(ip_range) and I(subnet_mask) are + required to create an identity pool. + suboptions: + starting_mac_address: + description: Starting MAC address of the iSCSI setting.This is required option for iSCSI setting. + type: str + identity_count: + description: Number of MAC addresses. + type: int + initiator_config: + type: dict + description: + - Applicable for creating and modifying an identity pool using iSCSI Initiator settings. + suboptions: + iqn_prefix: + description: IQN prefix addresses. + type: str + initiator_ip_pool_settings: + type: dict + description: + - Applicable for creating and modifying an identity pool using ISCSI Initiator IP pool settings. + suboptions: + ip_range: + description: Range of non-multicast IP addresses. + type: str + subnet_mask: + description: Subnet mask for I(ip_range). + type: str + gateway: + description: IP address of gateway. + type: str + primary_dns_server: + description: IP address of the primary DNS server. 
+ type: str + secondary_dns_server: + description: IP address of the secondary DNS server. + type: str + fc_settings: + type: dict + description: + - Applicable for creating and modifying an identity pool using fibre channel(FC) settings. + - This option allows OpenManage Enterprise to generate a Worldwide port name (WWPN) and Worldwide node name (WWNN) address. + - The value 0x2001 is beginning to the starting address for the generation of a WWPN, and 0x2000 for a WWNN. + - I(starting_address) and I(identity_count) are required to create an identity pool. + suboptions: + starting_address: + description: Starting MAC Address of FC setting.I(starting_address) is required to option to create FC settings. + type: str + identity_count: + description: Number of MAC addresses.I(identity_count) is required to option to create FC settings. + type: int +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" + - "Deepak Joshi(@Dell-Deepak-Joshi))" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Create an identity pool using ethernet, FCoE, iSCSI and FC settings + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + pool_name: "pool1" + pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings" + ethernet_settings: + starting_mac_address: "50:50:50:50:50:00" + identity_count: 60 + fcoe_settings: + starting_mac_address: "70:70:70:70:70:00" + identity_count: 75 + iscsi_settings: + starting_mac_address: "60:60:60:60:60:00" + identity_count: 30 + initiator_config: + iqn_prefix: "iqn.myprefix." 
+ initiator_ip_pool_settings: + ip_range: "10.33.0.1-10.33.0.255" + subnet_mask: "255.255.255.0" + gateway: "192.168.4.1" + primary_dns_server : "10.8.8.8" + secondary_dns_server : "8.8.8.8" + fc_settings: + starting_address: "30:30:30:30:30:00" + identity_count: 45 + +- name: Create an identity pool using only ethernet settings + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + pool_description: "create identity pool with ethernet" + ethernet_settings: + starting_mac_address: "aa-bb-cc-dd-ee-aa" + identity_count: 80 + +- name: Modify an identity pool + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool2" + new_pool_name: "pool3" + pool_description: "modifying identity pool with ethernet and fcoe settings" + ethernet_settings: + starting_mac_address: "90-90-90-90-90-90" + identity_count: 61 + fcoe_settings: + starting_mac_address: "aabb.ccdd.5050" + identity_count: 77 + +- name: Modify an identity pool using iSCSI and FC settings + dellemc.openmanage.ome_identity_pool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + pool_name: "pool_new" + new_pool_name: "pool_new2" + pool_description: "modifying identity pool with iscsi and fc settings" + iscsi_settings: + identity_count: 99 + initiator_config: + iqn_prefix: "iqn1.myprefix2." 
+ initiator_ip_pool_settings: + gateway: "192.168.4.5" + fc_settings: + starting_address: "10:10:10:10:10:10" + identity_count: 98 + +- name: Delete an identity pool + dellemc.openmanage.ome_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + pool_name: "pool2" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the identity pool operation. + returned: always + sample: "Successfully created an identity pool." +pool_status: + type: dict + description: Details of the user operation, when I(state) is C(present). + returned: success + sample: { + "Id":29, + "IsSuccessful":True, + "Issues":[] + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [{ + "Message": "Unable to process the request because an error occurred: + Ethernet-MAC Range overlap found (in this Identity Pool or in a different one) .", + "MessageArgs": [Ethernet-MAC Range overlap found (in this Identity Pool or in a different one)"], + "MessageId": "CGEN6001", + "RelatedProperties": [], + "Resolution": "Retry the operation. If the issue persists, contact your system administrator.", + "Severity": "Critical" + }], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + }} +''' + +import re +import json +import codecs +import binascii +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +IDENTITY_URI = "IdentityPoolService/IdentityPools" +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." 
+ + +def get_identity_pool_id_by_name(pool_name, rest_obj): + pool_id = 0 + attributes = None + identity_list = rest_obj.get_all_report_details(IDENTITY_URI)["report_list"] + for item in identity_list: + if pool_name == item["Name"]: + pool_id = item["Id"] + attributes = item + break + return pool_id, attributes + + +def mac_validation(mac_input): + match_found = re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$|" + "([0-9a-f]{4}([.])[0-9a-f]{4}([.])[0-9a-f]{4})$", mac_input.lower()) + return match_found + + +def mac_to_base64_conversion(mac_address, module): + try: + if mac_address: + allowed_mac_separators = [':', '-', '.'] + for sep in allowed_mac_separators: + if sep in mac_address: + b64_mac_address = codecs.encode(codecs.decode( + mac_address.replace(sep, ''), 'hex'), 'base64') + address = codecs.decode(b64_mac_address, 'utf-8').rstrip() + return address + except binascii.Error: + module.fail_json(msg='Encoding of MAC address {0} to base64 ' + 'failed'.format(mac_address)) + + +def update_modify_setting(modify_payload, existing_payload, setting_type, sub_keys): + """update current pool sub setting setting to modify payload if not provided + in the options to avoid the null update from ome""" + for sub_key in sub_keys: + if sub_key not in modify_payload[setting_type] and sub_key in existing_payload[setting_type]: + modify_payload[setting_type][sub_key] = existing_payload[setting_type][sub_key] + elif existing_payload[setting_type]: + if modify_payload[setting_type].get(sub_key) and existing_payload[setting_type].get(sub_key): + modify_setting = modify_payload[setting_type][sub_key] + existing_setting_payload = existing_payload[setting_type][sub_key] + diff_item = list(set(existing_setting_payload) - set(modify_setting)) + for key in diff_item: + modify_payload[setting_type][sub_key][key] = existing_setting_payload[key] + + +def get_updated_modify_payload(modify_payload, existing_payload): + """update current pool setting setting to modify payload if 
not provided + in the options to avoid the null update from ome""" + remove_unwanted_key_list = ['@odata.type', '@odata.id', 'CreatedBy', 'CreationTime', 'LastUpdatedBy', + 'LastUpdateTime', 'UsageCounts', 'UsageIdentitySets@odata.navigationLink'] + [existing_payload.pop(key) for key in remove_unwanted_key_list if key in existing_payload] + for key, val in existing_payload.items(): + if key not in modify_payload: + modify_payload[key] = val + else: + if existing_payload.get(key) and key == "EthernetSettings" or key == "FcoeSettings": + update_modify_setting(modify_payload, existing_payload, key, ["Mac"]) + elif existing_payload.get(key) and key == "FcSettings": + update_modify_setting(modify_payload, existing_payload, key, ["Wwnn", "Wwpn"]) + elif existing_payload.get(key) and key == "IscsiSettings": + update_modify_setting(modify_payload, existing_payload, key, + ["Mac", "InitiatorConfig", "InitiatorIpPoolSettings"]) + modify_payload = dict([(k, v) for k, v in modify_payload.items() if v is not None]) + return modify_payload + + +def update_mac_settings(payload, settings_params, setting_type, module): + """payload update for ethernet and fcoe settings and isci settings + and convert to MAC address to base 64 format""" + mac_address = settings_params.get("starting_mac_address") + mac_base_64_format = None + if mac_address: + match_found = mac_validation(mac_address) + if match_found: + mac_base_64_format = mac_to_base64_conversion(mac_address, module) + else: + module.fail_json(msg="Please provide the valid MAC address format for {0} settings." 
+ .format(setting_type.split('Settings')[0])) + sub_setting_mapper = {"StartingMacAddress": mac_base_64_format, + "IdentityCount": settings_params.get("identity_count")} + sub_settings_payload = dict([(k, v) for k, v in sub_setting_mapper.items() if v is not None]) + if any(sub_settings_payload): + payload.update({setting_type: {"Mac": sub_settings_payload}}) + + +def update_iscsi_specific_settings(payload, settings_params, setting_type): + """payload update for Iscsi specific settings""" + sub_setting_mapper = {} + initiator_config = settings_params.get("initiator_config") + if initiator_config and initiator_config.get("iqn_prefix"): + sub_setting_mapper.update({ + "InitiatorConfig": {"IqnPrefix": initiator_config.get("iqn_prefix")}}) + if settings_params.get("initiator_ip_pool_settings"): + initiator_ip_pool_settings = settings_params["initiator_ip_pool_settings"] + initiator_ip_pool_settings = {"IpRange": initiator_ip_pool_settings.get("ip_range"), + "SubnetMask": initiator_ip_pool_settings.get("subnet_mask"), + "Gateway": initiator_ip_pool_settings.get("gateway"), + "PrimaryDnsServer": initiator_ip_pool_settings.get("primary_dns_server"), + "SecondaryDnsServer": initiator_ip_pool_settings.get("secondary_dns_server")} + initiator_ip_pool_settings = dict([(k, v) for k, v in initiator_ip_pool_settings.items() if v is not None]) + sub_setting_mapper.update({ + "InitiatorIpPoolSettings": initiator_ip_pool_settings}) + if any(sub_setting_mapper): + if "IscsiSettings" in payload: + """update MAC address setting""" + sub_setting_mapper.update(payload[setting_type]) + sub_setting_mapper = dict([(key, val) for key, val in sub_setting_mapper.items() if any(val)]) + payload.update({setting_type: sub_setting_mapper}) + + +def get_wwn_address_prefix(starting_address): + """Prefix wwnn and wwpn MAC address with 20x00 and 20x01 respectively""" + delimiter, wwnn_prefix, wwpn_prefix = None, None, None + if "." in starting_address: + delimiter = "." 
+ elif ":" in starting_address: + delimiter = ":" + elif "-" in starting_address: + delimiter = "-" + length = len(starting_address.split(delimiter)[0]) + if length == 4: + wwnn_prefix = "2000{0}".format(delimiter) + wwpn_prefix = "2001{0}".format(delimiter) + else: + wwnn_prefix = "20{0}00{0}".format(delimiter) + wwpn_prefix = "20{0}01{0}".format(delimiter) + return wwnn_prefix, wwpn_prefix + + +def update_fc_settings(payload, settings_params, setting_type, module): + """payload update for Fibre Channel specific settings + payload: other setting payload + settings_params: fc setting parameters + setting_type: "FcSettings" + """ + sub_setting_mapper = {} + starting_address = settings_params.get("starting_address") + identity_count = settings_params.get("identity_count") + wwnn_payload = {} + wwpn_payload = {} + if starting_address: + if not mac_validation(starting_address): + module.fail_json(msg="Please provide the valid starting address format for FC settings.") + wwnn_prefix, wwpn_prefix = get_wwn_address_prefix(starting_address) + wwnn_address = mac_to_base64_conversion(wwnn_prefix + starting_address, module) + wwpn_address = mac_to_base64_conversion(wwpn_prefix + starting_address, module) + wwnn_payload.update({"StartingAddress": wwnn_address}) + wwpn_payload.update({"StartingAddress": wwpn_address}) + if identity_count is not None: + wwnn_payload.update({"IdentityCount": identity_count}) + wwpn_payload.update({"IdentityCount": identity_count}) + sub_setting_mapper.update({"Wwnn": wwnn_payload, + "Wwpn": wwpn_payload}) + sub_setting_mapper = dict([(key, val) for key, val in sub_setting_mapper.items() if any(val)]) + if any(sub_setting_mapper): + payload.update({setting_type: sub_setting_mapper}) + + +def get_payload(module, pool_id=None): + """create payload for create and modify operations""" + module_params = module.params + setting_payload = { + "Description": module_params.get("pool_description"), + "Name": module_params["pool_name"] + } + 
fcoe_settings_params = module_params.get("fcoe_settings") + ethernet_settings_params = module_params.get("ethernet_settings") + iscsi_settings_params = module_params.get("iscsi_settings") + fc_settings_params = module_params.get("fc_settings") + if fcoe_settings_params: + update_mac_settings(setting_payload, fcoe_settings_params, "FcoeSettings", module) + if ethernet_settings_params: + update_mac_settings(setting_payload, ethernet_settings_params, "EthernetSettings", module) + if iscsi_settings_params: + update_mac_settings(setting_payload, iscsi_settings_params, "IscsiSettings", module) + update_iscsi_specific_settings(setting_payload, iscsi_settings_params, "IscsiSettings") + if fc_settings_params: + update_fc_settings(setting_payload, fc_settings_params, "FcSettings", module) + if pool_id: + new_name = module_params.get("new_pool_name") + if new_name is not None: + setting_payload.update({"Name": new_name}) + setting_payload["Id"] = pool_id + payload = dict([(k, v) for k, v in setting_payload.items() if v is not None]) + return payload + + +def compare_nested_dict(modify_setting_payload, existing_setting_payload): + """compare existing and requested setting values of identity pool in case of modify operations + if both are same return True""" + for key, val in modify_setting_payload.items(): + if existing_setting_payload is None or existing_setting_payload.get(key) is None: + return False + elif isinstance(val, dict): + if not compare_nested_dict(val, existing_setting_payload.get(key)): + return False + elif val != existing_setting_payload.get(key): + return False + return True + + +def validate_modify_create_payload(setting_payload, module, action): + for key, val in setting_payload.items(): + if key in ["EthernetSettings", "FcoeSettings"] and val: + sub_config = val.get("Mac") + if sub_config is None or not all([sub_config.get("IdentityCount"), sub_config.get("StartingMacAddress")]): + module.fail_json(msg="Both starting MAC address and identity count is 
required to {0} an" + " identity pool using {1} settings.".format(action, ''.join(key.split('Settings')))) + elif key == "FcSettings" and val: + sub_config = val.get("Wwnn") + if sub_config is None or not all([sub_config.get("IdentityCount"), sub_config.get("StartingAddress")]): + module.fail_json(msg="Both starting MAC address and identity count is required to" + " {0} an identity pool using Fc settings.".format(action)) + elif key == "IscsiSettings" and val: + sub_config1 = val.get("Mac") + sub_config2 = val.get("InitiatorIpPoolSettings") + if sub_config1 is None or not all([sub_config1.get("IdentityCount"), sub_config1.get("StartingMacAddress")]): + module.fail_json(msg="Both starting MAC address and identity count is required to {0} an" + " identity pool using {1} settings.".format(action, ''.join(key.split('Settings')))) + elif sub_config2: + if not all([sub_config2.get("IpRange"), sub_config2.get("SubnetMask")]): + module.fail_json(msg="Both ip range and subnet mask in required to {0} an identity" + " pool using iSCSI settings.".format(action)) + + +def pool_create_modify(module, rest_obj): + pool_name = module.params["pool_name"] + pool_id, existing_payload = get_identity_pool_id_by_name(pool_name, rest_obj) + method = "POST" + uri = IDENTITY_URI + action = "create" + setting_payload = get_payload(module, pool_id) + if pool_id: + action = "modify" + method = "PUT" + uri = uri + "({0})".format(pool_id) + if compare_nested_dict(setting_payload, existing_payload): + module.exit_json(msg=NO_CHANGES_FOUND) + else: + setting_payload = get_updated_modify_payload(setting_payload, existing_payload) + validate_modify_create_payload(setting_payload, module, action) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + resp = rest_obj.invoke_request(method, uri, data=setting_payload) + msg = get_success_message(action, resp.json_data) + return msg + + +def pool_delete(module, rest_obj): + try: + pool_name = module.params["pool_name"] + pool_id, 
existing_payload = get_identity_pool_id_by_name(pool_name, rest_obj) + if not pool_id: + message = "The identity pool '{0}' is not present in the system.".format(pool_name) + module.exit_json(msg=message) + method = "DELETE" + uri = IDENTITY_URI + "({0})".format(pool_id) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + rest_obj.invoke_request(method, uri) + return {"msg": "Successfully deleted the identity pool."} + except Exception as err: + raise err + + +def get_success_message(action, resp_data): + message = { + "create": "Successfully created an identity pool.", + "modify": "Successfully modified the identity pool." + } + return {"msg": message[action], "result": resp_data} + + +def main(): + settings_options = {"starting_mac_address": {"type": 'str'}, + "identity_count": {"type": 'int'}} + iscsi_specific_settings = {"starting_mac_address": {"type": 'str'}, + "identity_count": {"type": 'int'}, + "initiator_config": {"options": {"iqn_prefix": {"type": 'str'}}, "type": "dict"}, + "initiator_ip_pool_settings": {"options": {"ip_range": {"type": 'str'}, + "subnet_mask": {"type": 'str'}, + "gateway": {"type": 'str'}, + "primary_dns_server": {"type": 'str'}, + "secondary_dns_server": {"type": 'str'}}, + "type": "dict"}} + fc_settings = {"starting_address": {"type": "str"}, "identity_count": {"type": "int"}} + + specs = { + "state": {"type": "str", "required": False, "default": "present", "choices": ['present', 'absent']}, + "pool_name": {"required": True, "type": "str"}, + "new_pool_name": {"required": False, "type": "str"}, + "pool_description": {"required": False, "type": "str"}, + "ethernet_settings": {"required": False, "type": "dict", + "options": settings_options}, + "fcoe_settings": {"required": False, "type": "dict", "options": settings_options}, + "iscsi_settings": {"required": False, "type": "dict", + "options": iscsi_specific_settings}, + "fc_settings": {"required": False, "type": "dict", "options": fc_settings}, + } + 
specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + state = module.params["state"] + if state == "present": + message = pool_create_modify(module, rest_obj) + module.exit_json(msg=message["msg"], pool_status=message["result"], changed=True) + else: + message = pool_delete(module, rest_obj) + module.exit_json(msg=message["msg"], changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py new file mode 100644 index 00000000..26b0d545 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_job_info +short_description: Get job details for a given job ID or an entire job queue on OpenMange Enterprise +version_added: "2.0.0" +description: This module retrieves job details for a given job ID or an entire job queue on OpenMange Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + job_id: + description: Unique ID of the job. 
+ type: int + system_query_options: + description: Options for pagination of the output. + type: dict + suboptions: + top: + description: Number of records to return. Default value is 100. + type: int + skip: + description: Number of records to skip. Default value is 0. + type: int + filter: + description: Filter records by the values supported. + type: str +requirements: + - "python >= 3.8.6" +author: "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Get all jobs details + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Get job details for id + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + job_id: 12345 + +- name: Get filtered job details + dellemc.openmanage.ome_job_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + top: 2 + skip: 1 + filter: "JobType/Id eq 8" + +''' + +RETURN = r''' +--- +msg: + description: Overall status of the job facts operation. + returned: always + type: str + sample: "Successfully fetched the job info" +job_info: + description: Details of the OpenManage Enterprise jobs. 
+ returned: success + type: dict + sample: { + "value": [ + { + "Builtin": false, + "CreatedBy": "system", + "Editable": true, + "EndTime": null, + "Id": 12345, + "JobDescription": "Refresh Inventory for Device", + "JobName": "Refresh Inventory for Device", + "JobStatus": { + "Id": 2080, + "Name": "New" + }, + "JobType": { + "Id": 8, + "Internal": false, + "Name": "Inventory_Task" + }, + "LastRun": "2000-01-29 10:51:34.776", + "LastRunStatus": { + "Id": 2060, + "Name": "Completed" + }, + "NextRun": null, + "Params": [], + "Schedule": "", + "StartTime": null, + "State": "Enabled", + "Targets": [ + { + "Data": "''", + "Id": 123123, + "JobId": 12345, + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "UpdatedBy": null, + "Visible": true + } + ]} +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +JOBS_URI = "JobService/Jobs" + + +def _get_query_parameters(module_params): + """Builds query parameter + :returns: dictionary, which builds the query format + eg : {"$filter": "JobType/Id eq 8"} + """ + system_query_options_param = module_params.get("system_query_options") + query_parameter = {} + if system_query_options_param: + query_parameter = dict([("$" + k, v) for k, v in system_query_options_param.items() if v is not None]) + return query_parameter + + +def main(): + specs = { + "job_id": {"required": False, "type": 'int'}, + "system_query_options": {"required": False, "type": 'dict', "options": { + "top": {"type": 'int', "required": False}, + "skip": {"type": 'int', "required": False}, + "filter": {"type": 'str', "required": False}, + }}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) 
+ + try: + with RestOME(module.params, req_session=True) as rest_obj: + resp_status = [] + if module.params.get("job_id") is not None: + # Fetch specific job + job_id = module.params.get("job_id") + jpath = "{0}({1})".format(JOBS_URI, job_id) + resp = rest_obj.invoke_request('GET', jpath) + job_facts = resp.json_data + resp_status.append(resp.status_code) + else: + # query applicable only for all jobs list fetching + query_param = _get_query_parameters(module.params) + if query_param: + resp = rest_obj.invoke_request('GET', JOBS_URI, query_param=query_param) + job_facts = resp.json_data + resp_status.append(resp.status_code) + else: + # Fetch all jobs, filter and pagination options + job_report = rest_obj.get_all_report_details(JOBS_URI) + job_facts = {"@odata.context": job_report["resp_obj"].json_data["@odata.context"], + "@odata.count": len(job_report["report_list"]), + "value": job_report["report_list"]} + if job_facts["@odata.count"] > 0: + resp_status.append(200) + except HTTPError as httperr: + module.fail_json(msg=str(httperr), job_info=json.load(httperr)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err: + module.fail_json(msg=str(err)) + if 200 in resp_status: + module.exit_json(msg="Successfully fetched the job info", job_info=job_facts) + else: + module.fail_json(msg="Failed to fetch the job info") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py new file mode 100644 index 00000000..08e307c7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. 
or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_network_port_breakout +short_description: This module allows to automate the port portioning or port breakout to logical sub ports +version_added: "2.1.0" +description: + - This module allows to automate breaking out of IOMs in fabric mode into logical sub ports. + - The port breakout operation is only supported in OpenManage Enterprise Modular. +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + target_port: + required: True + description: "The ID of the port in the switch to breakout. Enter the port ID in the format: service tag:port. + For example, 2HB7NX2:ethernet1/1/13." + type: str + breakout_type: + required: True + description: + - The preferred breakout type. For example, 4X10GE. + - To revoke the default breakout configuration, enter 'HardwareDefault'. + type: str +requirements: + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Port breakout configuration + dellemc.openmanage.ome_network_port_breakout: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "1X40GE" + +- name: Revoke the default breakout configuration + dellemc.openmanage.ome_network_port_breakout: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + target_port: "2HB7NX2:phy-port1/1/11" + breakout_type: "HardwareDefault" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the port configuration. 
+ returned: always + type: str + sample: Port breakout configuration job submitted successfully. +breakout_status: + description: Details of the OpenManage Enterprise jobs. + returned: success + type: dict + sample: { + "Builtin": false, + "CreatedBy": "root", + "Editable": true, + "EndTime": null, + "Id": 11111, + "JobDescription": "", + "JobName": "Breakout Port", + "JobStatus": {"Id": 1112, "Name": "New"}, + "JobType": {"Id": 3, "Internal": false, "Name": "DeviceAction_Task"}, + "LastRun": null, + "LastRunStatus": {"Id": 1113, "Name": "NotRun"}, + "NextRun": null, + "Params": [ + {"JobId": 11111, "Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}, + {"JobId": 11111, "Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"JobId": 11111, "Key": "breakoutType", "Value": "1X40GE"}], + "Schedule": "startnow", + "StartTime": null, + "State": "Enabled", + "Targets": [ + {"Data": "", "Id": 11112, "JobId": 34206, "TargetType": { "Id": 1000, "Name": "DEVICE"}} + ], + "UpdatedBy": null, + "UserGenerated": true, + "Visible": true + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
def get_device_id(module, rest_obj):
    """Resolve the device ID for the switch named in the target port.

    The target port must look like ``<service tag>:<port id>``, e.g.
    ``2HB7NX2:ethernet1/1/13``; the service-tag half is used to query the
    device inventory. Fails the module on a malformed port or an unknown
    service tag.

    :param module: AnsibleModule instance carrying the user parameters.
    :param rest_obj: active RestOME session.
    :returns: the numeric device Id.
    """
    target_port = module.params["target_port"]
    if re.search("^[a-z0-9A-Z]+[:][a-z0-9A-Z/-]+$", target_port) is None:
        module.fail_json(msg="Invalid target port {0}.".format(target_port))
    service_tag = target_port.split(":")[0]
    odata_filter = "DeviceServiceTag eq '{0}'".format(service_tag)
    response = rest_obj.invoke_request("GET", DEVICE_URI,
                                       query_param={"$filter": odata_filter})
    device_records = response.json_data.get("value") if response.status_code == 200 else None
    if not device_records:
        module.fail_json(msg="Unable to retrieve the device information because"
                             " the device with the entered service tag {0} is not present.".format(service_tag))
    return device_records[0]["Id"]
def get_breakout_payload(device_id, breakout_type, interface_id):
    """Build the JobService payload that applies a port-breakout job.

    :param device_id: numeric device Id of the target IOM.
    :param breakout_type: requested breakout type (e.g. "1X40GE").
    :param interface_id: port number prefixed with the service tag.
    :returns: dict ready to POST to the JobService Jobs collection.
    """
    job_params = [
        {"Key": "breakoutType", "Value": breakout_type},
        {"Key": "interfaceId", "Value": interface_id},
        {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"},
    ]
    job_target = {
        "JobId": 0, "Id": device_id, "Data": "",
        "TargetType": {"Id": 4000, "Name": "DEVICE"},
    }
    return {
        "Id": 0,
        "JobName": "Breakout Port",
        "JobDescription": "",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {"Id": 3, "Name": "DeviceAction_Task"},
        "Params": job_params,
        "Targets": [job_target],
    }
+ module.exit_json(msg=message, changed=changes) + + +def set_breakout(module, rest_obj, breakout_config, breakout_capability, interface_id, device_id): + """ + Configuration the breakout feature for given option. + :param module: ansible module arguments. + :param rest_obj: rest object for making requests. + :param breakout_config: Existing breakout configuration. + :param breakout_capability: Available breakout configuration. + :param interface_id: port number with service tag + :param device_id: device id + :return: rest object + """ + breakout_type, response = module.params["breakout_type"], {} + payload = get_breakout_payload(device_id, breakout_type, interface_id) + if breakout_config == "HardwareDefault" and not breakout_type == "HardwareDefault": + for config in breakout_capability: + if breakout_type == config["Type"]: + check_mode(module, changes=True) + response = rest_obj.invoke_request("POST", JOB_URI, data=payload) + break + else: + supported_type = ", ".join(i["Type"] for i in breakout_capability) + module.fail_json(msg="Invalid breakout type: {0}, supported values are {1}.".format(breakout_type, + supported_type)) + elif not breakout_config == "HardwareDefault" and breakout_type == "HardwareDefault": + check_mode(module, changes=True) + response = rest_obj.invoke_request("POST", JOB_URI, data=payload) + elif breakout_config == breakout_type: + check_mode(module, changes=False) + module.exit_json(msg="The port is already configured with the selected breakout configuration.") + else: + module.fail_json(msg="Device does not support changing a port breakout" + " configuration to different breakout type. 
def main():
    """Module entry point: parse arguments and run the breakout workflow.

    Resolves the device, reads the current port breakout state, submits the
    breakout job, and maps transport/HTTP failures to the module result.
    """
    argument_spec = {
        "target_port": {"required": True, "type": 'str'},
        "breakout_type": {"required": True, "type": 'str'},
    }
    argument_spec.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            device_id = get_device_id(module, rest_obj)
            config, capability, port_id = get_port_information(module, rest_obj, device_id)
            job_resp = set_breakout(module, rest_obj, config, capability, port_id, device_id)
            if job_resp:
                module.exit_json(msg="Port breakout configuration job submitted successfully.",
                                 breakout_status=job_resp.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Host unreachable is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (SSLValidationError, ConnectionError, TypeError, ValueError, IndexError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_network_vlan +short_description: Create, modify & delete a VLAN +version_added: "2.1.0" +description: + - This module allows to, + - Create a VLAN on OpenManage Enterprise. + - Modify or delete an existing VLAN on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + state: + type: str + description: + - C(present) creates a new VLAN or modifies an existing VLAN. + - C(absent) deletes an existing VLAN. + - I(WARNING) Deleting a VLAN can impact the network infrastructure. + choices: [present, absent] + default: present + name: + required: true + type: str + description: Provide the I(name) of the VLAN to be created, deleted or modified. + new_name: + type: str + description: Provide the I(name) of the VLAN to be modified. + description: + type: str + description: Short description of the VLAN to be created or modified. + vlan_minimum: + type: int + description: + - The minimum VLAN value of the range. + vlan_maximum: + type: int + description: + - The maximum VLAN value of the range. + - A single value VLAN is created if the vlan_maximum and vlan_minmum values are the same. + type: + type: str + description: + - Types of supported VLAN networks. + - "For the description of each network type, + use API U(https://I(hostname)/api/NetworkConfigurationService/NetworkTypes)." 
+ choices: ['General Purpose (Bronze)', 'General Purpose (Silver)', 'General Purpose (Gold)', + 'General Purpose (Platinum)', 'Cluster Interconnect', 'Hypervisor Management', + 'Storage - iSCSI', 'Storage - FCoE', 'Storage - Data Replication', + 'VM Migration', 'VMWare FT Logging'] +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Create a VLAN range + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 35 + vlan_maximum: 40 + tags: create_vlan_range + +- name: Create a VLAN with a single value + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan2" + description: "VLAN desc" + type: "General Purpose (Bronze)" + vlan_minimum: 127 + vlan_maximum: 127 + tags: create_vlan_single + +- name: Modify a VLAN + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "vlan1" + new_name: "vlan_gold1" + description: "new description" + type: "General Purpose (Gold)" + vlan_minimum: 45 + vlan_maximum: 50 + tags: modify_vlan + +- name: Delete a VLAN + dellemc.openmanage.ome_network_vlan: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "vlan1" + tags: delete_vlan +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the VLAN operation. + returned: always + sample: "Successfully created the VLAN." 
+vlan_status: + type: dict + description: Details of the VLAN that is either created or modified. + returned: when I(state=present) + sample: { + "@odata.context": "/api/$metadata#NetworkConfigurationService.Network", + "@odata.type": "#NetworkConfigurationService.Network", + "@odata.id": "/api/NetworkConfigurationService/Networks(1234)", + "Id": 1234, + "Name": "vlan1", + "Description": "VLAN description", + "VlanMaximum": 130, + "VlanMinimum": 140, + "Type": 1, + "CreatedBy": "admin", + "CreationTime": "2020-01-01 05:54:36.113", + "UpdatedBy": null, + "UpdatedTime": "2020-01-01 05:54:36.113", + "InternalRefNWUUId": "6d6effcc-eca4-44bd-be07-1234ab5cd67e" + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CTEM1043", + "RelatedProperties": [], + "Message": "Unable to create or update the network because the entered VLAN minimum 0 + is not within a valid range ( 1 - 4000 or 4021 - 4094 ).", + "MessageArgs": [ + "0", + "1", + "4000", + "4021", + "4094" + ], + "Severity": "Warning", + "Resolution": "Enter a valid VLAN minimum as identified in the message and retry the operation." 
def format_payload(src_dict):
    """Map module parameter names to their REST payload field names.

    Only keys relevant to the VLAN payload are carried over; other module
    parameters (description, state, ...) are handled separately by callers.

    :param src_dict: module parameter dictionary (may be None).
    :returns: dict with REST field names, or None when src_dict is falsy.
    """
    address_payload_map = {
        "name": "Name",
        "vlan_maximum": "VlanMaximum",
        "vlan_minimum": "VlanMinimum",
        "type": "Type"
    }
    if src_dict:
        # Dict comprehension instead of dict([...]) (flake8-comprehensions C404).
        return {address_payload_map[key]: val for key, val in src_dict.items() if key in address_payload_map}


def get_item_id(rest_obj, name, uri):
    """Look up an item Id by its exact Name at the given OME URI.

    :param rest_obj: active RestOME session.
    :param name: item name to match exactly.
    :param uri: collection URI to query.
    :returns: tuple (item Id, or 0 when not found; full list of items).
    """
    resp = rest_obj.invoke_request('GET', uri)
    tlist = []
    if resp.success and resp.json_data.get('value'):
        tlist = resp.json_data.get('value', [])
    for xtype in tlist:
        if xtype.get('Name', "") == name:
            return xtype.get('Id'), tlist
    return 0, tlist


def check_overlapping_vlan_range(payload, vlans):
    """Return the first existing VLAN whose range overlaps the payload's.

    Two closed ranges [a_min, a_max] and [b_min, b_max] overlap exactly when
    max(a_min, b_min) <= min(a_max, b_max); comparing the bounds directly
    avoids materializing the overlap (the original built list(range(...))
    only to test whether it was empty).

    :param payload: dict with "VlanMinimum"/"VlanMaximum" of the new range.
    :param vlans: list of existing VLAN dicts to check against.
    :returns: the overlapping VLAN dict, or None when there is no overlap.
    """
    for existing in vlans:
        lower = max(existing.get('VlanMinimum', 0), payload["VlanMinimum"])
        upper = min(existing.get('VlanMaximum', 0), payload["VlanMaximum"])
        if lower <= upper:
            return existing
    return None
def delete_vlan(module, rest_obj, vlan_id):
    """Delete the VLAN with the given Id and exit the module.

    Honors check mode by exiting with CHECK_MODE_MSG before any change.

    :param module: AnsibleModule instance (used to exit/report).
    :param rest_obj: active RestOME session.
    :param vlan_id: Id of the VLAN to delete.
    """
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    # The DELETE response body is not used, so it is not captured
    # (the original bound it to an unused local).
    rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
    module.exit_json(msg="Successfully deleted the VLAN.", changed=True)
def check_existing_vlan(module, rest_obj):
    """Find a VLAN by the module's "name" parameter.

    :param module: AnsibleModule instance carrying the user parameters.
    :param rest_obj: active RestOME session.
    :returns: tuple (VLAN Id, or 0 when absent; list of all VLANs).
    """
    # $top=9999 overrides the default page size so every VLAN is scanned.
    lookup_uri = "{0}?$top=9999".format(VLAN_CONFIG)
    return get_item_id(rest_obj, module.params["name"], lookup_uri)
rest_obj: + vlan_id, vlans = check_existing_vlan(module, rest_obj) + if module.params["state"] == "present": + if vlan_id: + modify_vlan(module, rest_obj, vlan_id, vlans) + create_vlan(module, rest_obj, vlans) + else: + if vlan_id: + delete_vlan(module, rest_obj, vlan_id) + if module.check_mode: + module.exit_json(msg="No changes found to be applied to the VLAN configuration.") + module.exit_json(msg="VLAN {0} does not exist.".format(module.params["name"])) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py new file mode 100644 index 00000000..f1de512b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_network_vlan_info +short_description: Retrieves the information about networks VLAN(s) present in OpenManage Enterprise +version_added: "2.1.0" +description: + This module allows to retrieve the following. + - A list of all the network VLANs with their detailed information. + - Information about a specific network VLAN using VLAN I(id) or VLAN I(name). 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + id: + description: + - A unique identifier of the network VLAN available in the device. + - I(id) and I(name) are mutually exclusive. + type: int + name: + description: + - A unique name of the network VLAN available in the device. + - I(name) and I(id) are mutually exclusive. + type: str + +requirements: + - "python >= 3.8.6" +author: "Deepak Joshi(@deepakjoshishri)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = """ +--- +- name: Retrieve information about all network VLANs(s) available in the device + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Retrieve information about a network VLAN using the VLAN ID + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + id: 12345 + +- name: Retrieve information about a network VLAN using the VLAN name + dellemc.openmanage.ome_network_vlan_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + name: "Network VLAN - 1" +""" + +RETURN = ''' +--- +msg: + type: str + description: Detailed information of the network VLAN(s). + returned: success + sample: { + "msg": "Successfully retrieved the network VLAN information.", + "network_vlan_info": [ + { + "CreatedBy": "admin", + "CreationTime": "2020-09-02 18:48:42.129", + "Description": "Description of Logical Network - 1", + "Id": 20057, + "InternalRefNWUUId": "42b9903d-93f8-4184-adcf-0772e4492f71", + "Name": "Network VLAN - 1", + "Type": { + "Description": "This is the network for general purpose traffic. 
QOS Priority : Bronze.", + "Id": 1, + "Name": "General Purpose (Bronze)", + "NetworkTrafficType": "Ethernet", + "QosType": { + "Id": 4, + "Name": "Bronze" + }, + "VendorCode": "GeneralPurpose" + }, + "UpdatedBy": null, + "UpdatedTime": "2020-09-02 18:48:42.129", + "VlanMaximum": 111, + "VlanMinimum": 111 + }, + { + "CreatedBy": "admin", + "CreationTime": "2020-09-02 18:49:11.507", + "Description": "Description of Logical Network - 2", + "Id": 20058, + "InternalRefNWUUId": "e46ccb3f-ef57-4617-ac76-46c56594005c", + "Name": "Network VLAN - 2", + "Type": { + "Description": "This is the network for general purpose traffic. QOS Priority : Silver.", + "Id": 2, + "Name": "General Purpose (Silver)", + "NetworkTrafficType": "Ethernet", + "QosType": { + "Id": 3, + "Name": "Silver" + }, + "VendorCode": "GeneralPurpose" + }, + "UpdatedBy": null, + "UpdatedTime": "2020-09-02 18:49:11.507", + "VlanMaximum": 112, + "VlanMinimum": 112 + } + ] +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
import json
from ssl import SSLError

# Base URI to fetch all logical networks information
NETWORK_VLAN_BASE_URI = "NetworkConfigurationService/Networks"
NETWORK_TYPE_BASE_URI = "NetworkConfigurationService/NetworkTypes"
QOS_TYPE_BASE_URI = "NetworkConfigurationService/QosTypes"

# Module Success Message
MODULE_SUCCESS_MESSAGE = "Successfully retrieved the network VLAN information."

# Module Failure Messages
MODULE_FAILURE_MESSAGE = "Failed to retrieve the network VLAN information."
NETWORK_VLAN_NAME_NOT_FOUND = "Provided network VLAN with name - '{0}' does not exist."

SAFE_MAX_LIMIT = 9999

# OData protocol bookkeeping keys that are noise for the end user.
_ODATA_KEYS = ('@odata.id', '@odata.type', '@odata.context', '@odata.count')


def clean_data(data):
    """Strip OData bookkeeping keys from *data* in place and return it."""
    for odata_key in _ODATA_KEYS:
        data.pop(odata_key, None)
    return data


def get_type_information(rest_obj, uri):
    """Fetch *uri* and return a mapping of entry Id to its cleaned details.

    Returns an empty dict when the request does not succeed with HTTP 200.
    """
    info_by_id = {}
    response = rest_obj.invoke_request('GET', uri)
    if response.status_code == 200:
        value = response.json_data.get('value')
        entries = value if isinstance(value, list) else [response.json_data]
        for entry in entries:
            info_by_id[clean_data(entry)['Id']] = entry
    return info_by_id


def get_network_type_and_qos_type_information(rest_obj):
    """Return network-type details with each 'QosType' id expanded in place
    to the full QoS type record."""
    network_types = get_type_information(rest_obj, NETWORK_TYPE_BASE_URI)
    qos_types = get_type_information(rest_obj, QOS_TYPE_BASE_URI)
    for net_type in network_types.values():
        net_type['QosType'] = qos_types[net_type['QosType']]
    return network_types


def main():
    """Module entry point: fetch VLAN(s), optionally filter by id/name, and
    enrich each VLAN's 'Type' with full network/QoS type details."""
    specs = {
        "id": {"required": False, "type": 'int'},
        "name": {"required": False, "type": 'str'}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[["id", "name"]],
        supports_check_mode=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # A specific id narrows the request; otherwise fetch everything
            # up to SAFE_MAX_LIMIT in one page.
            vlan_id = module.params.get("id")
            if vlan_id:
                network_vlan_uri = "{0}({1})".format(NETWORK_VLAN_BASE_URI, vlan_id)
            else:
                network_vlan_uri = "{0}?$top={1}".format(NETWORK_VLAN_BASE_URI, SAFE_MAX_LIMIT)
            resp = rest_obj.invoke_request('GET', network_vlan_uri)
            if resp.status_code != 200:
                module.fail_json(msg=MODULE_FAILURE_MESSAGE)
            value = resp.json_data.get('value')
            network_vlan_info = value if isinstance(value, list) else [resp.json_data]
            requested_name = module.params.get("name")
            if requested_name:
                matches = [vlan for vlan in network_vlan_info
                           if vlan["Name"] == requested_name.strip()]
                if not matches:
                    module.fail_json(msg=NETWORK_VLAN_NAME_NOT_FOUND.format(requested_name))
                # Keep only the first match, mirroring a first-hit lookup.
                network_vlan_info = matches[:1]
            # Expand every VLAN's numeric 'Type' into full type details.
            network_type_dict = get_network_type_and_qos_type_information(rest_obj)
            for vlan in network_vlan_info:
                clean_data(vlan)
                vlan['Type'] = network_type_dict[vlan['Type']]
            module.exit_json(msg=MODULE_SUCCESS_MESSAGE, network_vlan_info=network_vlan_info)
    except HTTPError as err:
        if err.getcode() == 404:
            module.fail_json(msg=str(err))
        module.fail_json(msg=MODULE_FAILURE_MESSAGE, error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, KeyError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
+ type: int +requirements: + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Power state operation based on device id + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 11111 + power_state: "off" + +- name: Power state operation based on device service tag + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "KLBR111" + power_state: "on" + +- name: Power state operation based on list of device ids + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: "{{ item.device_id }}" + power_state: "{{ item.state }}" + with_items: + - { "device_id": 11111, "state": "on" } + - { "device_id": 22222, "state": "off" } + +- name: Power state operation based on list of device service tags + dellemc.openmanage.ome_powerstate: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: "{{ item.service_tag }}" + power_state: "{{ item.state }}" + with_items: + - { "service_tag": "KLBR111", "state": "on" } + - { "service_tag": "KLBR222", "state": "off" } +''' + +RETURN = r''' +--- +msg: + type: str + description: "Overall power state operation job status." + returned: always + sample: "Power State operation job submitted successfully." +job_status: + type: dict + description: "Power state operation job and progress details from the OME." 
import json
from ssl import SSLError

VALID_OPERATION = {"on": 2, "off": 12, "coldboot": 5, "warmboot": 10, "shutdown": 8}
POWER_STATE_MAP = {"on": 17, "off": 18, "poweringon": 20, "poweringoff": 21}
NOT_APPLICABLE_OPTIONS = ["coldboot", "warmboot", "shutdown"]


def spawn_update_job(rest_obj, payload):
    """Submit the power-control job; return the created job's details
    (empty dict when the POST does not return HTTP 201)."""
    job_details = {}
    response = rest_obj.invoke_request("POST", "JobService/Jobs", data=payload)
    if response.status_code == 201:
        job_details = response.json_data
    return job_details


def build_power_state_payload(device_id, device_type, valid_option):
    """Assemble the JobService payload for one device's power-state change.

    *valid_option* is the numeric POWER_CONTROL operation code from
    VALID_OPERATION; it is serialized as a string per the API contract.
    """
    return {
        "Id": 0,
        "JobName": "DeviceAction_Task_PowerState",
        "JobDescription": "DeviceAction_Task",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {"Id": 3, "Name": "DeviceAction_Task"},
        "Params": [
            {"Key": "operationName", "Value": "POWER_CONTROL"},
            {"Key": "powerState", "Value": str(valid_option)},
        ],
        "Targets": [
            {"Id": int(device_id), "Data": "",
             "TargetType": {"Id": device_type, "Name": "DEVICE"}},
        ],
    }


def get_device_state(module, resp, device_id):
    """Locate *device_id* in the device report and return its
    (current power state, device type); fail the module when the id is
    unknown or the device type is unsupported."""
    matched = None
    for device in resp['report_list']:
        if device['Id'] == int(device_id):
            matched = device
            break
    if matched is None:
        module.fail_json(msg="Unable to complete the operation because the entered target"
                             " device id '{0}' is invalid.".format(device_id))
    current_state = matched.get('PowerState', None)
    device_type = matched['Type']
    # Only chassis (1000) and server (2000) style types are supported here.
    if device_type not in (1000, 2000):
        module.fail_json(msg="Unable to complete the operation because power"
                             " state supports device type 1000 and 2000.")
    return current_state, device_type


def get_device_resource(module, rest_obj):
    """Resolve the target device (by id or service tag) and build the
    power-state job payload; honors check mode by reporting whether the
    requested state would change anything."""
    power_state = module.params['power_state']
    device_id = module.params['device_id']
    service_tag = module.params['device_service_tag']
    resp_data = rest_obj.get_all_report_details("DeviceService/Devices")
    if resp_data['report_list'] and service_tag is not None:
        tag_to_id = dict([(device.get('DeviceServiceTag'), str(device.get('Id')))
                          for device in resp_data['report_list']])
        if service_tag not in tag_to_id:
            module.fail_json(msg="Unable to complete the operation because the entered target"
                                 " device service tag '{0}' is invalid.".format(service_tag))
        device_id = tag_to_id[service_tag]
    current_state, device_type = get_device_state(module, resp_data, device_id)

    # Decide whether the requested transition is effectively a no-op:
    # reboots/shutdown on a device that is not powered on, or a request for
    # the state (or its transitional variant) the device is already in.
    valid_option = VALID_OPERATION[power_state]
    already_in_state = False
    if power_state in NOT_APPLICABLE_OPTIONS and current_state != POWER_STATE_MAP["on"]:
        already_in_state = True
    elif (valid_option == current_state) or \
            (power_state == "on" and current_state in (POWER_STATE_MAP["on"], POWER_STATE_MAP['poweringon'])) or \
            (power_state in ("off", "shutdown") and
             current_state in (POWER_STATE_MAP["off"], POWER_STATE_MAP['poweringoff'])):
        already_in_state = True

    if module.check_mode:
        if already_in_state:
            module.exit_json(msg="No changes found to commit.")
        module.exit_json(msg="Changes found to commit.", changed=True)
    return build_power_state_payload(device_id, device_type, valid_option)


def main():
    """Module entry point: validate targets, build the payload, and submit
    the power-state job."""
    specs = {
        "power_state": {"required": True, "type": "str",
                        "choices": ["on", "off", "coldboot", "warmboot", "shutdown"]},
        "device_service_tag": {"required": False, "type": "str"},
        "device_id": {"required": False, "type": "int"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[["device_service_tag", "device_id"]],
        mutually_exclusive=[["device_service_tag", "device_id"]],
        supports_check_mode=True
    )
    try:
        if module.params['device_id'] is None and module.params['device_service_tag'] is None:
            module.fail_json(msg="device_id and device_service_tag attributes should not be None.")
        job_status = {}
        with RestOME(module.params, req_session=True) as rest_obj:
            payload = get_device_resource(module, rest_obj)
            job_status = spawn_update_job(rest_obj, payload)
    except HTTPError as err:
        module.fail_json(msg=str(err), job_status=json.load(err))
    except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))
    module.exit_json(msg="Power State operation job submitted successfully.",
                     job_status=job_status, changed=True)
== '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py new file mode 100644 index 00000000..d2f7a87c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py @@ -0,0 +1,863 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ome_profile +short_description: Create, modify, delete, assign, unassign and migrate a profile on OpenManage Enterprise +version_added: "3.1.0" +description: "This module allows to create, modify, delete, assign, unassign, and migrate a profile on OpenManage Enterprise." +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + command: + description: + - C(create) creates new profiles. + - "C(modify) modifies an existing profile. Only I(name), I(description), I(boot_to_network_iso), and I(attributes) + can be modified." + - C(delete) deletes an existing profile. + - C(assign) Deploys an existing profile on a target device and returns a task ID. + - C(unassign) unassigns a profile from a specified target and returns a task ID. + - C(migrate) migrates an existing profile and returns a task ID. + choices: [create, modify, delete, assign, unassign, migrate] + default: create + type: str + name_prefix: + description: + - The name provided when creating a profile is used a prefix followed by the number assigned to it by OpenManage Enterprise. + - This is applicable only for a create operation. + - This option is mutually exclusive with I(name). 
+ type: str + default: Profile + name: + description: + - Name of the profile. + - This is applicable for modify, delete, assign, unassign, and migrate operations. + - This option is mutually exclusive with I(name_prefix) and I(number_of_profiles). + type: str + new_name: + description: + - New name of the profile. + - Applicable when I(command) is C(modify). + type: str + number_of_profiles: + description: + - Provide the number of profiles to be created. + - This is applicable when I(name_prefix) is used with C(create). + - This option is mutually exclusive with I(name). + - Openmanage Enterprise can create a maximum of 100 profiles. + type: int + default: 1 + template_name: + description: + - Name of the template for creating the profile(s). + - This is applicable when I(command) is C(create). + - This option is mutually exclusive with I(template_id). + type: str + template_id: + description: + - ID of the template. + - This is applicable when I(command) is C(create). + - This option is mutually exclusive with I(template_name). + type: int + device_id: + description: + - ID of the target device. + - This is applicable when I(command) is C(assign) and C(migrate). + - This option is mutually exclusive with I(device_service_tag). + type: int + device_service_tag: + description: + - Identifier of the target device. + - This is typically 7 to 8 characters in length. + - Applicable when I(command) is C(assign), and C(migrate). + - This option is mutually exclusive with I(device_id). + - If the device does not exist when I(command) is C(assign) then the profile is auto-deployed. + type: str + description: + description: Description of the profile. + type: str + boot_to_network_iso: + description: + - Details of the Share iso. + - Applicable when I(command) is C(create), C(assign), and C(modify). + type: dict + suboptions: + boot_to_network: + description: Enable or disable a network share. 
+ type: bool + required: true + share_type: + description: Type of network share. + type: str + choices: [NFS, CIFS] + share_ip: + description: IP address of the network share. + type: str + share_user: + description: User name when I(share_type) is C(CIFS). + type: str + share_password: + description: User password when I(share_type) is C(CIFS). + type: str + workgroup: + description: User workgroup when I(share_type) is C(CIFS). + type: str + iso_path: + description: Specify the full ISO path including the share name. + type: str + iso_timeout: + description: Set the number of hours that the network ISO file will remain mapped to the target device(s). + type: int + choices: [1, 2, 4, 8, 16] + default: 4 + filters: + description: + - Filters the profiles based on selected criteria. + - This is applicable when I(command) is C(delete) or C(unassign). + - This supports suboption I(ProfileIds) which takes a list of profile IDs. + - This also supports OData filter expressions with the suboption I(Filters). + - See OpenManage Enterprise REST API guide for the filtering options available. + - I(WARNING) When this option is used in case of C(unassign), task ID is not returned for any of the profiles affected. + type: dict + force: + description: + - Provides the option to force the migration of a profile even if the source device cannot be contacted. + - This option is applicable when I(command) is C(migrate). + type: bool + default: false + attributes: + description: Attributes for C(modify) and C(assign). + type: dict + suboptions: + Attributes: + description: + - List of attributes to be modified, when I(command) is C(modify). + - List of attributes to be overridden when I(command) is C(assign). + - "Use the I(Id) If the attribute Id is available. If not, use the comma separated I (DisplayName). + For more details about using the I(DisplayName), see the example provided." 
+ type: list + elements: dict + Options: + description: + - Provides the different shut down options. + - This is applicable when I(command) is C(assign). + type: dict + Schedule: + description: + - Schedule for profile deployment. + - This is applicable when I(command) is C(assign). + type: dict +requirements: + - "python >= 3.8.6" +author: "Jagadeesh N V (@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). + - C(assign) operation on a already assigned profile will not redeploy. +''' + +EXAMPLES = r''' +--- +- name: Create two profiles from a template + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 2 + +- name: Create profile with NFS share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + +- name: Create profile with CIFS share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: create + template_name: "template 1" + name_prefix: "omam_profile" + number_of_profiles: 1 + boot_to_network_iso: + boot_to_network: True + share_type: CIFS + share_ip: "192.168.0.2" + share_user: "username" + share_password: "password" + workgroup: "workgroup" + iso_path: "\\path\\to\\my_iso.iso" + iso_timeout: 8 + +- name: Modify profile name with NFS share and attributes + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: 
"/path/to/ca_cert.pem" + command: modify + name: "Profile 00001" + new_name: "modified profile" + description: "new description" + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.3" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: false + - Id: 4507 + Value: "server attr 2" + IsIgnored: false + # Enter the comma separated string as appearing in the Detailed view on GUI + # System -> Server Topology -> ServerTopology 1 Aisle Name + - DisplayName: 'System, Server Topology, ServerTopology 1 Aisle Name' + Value: Aisle 5 + IsIgnored: false + +- name: Delete a profile using profile name + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + name: "Profile 00001" + +- name: Delete profiles using filters + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + SelectAll: True + Filters: =contains(ProfileName,'Profile 00002') + +- name: Delete profiles using profile list filter + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + filters: + ProfileIds: + - 17123 + - 16124 + +- name: Assign a profile to target along with network share + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: assign + name: "Profile 00001" + device_id: 12456 + boot_to_network_iso: + boot_to_network: True + share_type: NFS + share_ip: "192.168.0.1" + iso_path: "path/to/my_iso.iso" + iso_timeout: 8 + attributes: + Attributes: + - Id: 4506 + Value: "server attr 1" + IsIgnored: true + Options: + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + EndHostPowerState: 
1 + StrictCheckingVlan: True + Schedule: + RunNow: True + RunLater: False + +- name: Unassign a profile using profile name + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + name: "Profile 00003" + +- name: Unassign profiles using filters + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + SelectAll: True + Filters: =contains(ProfileName,'Profile 00003') + +- name: Unassign profiles using profile list filter + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "unassign" + filters: + ProfileIds: + - 17123 + - 16123 + +- name: Migrate a profile + dellemc.openmanage.ome_profile: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "migrate" + name: "Profile 00001" + device_id: 12456 +''' + +RETURN = r''' +--- +msg: + description: Overall status of the profile operation. + returned: always + type: str + sample: "Successfully created 2 profile(s)." +profile_ids: + description: IDs of the profiles created. + returned: when I(command) is C(create) + type: list + sample: [1234, 5678] +job_id: + description: + - Task ID created when I(command) is C(assign), C(migrate) or C(unassign). + - C(assign) and C(unassign) operations do not trigger a task if a profile is auto-deployed. + returned: when I(command) is C(assign), C(migrate) or C(unassign) + type: int + sample: 14123 +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +import time +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.common.dict_transformations import recursive_diff + +PROFILE_VIEW = "ProfileService/Profiles" +TEMPLATE_VIEW = "TemplateService/Templates" +DEVICE_VIEW = "DeviceService/Devices" +JOB_URI = "JobService/Jobs({job_id})" +PROFILE_ACTION = "ProfileService/Actions/ProfileService.{action}" +PROFILE_ATTRIBUTES = "ProfileService/Profiles({profile_id})/AttributeDetails" +PROFILE_NOT_FOUND = "Profile with the name '{name}' not found." +CHANGES_MSG = "Changes found to be applied." +NO_CHANGES_MSG = "No changes found to be applied." 
+SEPRTR = ',' + + +def get_template_details(module, rest_obj): + id = module.params.get('template_id') + query_param = {"$filter": "Id eq {0}".format(id)} + srch = 'Id' + if not id: + id = module.params.get('template_name') + query_param = {"$filter": "Name eq '{0}'".format(id)} + srch = 'Name' + resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param) + if resp.success and resp.json_data.get('value'): + tlist = resp.json_data.get('value', []) + for xtype in tlist: + if xtype.get(srch) == id: + return xtype + module.fail_json(msg="Template with {0} '{1}' not found.".format(srch, id)) + + +def get_target_details(module, rest_obj): + id = module.params.get('device_id') + query_param = {"$filter": "Id eq {0}".format(id)} + srch = 'Id' + if not id: + id = module.params.get('device_service_tag') + query_param = {"$filter": "Identifier eq '{0}'".format(id)} + srch = 'Identifier' + resp = rest_obj.invoke_request('GET', DEVICE_VIEW, query_param=query_param) + if resp.success and resp.json_data.get('value'): + tlist = resp.json_data.get('value', []) + for xtype in tlist: + if xtype.get(srch) == id: + return xtype + return "Target with {0} '{1}' not found.".format(srch, id) + + +def get_profile(rest_obj, module): + """Get profile id based on requested profile name.""" + profile_name = module.params["name"] + profile = None + query_param = {"$filter": "ProfileName eq '{0}'".format(profile_name)} + profile_req = rest_obj.invoke_request("GET", PROFILE_VIEW, query_param=query_param) + for each in profile_req.json_data.get('value'): + if each['ProfileName'] == profile_name: + profile = each + break + return profile + + +def get_network_iso_payload(module): + boot_iso_dict = module.params.get("boot_to_network_iso") + iso_payload = {} + if boot_iso_dict: + iso_payload = {"BootToNetwork": False} + if boot_iso_dict.get("boot_to_network"): + iso_payload["BootToNetwork"] = True + share_type = boot_iso_dict.get("share_type") + iso_payload["ShareType"] = share_type + 
def _normalize_display_name(display_name):
    """Normalize a hierarchical attribute DisplayName.

    Splits on SEPRTR and strips whitespace around every component so that
    user-supplied names match the names reported by OME. Comparison is
    case sensitive; only surrounding whitespace is removed.
    """
    components = str(display_name).split(SEPRTR)
    return SEPRTR.join(part.strip() for part in components)


def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map, adv_list):
    """Walk the nested SubAttributeGroups tree of a profile.

    For every leaf attribute whose fully qualified display name (prefix
    joined with SEPRTR) appears in adv_list, record DisplayName->AttributeId
    in attr_detailed and AttributeId->attribute dict in attr_map.
    Both dicts are mutated in place; nothing is returned.
    """
    if isinstance(subgroup, list):
        for each_sub in subgroup:
            nprfx = "{0}{1}{2}".format(prefix, SEPRTR, each_sub.get("DisplayName"))
            if each_sub.get("SubAttributeGroups"):
                # Non-leaf group: descend with the extended prefix.
                recurse_subattr_list(each_sub.get("SubAttributeGroups"), nprfx,
                                     attr_detailed, attr_map, adv_list)
            else:
                for attr in each_sub.get('Attributes'):
                    attr['prefix'] = nprfx
                    # Qualified name is case sensitive; adv_list entries were
                    # whitespace-normalized by the caller.
                    constr = "{0}{1}{2}".format(nprfx, SEPRTR, attr['DisplayName'])
                    if constr in adv_list:
                        attr_detailed[constr] = attr['AttributeId']
                        attr_map[attr['AttributeId']] = attr


def get_subattr_all(attr_dtls, adv_list):
    """Collect attribute lookups for all top-level attribute groups.

    Returns a tuple (attr_detailed, attr_map) — see recurse_subattr_list
    for the meaning of each mapping.
    """
    attr_detailed = {}
    attr_map = {}
    for each in attr_dtls:
        recurse_subattr_list(each.get('SubAttributeGroups'), each.get('DisplayName'),
                             attr_detailed, attr_map, adv_list)
    return attr_detailed, attr_map


def attributes_check(module, rest_obj, inp_attr, profile_id):
    """Compare requested profile attributes against the current values in OME.

    Resolves DisplayName entries in inp_attr["Attributes"] to attribute IDs
    (mutating the entries in place), drops entries that cannot be resolved,
    and returns the number of attributes whose Value/IsIgnored differ from
    the device. Any failure is treated as "changed" (returns 1) so the
    caller still proceeds — deliberate best-effort behavior.
    """
    diff = 0
    try:
        resp = rest_obj.invoke_request("GET", PROFILE_ATTRIBUTES.format(profile_id=profile_id))
        attr_dtls = resp.json_data
        disp_adv_list = inp_attr.get("Attributes", {})
        adv_list = []
        for attr in disp_adv_list:
            if attr.get("DisplayName"):
                adv_list.append(_normalize_display_name(attr.get("DisplayName")))
        attr_detailed, attr_map = get_subattr_all(attr_dtls.get('AttributeGroups'), adv_list)
        payload_attr = inp_attr.get("Attributes", [])
        rem_attrs = []
        for attr in payload_attr:
            if attr.get("DisplayName"):
                n_k = _normalize_display_name(attr.get("DisplayName"))
                # Resolve the display name to an Id; '' marks "not found".
                attr_id = attr_detailed.get(n_k, "")
                attr['Id'] = attr_id
                attr.pop("DisplayName", None)
            else:
                attr_id = attr.get('Id')
            if attr_id:
                ex_val = attr_map.get(attr_id, {})
                if not ex_val:
                    # Unknown attribute id: remove from the payload later
                    # (cannot remove while iterating).
                    rem_attrs.append(attr)
                    continue
                if attr.get('Value') != ex_val.get("Value") or attr.get('IsIgnored') != ex_val.get("IsIgnored"):
                    diff = diff + 1
        for rem in rem_attrs:
            payload_attr.remove(rem)
    except Exception:
        # Best-effort: if the comparison itself fails, assume a change is
        # needed rather than blocking the operation.
        diff = 1
    return diff


def assign_profile(module, rest_obj):
    """Assign a profile to a target device or queue it for auto-deployment.

    Idempotent: exits without change when the profile is already assigned
    to the requested target; fails when it is assigned elsewhere.
    Triggers the OME job and reports its id when one is created.
    """
    mparam = module.params
    payload = {}
    if mparam.get('name'):
        prof = get_profile(rest_obj, module)
        if prof:
            payload['Id'] = prof['Id']
        else:
            module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    target = get_target_details(module, rest_obj)
    if isinstance(target, dict):
        # Target device exists in OME: regular assignment.
        payload['TargetId'] = target['Id']
        if prof['ProfileState'] == 4:  # 4 == deployed
            if prof['TargetId'] == target['Id']:
                module.exit_json(msg="The profile is assigned to the target {0}.".format(target['Id']))
            else:
                module.fail_json(msg="The profile is assigned to a different target. Use the migrate command or "
                                     "unassign the profile and then proceed with assigning the profile to the target.")
        action = "AssignProfile"
        msg = "Successfully applied the assign operation."
        try:
            resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='GetInvalidTargetsForAssignProfile'),
                                           data={'Id': prof['Id']})
            if target['Id'] in list(resp.json_data):
                module.fail_json(msg="The target device is invalid for the given profile.")
        except HTTPError:
            # Validation endpoint unavailable — proceed and let the assign
            # call itself report any incompatibility.
            resp = None
        ad_opts_list = ['Attributes', 'Options', 'Schedule']
    else:
        # Target not found. A device_id cannot be auto-deployed, so fail;
        # a service tag is queued for deployment when the device joins.
        if mparam.get('device_id'):
            module.fail_json(msg=target)
        action = "AssignProfileForAutoDeploy"
        msg = "Successfully applied the assign operation for auto-deployment."
        payload['Identifier'] = mparam.get('device_service_tag')
        if prof['ProfileState'] == 1:  # 1 == assigned (auto-deploy pending)
            if prof['TargetName'] == payload['Identifier']:
                module.exit_json(msg="The profile is assigned to the target {0}.".format(payload['Identifier']))
            else:
                module.fail_json(msg="The profile is assigned to a different target. "
                                     "Unassign the profile and then proceed with assigning the profile to the target.")
        ad_opts_list = ['Attributes']
    boot_iso_dict = get_network_iso_payload(module)
    if boot_iso_dict:
        payload["NetworkBootToIso"] = boot_iso_dict
    ad_opts = mparam.get("attributes")
    for opt in ad_opts_list:
        if ad_opts and ad_opts.get(opt):
            # NOTE(review): attributes_check mutates ad_opts in place
            # (resolves DisplayName -> Id); its diff count is intentionally
            # unused here — the attributes are sent regardless.
            attributes_check(module, rest_obj, ad_opts, prof['Id'])
            payload[opt] = ad_opts.get(opt)
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    rest_obj.invoke_request('POST', PROFILE_ACTION.format(action=action), data=payload)
    res_dict = {'msg': msg, 'changed': True}
    if action == 'AssignProfile':
        try:
            res_prof = get_profile(rest_obj, module)
            # Brief pause so the deployment task id is populated before we
            # report it — presumably racing OME's job creation; TODO confirm.
            time.sleep(5)
            if res_prof.get('DeploymentTaskId'):
                res_dict['job_id'] = res_prof.get('DeploymentTaskId')
                res_dict['msg'] = "Successfully triggered the job for the assign operation."
        except HTTPError:
            res_dict['msg'] = "Successfully applied the assign operation. Failed to fetch job details."
    module.exit_json(**res_dict)


def unassign_profile(module, rest_obj):
    """Unassign one profile by name, or many profiles via raw filters.

    Refuses to unassign while a deployment job for the profile is still
    running. Reports the follow-up job id when OME triggers one.
    """
    mparam = module.params
    prof = {}
    if mparam.get('name'):
        payload = {}
        prof = get_profile(rest_obj, module)
        if prof:
            if prof['ProfileState'] == 0:  # 0 == unassigned
                module.exit_json(msg="Profile is in an unassigned state.")
            if prof['DeploymentTaskId']:
                try:
                    resp = rest_obj.invoke_request('GET', JOB_URI.format(job_id=prof['DeploymentTaskId']))
                    job_dict = resp.json_data
                    job_status = job_dict.get('LastRunStatus')
                    if job_status.get('Name') == 'Running':
                        module.fail_json(msg="Profile deployment task is in progress. Wait for the job to finish.")
                except HTTPError:
                    # NOTE(review): this message is overwritten by the
                    # success message below and never reaches the user;
                    # kept to preserve existing behavior.
                    msg = "Unable to fetch job details. Applied the unassign operation"
            payload['ProfileIds'] = [prof['Id']]
        else:
            module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    if mparam.get('filters'):
        # Raw OData-style filter payload is forwarded to OME as-is.
        payload = mparam.get('filters')
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    msg = "Successfully applied the unassign operation. No job was triggered."
    rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='UnassignProfiles'), data=payload)
    res_dict = {'msg': msg, 'changed': True}
    try:
        res_prof = get_profile(rest_obj, module)
        time.sleep(3)
        if res_prof.get('DeploymentTaskId'):
            res_dict['job_id'] = res_prof.get('DeploymentTaskId')
            res_dict['msg'] = "Successfully triggered a job for the unassign operation."
    except HTTPError:
        res_dict['msg'] = "Successfully triggered a job for the unassign operation. Failed to fetch the job details."
    module.exit_json(**res_dict)


def create_profile(module, rest_obj):
    """Create one or more profiles from a template and exit with their ids."""
    mparam = module.params
    payload = {}
    template = get_template_details(module, rest_obj)
    payload["TemplateId"] = template["Id"]
    payload["NamePrefix"] = mparam.get("name_prefix")
    payload["NumberOfProfilesToCreate"] = mparam["number_of_profiles"]
    if mparam.get("description"):
        payload["Description"] = mparam["description"]
    boot_iso_dict = get_network_iso_payload(module)
    if boot_iso_dict:
        payload["NetworkBootToIso"] = boot_iso_dict
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    resp = rest_obj.invoke_request('POST', PROFILE_VIEW, data=payload)
    profile_id_list = resp.json_data
    module.exit_json(msg="Successfully created {0} profile(s).".format(len(profile_id_list)),
                     changed=True, profile_ids=profile_id_list)


def modify_profile(module, rest_obj):
    """Modify name, description, boot-to-ISO settings or attributes of a profile.

    Computes a change count against current state and exits with
    NO_CHANGES_MSG when nothing differs (idempotency).
    """
    mparam = module.params
    payload = {}
    prof = get_profile(rest_obj, module)
    if not prof:
        module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    diff = 0
    new_name = mparam.get('new_name')
    payload['Name'] = new_name if new_name else prof['ProfileName']
    if new_name and new_name != prof['ProfileName']:
        diff += 1
    desc = mparam.get('description')
    if desc and desc != prof['ProfileDescription']:
        payload['Description'] = desc
        diff += 1
    boot_iso_dict = get_network_iso_payload(module)
    rdict = prof.get('NetworkBootToIso') if prof.get('NetworkBootToIso') else {}
    if boot_iso_dict:
        nest_diff = recursive_diff(boot_iso_dict, rdict)
        if nest_diff:
            # nest_diff[0] holds additions/changes relative to current state.
            if nest_diff[0]:
                diff += 1
            payload["NetworkBootToIso"] = boot_iso_dict
    ad_opts = mparam.get("attributes")
    if ad_opts and ad_opts.get("Attributes"):
        diff = diff + attributes_check(module, rest_obj, ad_opts, prof['Id'])
        if ad_opts.get("Attributes"):
            payload["Attributes"] = ad_opts.get("Attributes")
    payload['Id'] = prof['Id']
    if diff:
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
        module.exit_json(msg="Successfully modified the profile.", changed=True)
    module.exit_json(msg=NO_CHANGES_MSG)


def delete_profile(module, rest_obj):
    """Delete a profile by name, or several profiles via raw filters.

    A named profile must be unassigned (ProfileState == 0) to be deleted.
    """
    mparam = module.params
    if mparam.get('name'):
        prof = get_profile(rest_obj, module)
        if prof:
            if prof['ProfileState'] > 0:
                module.fail_json(msg="Profile has to be in an unassigned state for it to be deleted.")
            if module.check_mode:
                module.exit_json(msg=CHANGES_MSG, changed=True)
            rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
            module.exit_json(msg="Successfully deleted the profile.", changed=True)
        else:
            # Deleting a non-existent profile is a no-op, not a failure.
            module.exit_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    if mparam.get('filters'):
        payload = mparam.get('filters')
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
        module.exit_json(msg="Successfully completed the delete operation.", changed=True)


def migrate_profile(module, rest_obj):
    """Migrate a deployed profile to a different target device.

    No-op when the profile already points at the requested target; fails
    when the profile is not deployed or the target is invalid for it.
    """
    mparam = module.params
    payload = {}
    payload['ForceMigrate'] = mparam.get('force')
    target = get_target_details(module, rest_obj)
    if not isinstance(target, dict):
        module.fail_json(msg=target)
    payload['TargetId'] = target['Id']
    prof = get_profile(rest_obj, module)
    if prof:
        if target['Id'] == prof['TargetId']:
            module.exit_json(msg=NO_CHANGES_MSG)
        try:
            resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='GetInvalidTargetsForAssignProfile'),
                                           data={'Id': prof['Id']})
            if target['Id'] in list(resp.json_data):
                module.fail_json(msg="The target device is invalid for the given profile.")
        except HTTPError:
            resp = None
        if prof['ProfileState'] == 4:  # migrate applicable in deployed state only
            payload['ProfileId'] = prof['Id']
            if module.check_mode:
                module.exit_json(msg=CHANGES_MSG, changed=True)
            rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='MigrateProfile'), data=payload)
            msg = "Successfully applied the migrate operation."
            res_dict = {'msg': msg, 'changed': True}
            try:
                time.sleep(5)
                res_prof = get_profile(rest_obj, module)
                if res_prof.get('DeploymentTaskId'):
                    res_dict['job_id'] = res_prof.get('DeploymentTaskId')
                    res_dict['msg'] = "Successfully triggered the job for the migrate operation."
            except HTTPError:
                res_dict['msg'] = "Successfully applied the migrate operation. Failed to fetch job details."
            module.exit_json(**res_dict)
        else:
            module.fail_json(msg="Profile needs to be in a deployed state for a migrate operation.")
    else:
        module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))


def profile_operation(module, rest_obj):
    """Dispatch to the handler for the requested command.

    Each handler terminates the module via exit_json/fail_json, so at most
    one branch ever runs to completion.
    """
    command = module.params.get("command")
    if command == "create":
        create_profile(module, rest_obj)
    if command == "modify":
        modify_profile(module, rest_obj)
    if command == "delete":
        delete_profile(module, rest_obj)
    if command == "assign":
        assign_profile(module, rest_obj)
    if command == "unassign":
        unassign_profile(module, rest_obj)
    if command == "migrate":
        migrate_profile(module, rest_obj)


def main():
    """Module entry point: build the argument spec, validate, and dispatch."""
    network_iso_spec = {"boot_to_network": {"required": True, "type": 'bool'},
                        "share_type": {"choices": ['NFS', 'CIFS']},
                        "share_ip": {"type": 'str'},
                        "share_user": {"type": 'str'},
                        "share_password": {"type": 'str', "no_log": True},
                        "workgroup": {"type": 'str'},
                        "iso_path": {"type": 'str'},
                        "iso_timeout": {"type": 'int', "default": 4,
                                        "choices": [1, 2, 4, 8, 16]}}
    assign_spec = {"Attributes": {"type": 'list', "elements": 'dict'},
                   "Options": {"type": 'dict'},
                   "Schedule": {"type": 'dict'}}
    specs = {
        "command": {"default": "create",
                    "choices": ['create', 'modify', 'delete', 'assign', 'unassign', 'migrate']},
        "name_prefix": {"default": "Profile", "type": 'str'},
        "name": {"type": 'str'},
        "new_name": {"type": 'str'},
        "number_of_profiles": {"default": 1, "type": 'int'},
        "template_name": {"type": 'str'},
        "template_id": {"type": "int"},
        "device_id": {"type": 'int'},
        "device_service_tag": {"type": 'str'},
        "description": {"type": 'str'},
        "boot_to_network_iso": {"type": 'dict', "options": network_iso_spec,
                                "required_if": [
                                    ['boot_to_network', True, ['share_type', 'share_ip', 'iso_path']],
                                    ['share_type', 'CIFS', ['share_user', 'share_password']]
                                ]},
        "filters": {"type": 'dict'},
        "attributes": {"type": 'dict', "options": assign_spec},
        "force": {"default": False, "type": 'bool'}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['command', 'create', ['template_name', 'template_id'], True],
            ['command', 'modify', ['name']],
            ['command', 'modify', ['new_name', 'description', 'attributes', 'boot_to_network_iso'], True],
            ['command', 'assign', ['name']],
            ['command', 'assign', ['device_id', 'device_service_tag'], True],
            ['command', 'unassign', ['name', "filters"], True],
            ['command', 'delete', ['name', "filters"], True],
            ['command', 'migrate', ['name']],
            ['command', 'migrate', ['device_id', 'device_service_tag'], True],
        ],
        mutually_exclusive=[
            ['name', 'name_prefix'],
            ['name', 'number_of_profiles'],
            ['name', 'filters'],
            ['device_id', 'device_service_tag'],
            ['template_name', 'template_id']],
        supports_check_mode=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            profile_operation(module, rest_obj)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Host unreachable is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py new file mode 100644 index 00000000..81e3cb2c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py @@ -0,0 +1,262 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: ome_server_interface_profile_info +short_description: Retrieves the information of server interface profile on OpenManage Enterprise Modular. +description: This module allows to retrieves the information of server interface profile + on OpenManage Enterprise Modular. +version_added: "5.1.0" +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + type: list + description: + - The ID of the device. + - I(device_id) is mutually exclusive with I(device_service_tag). + elements: int + device_service_tag: + type: list + description: + - The service tag of the device. + - I(device_service_tag) is mutually exclusive with I(device_id). + elements: str +requirements: + - "python >= 3.8.6" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to OpenManage Enterprise Modular. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Retrieves the server interface profiles of all the device using device ID. 
+ dellemc.openmanage.ome_server_interface_profile_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 10001 + - 10002 + +- name: Retrieves the server interface profiles of all the device using device service tag. + dellemc.openmanage.ome_server_interface_profile_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - 6GHH6H2 + - 6KHH6H3 +""" + +RETURN = """ +--- +msg: + type: str + description: Overall status of the server interface profile information. + returned: on success + sample: "Successfully retrieved the server interface profile information." +server_profiles: + type: list + description: Returns the information of collected server interface profile information. + returned: success + sample: [ + { + "BondingTechnology": "LACP", + "Id": "6KZK6K2", + "ServerInterfaceProfile": [ + { + "FabricId": "1ea6bf64-3cf0-4e06-a136-5046d874d1e7", + "Id": "NIC.Mezzanine.1A-1-1", + "NativeVLAN": 0, + "Networks": [ + { + "CreatedBy": "system", + "CreationTime": "2018-11-27 10:22:14.140", + "Description": "VLAN 1", + "Id": 10001, + "InternalRefNWUUId": "add035b9-a971-400d-a3fa-bb365df1d476", + Name": "VLAN 1", + "Type": 2, + "UpdatedBy": null, + "UpdatedTime": "2018-11-27 10:22:14.140", + "VlanMaximum": 1, + "VlanMinimum": 1 + } + ], + "NicBonded": true, + "OnboardedPort": "59HW8X2:ethernet1/1/1" + }, + { + "FabricId": "3ea6be04-5cf0-4e05-a136-5046d874d1e6", + "Id": "NIC.Mezzanine.1A-2-1", + "NativeVLAN": 0, + "Networks": [ + { + "CreatedBy": "system", + "CreationTime": "2018-09-25 14:46:12.374", + "Description": null, + "Id": 10155, + "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d", + "Name": "jagvlan", + "Type": 1, + "UpdatedBy": null, + "UpdatedTime": "2018-09-25 14:46:12.374", + "VlanMaximum": 143, + "VlanMinimum": 143 + } + ], + "NicBonded": false, + "OnboardedPort": "6H7J6Z2:ethernet1/1/1" + } + ] + } 
+ ] +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +""" + + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params + +DOMAIN_URI = "ManagementDomainService/Domains" +PROFILE_URI = "NetworkService/ServerProfiles" +DEVICE_URI = "DeviceService/Devices" +NETWORK_PROFILE_URI = "NetworkService/ServerProfiles('{0}')/ServerInterfaceProfiles" + +DOMAIN_FAIL_MSG = "The information retrieval operation of server interface profile is supported only on " \ + "OpenManage Enterprise Modular." +CONFIG_FAIL_MSG = "one of the following is required: device_id, device_service_tag." +INVALID_DEVICE = "Unable to complete the operation because the entered " \ + "target device {0}(s) '{1}' are invalid." +PROFILE_ERR_MSG = "Unable to complete the operation because the server " \ + "profile(s) for {0} do not exist in the Fabric Manager." +SUCCESS_MSG = "Successfully retrieved the server interface profile information." 
def check_domain_service(module, rest_obj):
    """Fail fast unless the appliance is OpenManage Enterprise Modular.

    Probes the ManagementDomainService; error code CGEN1006 indicates the
    endpoint is unsupported (i.e. a non-Modular appliance).
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        err_message = json.load(err)
        if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
    return


def get_sip_info(module, rest_obj):
    """Collect server interface profile details for the requested devices.

    Validates the supplied device ids or service tags against the device
    inventory, then fetches each device's server profile and its interface
    profiles (with stripped OData noise). Fails when any requested device
    is unknown or has no profile in the Fabric Manager.

    Returns a list of profile dicts, each carrying a
    "ServerInterfaceProfile" list.
    """
    invalid, valid_service_tag, device_map = [], [], {}
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    # The two identifier styles share one code path: pick the inventory key
    # and the user-supplied values to match against it.
    key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    resp_data = rest_obj.get_all_report_details(DEVICE_URI)
    if resp_data['report_list']:
        for requested in value:
            matched = [dev for dev in resp_data["report_list"] if dev[key] == requested]
            if matched and key == "DeviceServiceTag":
                valid_service_tag.append(requested)
            elif matched and key == "Id":
                # Profiles are keyed by service tag; remember which id the
                # tag came from for error reporting.
                valid_service_tag.append(matched[0]["DeviceServiceTag"])
                device_map[matched[0]["DeviceServiceTag"]] = requested
            if not matched:
                invalid.append(requested)
    if invalid:
        err_value = "id" if key == "Id" else "service tag"
        module.fail_json(msg=INVALID_DEVICE.format(err_value, ",".join(map(str, set(invalid)))))

    invalid_fabric_tag, sip_info = [], []
    for pro_id in valid_service_tag:
        profile_dict = {}
        try:
            profile_resp = rest_obj.invoke_request("GET", "{0}('{1}')".format(PROFILE_URI, pro_id))
        except HTTPError as err:
            err_message = json.load(err)
            # CDEV5008 == profile does not exist for this device. Guarded
            # lookups so an unexpected error payload shape does not raise
            # TypeError; other HTTP errors are skipped silently here —
            # NOTE(review): presumably intentional best-effort; verify.
            ext_info = err_message.get('error', {}).get('@Message.ExtendedInfo') or [{}]
            if ext_info[0].get("MessageId") == "CDEV5008":
                if key == "Id":
                    invalid_fabric_tag.append(device_map[pro_id])
                else:
                    invalid_fabric_tag.append(pro_id)
        else:
            profile_data = rest_obj.strip_substr_dict(profile_resp.json_data)
            profile_dict.update(profile_data)
            np_resp = rest_obj.invoke_request("GET", NETWORK_PROFILE_URI.format(pro_id))
            sip_strip = []
            for sip_entry in np_resp.json_data["value"]:
                np_strip_data = rest_obj.strip_substr_dict(sip_entry)
                np_strip_data["Networks"] = [rest_obj.strip_substr_dict(ntw)
                                             for ntw in np_strip_data["Networks"]]
                sip_strip.append(np_strip_data)
            profile_dict["ServerInterfaceProfile"] = sip_strip
            sip_info.append(profile_dict)

    if invalid_fabric_tag:
        module.fail_json(msg=PROFILE_ERR_MSG.format(", ".join(set(map(str, invalid_fabric_tag)))))
    return sip_info


def main():
    """Module entry point: validate arguments and return the profile info."""
    argument_spec = {
        "device_id": {"required": False, "type": "list", "elements": "int"},
        "device_service_tag": {"required": False, "type": "list", "elements": "str"},
    }
    argument_spec.update(ome_auth_params)
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[('device_id', 'device_service_tag')],
                           required_one_of=[["device_id", "device_service_tag"]],
                           supports_check_mode=True, )
    # Defensive re-check of required_one_of (e.g. both lists present but
    # None); yields the module's own message instead of Ansible's.
    if not any([module.params.get("device_id"), module.params.get("device_service_tag")]):
        module.fail_json(msg=CONFIG_FAIL_MSG)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            sip_info = get_sip_info(module, rest_obj)
            module.exit_json(msg=SUCCESS_MSG, server_profiles=sip_info)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_server_interface_profiles +short_description: Configure server interface profiles +version_added: "5.1.0" +description: This module allows to configure server interface profiles on OpenManage Enterprise Modular. +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + device_id: + description: + - Device id of the Server under chassis fabric. + - I(device_id) and I(device_service_tag) is mutually exclusive. + type: list + elements: int + device_service_tag: + description: + - Service tag of the Server under chassis fabric. + - I(device_service_tag) and I(device_id) is mutually exclusive. + type: list + elements: str + nic_teaming: + description: + - NIC teaming options. + - C(NoTeaming) the NICs are not bonded and provide no load balancing or redundancy. + - C(LACP) use LACP for NIC teaming. + - C(Other) use other technology for NIC teaming. + choices: ['LACP', 'NoTeaming', 'Other'] + type: str + nic_configuration: + description: NIC configuration for the Servers to be applied. + type: list + elements: dict + suboptions: + nic_identifier: + description: + - ID of the NIC or port number. + - C(Note) This will not be validated. + type: str + required: True + team: + description: + - Group two or more ports. The ports must be connected to the same pair of Ethernet switches. + - I(team) is applicable only if I(nic_teaming) is C(LACP). + type: bool + untagged_network: + description: + - The maximum or minimum VLAN id of the network to be untagged. 
+ - The I(untagged_network) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info) + - If I(untagged_network) needs to be unset this needs to be sent as C(0) + - C(Note) The network cannot be added as a untagged network if it is already assigned to a tagged network. + type: int + tagged_networks: + description: + - List of tagged networks + - Network cannot be added as a tagged network if it is already assigned to untagged network + type: dict + suboptions: + state: + description: + - Indicates if a list of networks needs to be added or deleted. + - C(present) to add the network to the tagged list + - C(absent) to delete the Network from the tagged list + choices: [present, absent] + type: str + default: present + names: + description: + - List of network name to be marked as tagged networks + - The I(names) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info) + type: list + elements: str + required: True + job_wait: + description: + - Provides the option to wait for job completion. + type: bool + default: true + job_wait_timeout: + description: + - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration. + - This option is applicable when I(job_wait) is C(True). + type: int + default: 120 +requirements: + - "python >= 3.8.6" +author: "Jagadeesh N V (@jagadeeshnv)" +notes: + - This module supports C(check_mode). + - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular. 
+''' + +EXAMPLES = r''' +--- +- name: Modify Server Interface Profile for the server using the service tag + dellemc.openmanage.ome_server_interface_profiles: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_service_tag: + - SVCTAG1 + - SVCTAG2 + nic_teaming: LACP + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan1 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 + +- name: Modify Server Interface Profile for the server using the device id + dellemc.openmanage.ome_server_interface_profiles: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: + - 34523 + - 48999 + nic_teaming: NoTeaming + nic_configuration: + - nic_identifier: NIC.Mezzanine.1A-1-1 + team: no + untagged_network: 2 + tagged_networks: + names: + - vlan2 + - nic_identifier: NIC.Mezzanine.1A-2-1 + team: yes + untagged_network: 3 + tagged_networks: + names: + - range120-125 +''' + +RETURN = r''' +--- +msg: + description: Status of the overall server interface operation. + returned: always + type: str + sample: Successfully triggered apply server profiles job. +job_id: + description: Job ID of the task to apply the server interface profiles. + returned: on applying the Interface profiles + type: int + sample: 14123 +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. 
If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \ + get_rest_items, strip_substr_dict, job_tracking, apply_diff_key + +SERVER_PROFILE = "NetworkService/ServerProfiles('{service_tag}')" +SERVER_INTERFACE = "NetworkService/ServerProfiles('{service_tag}')/ServerInterfaceProfiles" +VLANS = "NetworkConfigurationService/Networks" +DEVICE_URI = "DeviceService/Devices" +APPLY_SERVER_PROFILES = "NetworkService/Actions/NetworkService.ApplyServersInterfaceProfiles" +JOB_URI = "JobService/Jobs({job_id})" +LAST_EXEC = "JobService/Jobs({job_id})/LastExecutionDetail" +APPLY_TRIGGERED = "Successfully initiated the apply server profiles job." +NO_STAG = "No profile found for service tag {service_tag}." +CHANGES_MSG = "Changes found to be applied." +NO_CHANGES_MSG = "No changes found to be applied." +VLAN_NOT_FOUND = "The VLAN with a name {vlan_name} not found." +DUPLICATE_NIC_IDENTIFIED = "Duplicate NIC identfiers provided." +INVALID_UNTAGGED = "The untagged VLAN {id} provided for the NIC ID {nic_id} is not valid." +NW_OVERLAP = "Network profiles of {service_tag} provided for tagged or untagged VLANs of {nic_id} overlaps." +INVALID_DEV_ST = "Unable to complete the operation because the entered target device service tag(s) '{0}' are invalid." +INVALID_DEV_ID = "Unable to complete the operation because the entered target device ids '{0}' are invalid." 
def get_valid_service_tags(module, rest_obj):
    """Validate the requested devices and resolve them to service tags.

    Rejects duplicate NIC identifiers up front, then checks each supplied
    service tag or device id against the device inventory. Returns the
    de-duplicated list of service tags to operate on.
    """
    service_tags = []
    nic_configs = module.params.get('nic_configuration')
    if nic_configs:
        nic_ids = [nic.get('nic_identifier') for nic in nic_configs]
        if len(nic_ids) > len(set(nic_ids)):
            module.exit_json(failed=True, msg=DUPLICATE_NIC_IDENTIFIED)
    dev_map = get_rest_items(rest_obj, uri=DEVICE_URI)
    if module.params.get('device_service_tag'):
        cmp_set = set(module.params.get('device_service_tag')) - set(dict(dev_map).values())
        if cmp_set:
            module.exit_json(failed=True, msg=INVALID_DEV_ST.format(",".join(cmp_set)))
        service_tags = list(set(module.params.get('device_service_tag')))
    if module.params.get('device_id'):
        cmp_set = set(module.params.get('device_id')) - set(dict(dev_map).keys())
        if cmp_set:
            module.exit_json(failed=True, msg=INVALID_DEV_ID.format(",".join(map(str, cmp_set))))
        service_tags = [dev_map.get(dev_id) for dev_id in set(module.params.get('device_id'))]
    return service_tags


def _get_profile(module, rest_obj, stag):
    """Fetch the server profile for a service tag; fail when none exists."""
    prof = {}
    try:
        resp = rest_obj.invoke_request("GET", SERVER_PROFILE.format(service_tag=stag))
        prof = resp.json_data
    except HTTPError:
        module.exit_json(failed=True, msg=NO_STAG.format(service_tag=stag))
    return prof


def _get_interface(module, rest_obj, stag):
    """Fetch the interface profiles of a service tag as a comparison-friendly dict.

    Maps NIC id -> {NativeVLAN, NicBonded, Networks(set of network ids)}.
    Fails when the profile does not exist.
    """
    intrfc_dict = {}
    try:
        intrfc = rest_obj.invoke_request("GET", SERVER_INTERFACE.format(service_tag=stag))
        intrfc_list = intrfc.json_data.get("value")
        intrfc_dict = dict((sip['Id'], {"NativeVLAN": sip['NativeVLAN'],
                                        "NicBonded": sip["NicBonded"],
                                        "Networks": set(ntw['Id'] for ntw in sip['Networks'])
                                        }) for sip in intrfc_list)
    except HTTPError:
        module.exit_json(failed=True, msg=NO_STAG.format(service_tag=stag))
    return intrfc_dict


def get_server_profiles(module, rest_obj, service_tags):
    """Build the current-state map: service tag -> profile with interfaces."""
    profile_dict = {}
    for stag in service_tags:
        prof = _get_profile(module, rest_obj, stag)
        intrfc = _get_interface(module, rest_obj, stag)
        prof["ServerInterfaceProfiles"] = intrfc
        prof = strip_substr_dict(prof)
        profile_dict[stag] = prof
    return profile_dict


def get_vlan_ids(rest_obj):
    """Return (name -> vlan id map, native-vlan lookup).

    The natives map is keyed by the VLAN number for single-VLAN networks
    (VlanMaximum == VlanMinimum) and always contains 0 -> 0 so that an
    explicit "unset" untagged VLAN is accepted.
    """
    resp = rest_obj.invoke_request("GET", VLANS)
    vlans = resp.json_data.get('value')
    vlan_map = {}
    natives = {}
    for vlan in vlans:
        vlan_map[vlan['Name']] = vlan['Id']
        if vlan['VlanMaximum'] == vlan['VlanMinimum']:
            natives[vlan['VlanMaximum']] = vlan['Id']
    natives.update({0: 0})
    return vlan_map, natives


def compare_profile(template, profile):
    """Apply the desired template onto one profile and count the changes.

    Mutates profile in place (apply_diff_key copies differing keys over)
    and reconciles each NIC's tagged-network set with the template's
    present/absent sets. Returns the number of differences found.
    """
    diff = 0
    diff = diff + apply_diff_key(template, profile, ["BondingTechnology"])
    sip = profile.get('ServerInterfaceProfiles')
    for nic, ntw in sip.items():
        tmp = template.get(nic, {})
        diff = diff + apply_diff_key(tmp, ntw, ["NativeVLAN"])
        diff = diff + apply_diff_key(tmp, ntw, ["NicBonded"])
        untags = ntw.get("Networks")
        # Desired set = current + requested-present - requested-absent.
        desired = set(untags) | set(tmp.get('present', set()))
        desired = desired - set(tmp.get('absent', set()))
        if desired.symmetric_difference(set(untags)):
            ntw["Networks"] = desired
            diff = diff + 1
    return diff


def get_template(module, vlan_dict, natives):
    """Translate module parameters into a desired-state template.

    Resolves tagged-network names to ids and validates untagged VLANs
    against the natives lookup. The 'team' option only takes effect when
    nic_teaming is LACP (or unspecified).
    """
    template = {"ServerInterfaceProfiles": {}}
    mparams = module.params
    ignore_teaming = True
    if mparams.get('nic_teaming'):
        template['BondingTechnology'] = mparams.get('nic_teaming')
        if mparams.get('nic_teaming') != "LACP":
            ignore_teaming = False
    if mparams.get('nic_configuration'):
        for nic in mparams.get('nic_configuration'):
            nic_data = {}
            if nic.get('team') is not None and ignore_teaming:
                nic_data['NicBonded'] = nic.get('team')
            ntvlan = nic.get('untagged_network')
            if ntvlan is not None:
                if ntvlan not in natives:
                    module.exit_json(failed=True,
                                     msg=INVALID_UNTAGGED.format(id=ntvlan, nic_id=nic['nic_identifier']),
                                     natives=natives)
                nic_data['NativeVLAN'] = ntvlan
            if nic.get('tagged_networks'):
                tg = nic.get('tagged_networks')
                nic_data[tg.get('state')] = set()
                for vlan_name in tg.get('names'):
                    if vlan_name in vlan_dict:
                        nic_data[tg.get('state')].add(vlan_dict[vlan_name])
                    else:
                        module.exit_json(failed=True, msg=VLAN_NOT_FOUND.format(vlan_name=vlan_name))
            template[nic['nic_identifier']] = nic_data
    return template


def get_payload(module, rest_obj, profile_dict):
    """Produce the apply-profiles payload, or exit when nothing changes.

    Compares every profile with the desired template, rejects untagged/
    tagged VLAN overlaps, and honors check mode. Only called when at least
    one profile differs; exits with NO_CHANGES_MSG otherwise.
    """
    vlan_dict, natives = get_vlan_ids(rest_obj)
    template = get_template(module, vlan_dict, natives)
    diff = 0
    payload = []
    for stag, prof in profile_dict.items():
        df = compare_profile(template, prof)
        if df:
            sip_list = []
            for nic_id, cfg in prof["ServerInterfaceProfiles"].items():
                # A network cannot be both the untagged VLAN and a tagged one.
                if natives.get(cfg['NativeVLAN']) in set(cfg['Networks']):
                    module.exit_json(failed=True, msg=NW_OVERLAP.format(service_tag=stag, nic_id=nic_id))
                sips = {"Id": nic_id, "NativeVLAN": cfg['NativeVLAN'], "NicBonded": cfg["NicBonded"],
                        "Networks": [{'Id': ntw} for ntw in cfg['Networks']]}
                sip_list.append(sips)
            prof["ServerInterfaceProfiles"] = sip_list
            payload.append(prof)
        diff = diff + df
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    return payload


def handle_job(module, rest_obj, job_id):
    """Track the apply job (when job_wait) or report its id and return.

    On completion the job's last execution detail, if retrievable, becomes
    the final message.
    """
    if module.params.get("job_wait"):
        job_failed, msg, job_dict, wait_time = job_tracking(
            rest_obj, JOB_URI.format(job_id=job_id), max_job_wait_sec=module.params.get('job_wait_timeout'))
        try:
            job_resp = rest_obj.invoke_request('GET', LAST_EXEC.format(job_id=job_id))
            msg = job_resp.json_data.get("Value")
            msg = msg.replace('\n', ' ')
        except Exception:
            # Best-effort: fall back to the tracked job's description.
            msg = job_dict.get('JobDescription', msg)
        module.exit_json(failed=job_failed, msg=msg, job_id=job_id, changed=True)
    else:
        module.exit_json(changed=True, msg=APPLY_TRIGGERED, job_id=job_id)


def main():
    """Module entry point: validate, diff, apply, and track the job."""
    specs = {"device_id": {"type": 'list', "elements": 'int'},
             "device_service_tag": {"type": 'list', "elements": 'str'},
             "nic_teaming": {"choices": ['LACP', 'NoTeaming', 'Other']},
             "nic_configuration": {
                 "type": 'list', "elements": 'dict',
                 "options": {
                     "nic_identifier": {"type": 'str', "required": True},
                     "team": {"type": 'bool'},
                     "untagged_network": {"type": 'int'},
                     "tagged_networks": {
                         "type": 'dict', "options": {
                             "state": {"choices": ['present', 'absent'], "default": 'present'},
                             "names": {"type": 'list', "elements": 'str', 'required': True}
                         },
                     }
                 }},
             "job_wait": {"type": 'bool', "default": True},
             "job_wait_timeout": {"type": 'int', "default": 120}}
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[
            ('device_id', 'device_service_tag',)],
        required_one_of=[('device_id', 'device_service_tag',),
                         ('nic_teaming', 'nic_configuration')],
        supports_check_mode=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            service_tags = get_valid_service_tags(module, rest_obj)
            profiles = get_server_profiles(module, rest_obj, service_tags)
            apply_data = get_payload(module, rest_obj, profiles)
            resp = rest_obj.invoke_request("POST", APPLY_SERVER_PROFILES, data=apply_data)
            jobid = resp.json_data.get("JobId")
            handle_job(module, rest_obj, jobid)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_smart_fabric +short_description: Create, modify or delete a fabric on OpenManage Enterprise Modular +version_added: "2.1.0" +description: + - This module allows to create a fabric, and modify or delete an existing fabric + on OpenManage Enterprise Modular. +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + state: + type: str + description: + - C(present) creates a new fabric or modifies an existing fabric. + - C(absent) deletes an existing fabric. + - "Notes: The create, modify, or delete fabric operation takes around 15-20 minutes to complete. It is recommended + not to start another operation until the current operation is completed." + choices: [present, absent] + default: present + name: + required: true + type: str + description: Provide the I(name) of the fabric to be created, deleted or modified. + new_name: + type: str + description: Provide the I(name) of the fabric to be modified. + description: + type: str + description: Provide a short description of the fabric to be created or modified. + fabric_design: + type: str + description: + - "Specify the fabric topology. See the API + U(https://www.dell.com/support/manuals/en-in/poweredge-mx7000/omem_1_20_10_ug/smartfabric-network-topologies) + to know more about the topology." + - I(fabric_design) is mandatory for fabric creation. + choices: [2xMX5108n_Ethernet_Switches_in_same_chassis, + 2xMX9116n_Fabric_Switching_Engines_in_same_chassis, + 2xMX9116n_Fabric_Switching_Engines_in_different_chassis] + primary_switch_service_tag: + type: str + description: + - Service tag of the first switch. + - I(primary_switch_service_tag) is mandatory for fabric creation. 
+ - I(primary_switch_service_tag) must belong to the model selected in I(fabric_design). + secondary_switch_service_tag: + type: str + description: + - Service tag of the second switch. + - I(secondary_switch_service_tag) is mandatory for fabric creation. + - I(secondary_switch_service_tag) must belong to the model selected in I(fabric_design). + override_LLDP_configuration: + type: str + description: + - Enable this configuration to allow Fabric Management Address to be included in LLDP messages. + - "Notes: OpenManage Enterprise Modular 1.0 does not support this option. + Some software networking solutions require a single management address to be transmitted by all Ethernet switches + to represent the entire fabric. Enable this feature only when connecting to such a solution." + choices: ['Enabled', 'Disabled'] +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Create a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + description: "fabric desc" + fabric_design: "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + primary_switch_service_tag: "SVTG123" + secondary_switch_service_tag: "PXYT456" + override_LLDP_configuration: "Enabled" + +- name: Modify a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: present + name: "fabric1" + new_name: "fabric_gold1" + description: "new description" + +- name: Delete a fabric + dellemc.openmanage.ome_smart_fabric: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "fabric1" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the fabric operation. + returned: always + sample: "Fabric creation operation is initiated." +fabric_id: + type: str + description: Returns the ID when a fabric is created, modified or deleted. + returned: success + sample: "1312cceb-c3dd-4348-95c1-d8541a17d776" +additional_info: + type: dict + description: Additional details of the fabric operation. + returned: when I(state=present) and additional information present in response. + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "RelatedProperties": [], + "Message": "Fabric update is successful. The OverrideLLDPConfiguration attribute is not provided in the + payload, so it preserves the previous value.", + "MessageArgs": [], + "Severity": "Informational", + "Resolution": "Please update the Fabric with the OverrideLLDPConfiguration as Disabled or Enabled if + necessary." 
+ } + ] + } +} +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "RelatedProperties": [], + "Message": "Unable to perform operation, because the fabric manager was not reachable.", + "MessageArgs": [], + "Severity": "Warning", + "Resolution": "Make sure of the following and retry the operation: 1) There is at least one advanced + I/O Module in power-on mode. For example, MX9116n Ethernet Switch and MX5108n Ethernet Switch. However, + if an advanced I/O Module is available in the power-on mode, make sure that the network profile is not + set when the fabric manager is in the switch-over mode. 2) If the issue persists, wait for few minutes and retry the operation." + } + ] + } +} +''' + +import json +import socket +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ssl import SSLError + +FABRIC_URI = "NetworkService/Fabrics" +FABRIC_ID_URI = "NetworkService/Fabrics('{fabric_id}')" +DOMAIN_URI = "ManagementDomainService/Domains" +DEVICE_URI = "DeviceService/Devices" + +MSM_URI = "DeviceService/Devices({lead_chassis_device_id})/InventoryDetails('deviceSoftware')" + +CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied." +CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied." +FABRIC_NOT_FOUND_ERROR_MSG = "The smart fabric '{0}' is not present in the system." +DOMAIN_SERVICE_TAG_ERROR_MSG = "Unable to retrieve the domain information because the" \ + " domain of the provided service tag {0} is not available." 
+LEAD_CHASSIS_ERROR_MSG = "System should be a lead chassis if the assigned fabric topology type is {0}." +SYSTEM_NOT_SUPPORTED_ERROR_MSG = "Fabric management is not supported on the specified system." +DESIGN_MODEL_ERROR_MSG = "The network type of the {0} must be {1}." +DEVICE_SERVICE_TAG_TYPE_ERROR_MSG = "The {0} type must be {1}." +DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG = "Unable to retrieve the device information because the device" \ + " with the provided service tag {0} is not available." +IDEMPOTENCY_MSG = "Specified fabric details are the same as the existing settings." +REQUIRED_FIELD = "Options 'fabric_design', 'primary_switch_service_tag' and 'secondary_switch_service_tag'" \ + " are required for fabric creation." +DUPLICATE_TAGS = "The switch details of the primary switch overlaps with the secondary switch details." +PRIMARY_SWITCH_OVERLAP_MSG = "The primary switch service tag is overlapping with existing secondary switch details." +SECONDARY_SWITCH_OVERLAP_MSG = "The switch details of the secondary switch overlaps with the existing primary" \ + " switch details." 
+ + +def get_service_tag_with_fqdn(rest_obj, module): + """ + get the service tag, if hostname is dnsname + """ + hostname = module.params["hostname"] + service_tag = None + device_details = rest_obj.get_all_items_with_pagination(DEVICE_URI) + for each_device in device_details["value"]: + for item in each_device["DeviceManagement"]: + if item.get("DnsName") == hostname or item.get('NetworkAddress') == hostname: + return each_device["DeviceServiceTag"] + return service_tag + + +def validate_lead_msm_version(each_domain, module, fabric_design=None): + """ + validate lead chassis for design type + and find the msm version of the domain + """ + role_type = each_domain["DomainRoleTypeValue"].upper() + if fabric_design and fabric_design == "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" and \ + role_type != "LEAD": + module.fail_json(msg=LEAD_CHASSIS_ERROR_MSG.format(fabric_design)) + msm_version = each_domain["Version"] + return msm_version + + +def get_ip_from_host(hostname): + """ + workaround: + when Virtual IP DNS name used in hostname, the DNS Name not reflected in device info + instead it shows original IP DNSName which causes failure in finding service tag of the device + Solution: Convert VIP DNS name to IP + """ + ipaddr = hostname + try: + result = socket.getaddrinfo(hostname, None) + last_element = result[-1] + ip_address = last_element[-1][0] + if ip_address: + ipaddr = ip_address + except socket.gaierror: + ipaddr = hostname + except Exception: + ipaddr = hostname + return ipaddr + + +def get_msm_device_details(rest_obj, module): + """ + Get msm details + :param rest_obj: session object + :param module: Ansible module object + :return: tuple + 1st item: service tag of the domain + 2nd item: msm version of ome-M device + """ + hostname = get_ip_from_host(module.params["hostname"]) + fabric_design = module.params.get("fabric_design") + msm_version = "" + service_tag = get_service_tag_with_fqdn(rest_obj, module) + domain_details = 
rest_obj.get_all_items_with_pagination(DOMAIN_URI) + for each_domain in domain_details["value"]: + if service_tag and service_tag == each_domain["Identifier"]: + msm_version = validate_lead_msm_version(each_domain, module, fabric_design) + break + if hostname in each_domain["PublicAddress"]: + msm_version = validate_lead_msm_version(each_domain, module, fabric_design) + service_tag = each_domain["Identifier"] + break + else: + module.fail_json(msg=SYSTEM_NOT_SUPPORTED_ERROR_MSG) + return service_tag, msm_version + + +def compare_payloads(modify_payload, current_payload): + """ + :param modify_payload: payload created to update existing setting + :param current_payload: already existing payload for specified fabric + :return: bool - compare existing and requested setting values of fabric in case of modify operations + if both are same return True + """ + diff = False + for key, val in modify_payload.items(): + if current_payload is None or current_payload.get(key) is None: + return True + elif isinstance(val, dict): + if compare_payloads(val, current_payload.get(key)): + return True + elif val != current_payload.get(key): + return True + return diff + + +def idempotency_check_for_state_present(fabric_id, current_payload, expected_payload, module): + """ + idempotency check in case of state present + :param fabric_id: fabric id + :param current_payload: payload created + :param expected_payload: already existing payload for specified fabric + :param module: ansible module object + :return: None + """ + if fabric_id: + exp_dict = expected_payload.copy() + cur_dict = current_payload.copy() + for d in (exp_dict, cur_dict): + fab_dz_lst = d.pop("FabricDesignMapping", []) + for fab in fab_dz_lst: + d[fab.get('DesignNode')] = fab.get('PhysicalNode') + payload_diff = compare_payloads(exp_dict, cur_dict) + if module.check_mode: + if payload_diff: + module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True) + else: + module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, 
changed=False) + elif not payload_diff: + module.exit_json(msg=IDEMPOTENCY_MSG, changed=False) + else: + if module.check_mode: + module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True) + + +def design_node_dict_update(design_node_map): + """ + make one level dictionary for design map for easy processing + :param design_node_map: design node map content + :return: dict + """ + d = {} + for item in design_node_map: + if item["DesignNode"] == "Switch-A" and item.get('PhysicalNode'): + d.update({'PhysicalNode1': item['PhysicalNode']}) + if item["DesignNode"] == "Switch-B" and item.get('PhysicalNode'): + d.update({'PhysicalNode2': item['PhysicalNode']}) + return d + + +def validate_switches_overlap(current_dict, modify_dict, module): + """ + Validation in case of modify operation when current setting user provided switches details overlaps + :param current_dict: modify payload created + :param modify_dict: current payload of specified fabric + :param module: Ansible module object + """ + modify_primary_switch = modify_dict.get("PhysicalNode1") + current_secondary_switch = current_dict.get("PhysicalNode2") + modify_secondary_switch = modify_dict.get("PhysicalNode2") + current_primary_switch = current_dict.get("PhysicalNode1") + if modify_primary_switch and current_primary_switch != modify_primary_switch: + module.fail_json(msg="The modify operation does not support primary_switch_service_tag update.") + if modify_secondary_switch and current_secondary_switch != modify_secondary_switch: + module.fail_json(msg="The modify operation does not support secondary_switch_service_tag update.") + flag = all([modify_primary_switch, modify_secondary_switch, current_primary_switch, + current_secondary_switch]) and (modify_primary_switch == current_secondary_switch and + modify_secondary_switch == current_primary_switch) + if not flag and modify_primary_switch is not None and current_secondary_switch is not None and \ + modify_primary_switch == current_secondary_switch: + 
module.fail_json(PRIMARY_SWITCH_OVERLAP_MSG) + if not flag and modify_secondary_switch is not None and current_primary_switch is not None and \ + modify_secondary_switch == current_primary_switch: + module.fail_json(SECONDARY_SWITCH_OVERLAP_MSG) + + +def fabric_design_map_payload_creation(design_map_modify_payload, design_map_current_payload, module): + """ + process FabricDesignMapping contents + :param design_map_modify_payload: modify payload created + :param design_map_current_payload: current payload of specified fabric + :param module: Ansible module object + :return: list + """ + modify_dict = design_node_dict_update(design_map_modify_payload) + current_dict = design_node_dict_update(design_map_current_payload) + validate_switches_overlap(current_dict, modify_dict, module) + current_dict.update(modify_dict) + design_list = [] + for key, val in current_dict.items(): + if key == "PhysicalNode1": + design_list.append({'DesignNode': 'Switch-A', 'PhysicalNode': val}) + else: + design_list.append({'DesignNode': 'Switch-B', 'PhysicalNode': val}) + return design_list + + +def merge_payload(modify_payload, current_payload, module): + """ + :param modify_payload: payload created to update existing setting + :param current_payload: already existing payload for specified fabric + :param module: Ansible module object + :return: bool - compare existing and requested setting values of fabric in case of modify operations + if both are same return True + """ + _current_payload = dict(current_payload) + _current_payload.update(modify_payload) + if modify_payload.get("FabricDesign") and current_payload.get("FabricDesign"): + _current_payload["FabricDesign"].update(modify_payload["FabricDesign"]) + elif modify_payload.get("FabricDesign") and not current_payload.get("FabricDesign"): + _current_payload["FabricDesign"] = modify_payload["FabricDesign"] + fabric_design_map_list = fabric_design_map_payload_creation(modify_payload.get("FabricDesignMapping", []), + 
current_payload.get("FabricDesignMapping", []), module) + if fabric_design_map_list: + _current_payload.update({"FabricDesignMapping": fabric_design_map_list}) + return _current_payload + + +def get_fabric_design(fabric_design_uri, rest_obj): + """ + Get the fabric design name from the fabric design uri which is returned from GET request + :param fabric_design_uri: fabric design uri + :param rest_obj: session object + :return: dict + """ + fabric_design = {} + if fabric_design_uri: + resp = rest_obj.invoke_request("GET", fabric_design_uri.split('/api/')[-1]) + design_type = resp.json_data.get("Name") + fabric_design = {"Name": design_type} + return fabric_design + + +def get_current_payload(fabric_details, rest_obj): + """ + extract payload from existing fabric details, which is + obtained from GET request of existing fabric, to match with payload created + :param fabric_details: dict - specified fabric details + :return: dict + """ + if fabric_details.get("OverrideLLDPConfiguration") and fabric_details.get("OverrideLLDPConfiguration") not in \ + ["Enabled", "Disabled"]: + fabric_details.pop("OverrideLLDPConfiguration", None) + payload = { + "Id": fabric_details["Id"], + "Name": fabric_details["Name"], + "Description": fabric_details.get("Description"), + "OverrideLLDPConfiguration": fabric_details.get("OverrideLLDPConfiguration"), + "FabricDesignMapping": fabric_details.get("FabricDesignMapping", []), + "FabricDesign": get_fabric_design(fabric_details["FabricDesign"].get("@odata.id"), rest_obj) + + } + return dict([(k, v) for k, v in payload.items() if v]) + + +def create_modify_payload(module_params, fabric_id, msm_version): + """ + payload creation for fabric management in case of create/modify operations + :param module_params: ansible module parameters + :param fabric_id: fabric id in case of modify operation + :param msm_version: msm version details + :return: dict + """ + backup_params = dict([(k, v) for k, v in module_params.items() if v]) + _payload = { + 
"Name": backup_params["name"], + "Description": backup_params.get("description"), + "OverrideLLDPConfiguration": backup_params.get("override_LLDP_configuration"), + "FabricDesignMapping": [], + "FabricDesign": {} + } + if backup_params.get("primary_switch_service_tag"): + _payload["FabricDesignMapping"].append({ + "DesignNode": "Switch-A", + "PhysicalNode": backup_params["primary_switch_service_tag"] + }) + if backup_params.get("secondary_switch_service_tag"): + _payload["FabricDesignMapping"].append({ + "DesignNode": "Switch-B", + "PhysicalNode": backup_params["secondary_switch_service_tag"] + }) + if backup_params.get("fabric_design"): + _payload.update({"FabricDesign": {"Name": backup_params["fabric_design"]}}) + if msm_version.startswith("1.0"): # OverrideLLDPConfiguration attribute not supported in msm 1.0 version + _payload.pop("OverrideLLDPConfiguration", None) + if fabric_id: # update id/name in case of modify operation + _payload["Name"] = backup_params.get("new_name", backup_params["name"]) + _payload["Id"] = fabric_id + payload = dict([(k, v) for k, v in _payload.items() if v]) + return payload + + +def get_fabric_id_details(name, all_fabrics): + """ + obtain the fabric id using fabric name + :param name: fabric name + :param all_fabrics: All available fabric in the system + :return: tuple + 1st item: fabric id + 2nd item: all details of fabric specified in dict + """ + fabric_id, fabric_details = None, None + for fabric_each in all_fabrics: + if fabric_each["Name"] == name: + fabric_id = fabric_each["Id"] + fabric_details = fabric_each + break + return fabric_id, fabric_details + + +def validate_device_type(device_type_name, identifier, device_details, module): + """ + Validation for iom and chassis device type and also design modes of model + :param device_type_name: device type name eg: NETWORK_IOM, CHASSIS + :param identifier: identifier to access device type name + :param device_details: all details of device + :param module: ansible module object + 
:return: None + """ + device_map = { + "primary_switch_service_tag": "NETWORK_IOM", + "secondary_switch_service_tag": "NETWORK_IOM", + "hostname": "CHASSIS" + } + design_mode = module.params.get("fabric_design") + if device_type_name != device_map[identifier]: + module.fail_json( + msg=DEVICE_SERVICE_TAG_TYPE_ERROR_MSG.format(identifier, device_map[identifier])) + if device_type_name != "CHASSIS" and design_mode: + design_model = design_mode.split("_")[0].split('2x')[-1] + identifier_model = device_details["Model"] + if design_model not in identifier_model: + module.fail_json( + msg=DESIGN_MODEL_ERROR_MSG.format(identifier, design_model)) + + +def validate_service_tag(device_service_tag, identifier, device_type_map, rest_obj, module): + """ + Validate the service tag and device type of device + :param identifier: identifier options which required find service tag from module params + primary_switch_service_tag, secondary_switch_service_tag, hostname + :param device_service_tag: device service tag + :param device_type_map: map to get the + :param rest_obj: session object + :param module: ansible module object + :return: None + """ + if device_service_tag is not None: + device_id_details = rest_obj.get_device_id_from_service_tag(device_service_tag) + device_details = device_id_details["value"] + if device_id_details["Id"] is None: + module.fail_json(msg=DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG.format(device_service_tag)) + identifier_device_type = device_details["Type"] + validate_device_type(device_type_map[identifier_device_type], identifier, device_details, module) + + +def validate_devices(host_service_tag, rest_obj, module): + """ + validate domain, primary switch and secondary switch devices + :param host_service_tag: service tag of the hostname provided + :param rest_obj: session object + :param module: Ansible module object + :return: None + """ + primary = module.params.get("primary_switch_service_tag") + secondary = 
module.params.get("secondary_switch_service_tag") + device_type_map = rest_obj.get_device_type() + validate_service_tag(host_service_tag, "hostname", device_type_map, rest_obj, module) + validate_service_tag(primary, + "primary_switch_service_tag", + device_type_map, rest_obj, module) + validate_service_tag(secondary, + "secondary_switch_service_tag", + device_type_map, rest_obj, + module) + + +def required_field_check_for_create(fabric_id, module): + params = module.params + if not fabric_id and not all([params.get("fabric_design"), params.get("primary_switch_service_tag"), + params.get("secondary_switch_service_tag")]): + module.fail_json(msg=REQUIRED_FIELD) + + +def process_output(name, fabric_resp, msg, fabric_id, rest_obj, module): + """ + fabric management actions creation/update of smart fabric output details processing + :param name: fabric name specified + :param fabric_resp: json response from ome + :param msg: specific message of create and modify operation + :param fabric_id: fabric id in case of modify + :param rest_obj: current session object + :param module: Ansible module object + :return: None + """ + identifier = fabric_resp + if fabric_id: + identifier = fabric_id + if isinstance(fabric_resp, dict): + all_fabrics = rest_obj.get_all_items_with_pagination(FABRIC_URI)["value"] + identifier, current_fabric_details = get_fabric_id_details(name, all_fabrics) + if not identifier: + identifier = "" + module.exit_json(msg=msg, fabric_id=identifier, additional_info=fabric_resp, changed=True) + module.exit_json(msg=msg, fabric_id=identifier, changed=True) + + +def validate_modify(module, current_payload): + """Fabric modification does not support fabric design type modification""" + if module.params.get("fabric_design") and current_payload["FabricDesign"]["Name"] and \ + (module.params.get("fabric_design") != current_payload["FabricDesign"]["Name"]): + module.fail_json(msg="The modify operation does not support fabric_design update.") + + +def 
create_modify_fabric(name, all_fabric, rest_obj, module): + """ + fabric management actions creation/update of smart fabric + :param all_fabric: all available fabrics in system + :param rest_obj: current session object + :param module: ansible module object + :param name: fabric name specified + :return: None + """ + fabric_id, current_fabric_details = get_fabric_id_details(name, all_fabric) + required_field_check_for_create(fabric_id, module) + host_service_tag, msm_version = get_msm_device_details(rest_obj, module) + validate_devices(host_service_tag, rest_obj, module) + uri = FABRIC_URI + expected_payload = create_modify_payload(module.params, fabric_id, msm_version) + payload = dict(expected_payload) + method = "POST" + msg = "Fabric creation operation is initiated." + current_payload = {} + if fabric_id: + current_payload = get_current_payload(current_fabric_details, rest_obj) + validate_modify(module, current_payload) + method = "PUT" + msg = "Fabric modification operation is initiated." 
+ uri = FABRIC_ID_URI.format(fabric_id=fabric_id) + payload = merge_payload(expected_payload, current_payload, module) + idempotency_check_for_state_present(fabric_id, current_payload, expected_payload, module) + resp = rest_obj.invoke_request(method, uri, data=payload) + fabric_resp = resp.json_data + process_output(name, fabric_resp, msg, fabric_id, rest_obj, module) + + +def check_fabric_exits_for_state_absent(fabric_values, module, fabric_name): + """ + idempotency check in case of state absent + :param fabric_values: fabric details of existing fabric + :param module: ansible module object + :param fabric_name: fabric name + :return: str - fabric id + """ + fabric_id, fabric_details = get_fabric_id_details(fabric_name, fabric_values) + if module.check_mode and fabric_id is None: + module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG) + if module.check_mode and fabric_id is not None: + module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True) + if not module.check_mode and fabric_id is None: + module.exit_json(msg=FABRIC_NOT_FOUND_ERROR_MSG.format(fabric_name)) + return fabric_id + + +def delete_fabric(all_fabrics, rest_obj, module, name): + """ + deletes the fabric specified + :param all_fabrics: All available fabric in system + :param rest_obj: session object + :param module: ansible module object + :param name: fabric name specified + :return: None + """ + fabric_id = check_fabric_exits_for_state_absent(all_fabrics, module, name) + rest_obj.invoke_request("DELETE", FABRIC_ID_URI.format(fabric_id=fabric_id)) + module.exit_json(msg="Fabric deletion operation is initiated.", fabric_id=fabric_id, changed=True) + + +def fabric_actions(rest_obj, module): + """ + fabric management actions + :param rest_obj: session object + :param module: ansible module object + :return: None + """ + module_params = module.params + state = module_params["state"] + name = module_params["name"] + all_fabrics = rest_obj.get_all_items_with_pagination(FABRIC_URI)["value"] + if 
state == "present": + create_modify_fabric(name, all_fabrics, rest_obj, module) + else: + delete_fabric(all_fabrics, rest_obj, module, name) + + +def main(): + design_choices = ['2xMX5108n_Ethernet_Switches_in_same_chassis', + '2xMX9116n_Fabric_Switching_Engines_in_same_chassis', + '2xMX9116n_Fabric_Switching_Engines_in_different_chassis' + ] + specs = { + "state": {"type": "str", "required": False, "default": "present", "choices": ['present', 'absent']}, + "name": {"required": True, "type": "str"}, + "new_name": {"required": False, "type": "str"}, + "description": {"required": False, "type": "str"}, + "fabric_design": {"required": False, "type": "str", + "choices": design_choices}, + "primary_switch_service_tag": {"required": False, "type": "str"}, + "secondary_switch_service_tag": {"required": False, "type": "str"}, + "override_LLDP_configuration": {"required": False, "type": "str", "choices": ['Enabled', 'Disabled']}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[['state', 'present', ('new_name', 'description', 'fabric_design', 'primary_switch_service_tag', + 'secondary_switch_service_tag', 'override_LLDP_configuration',), True]], + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + fabric_actions(rest_obj, module) + except HTTPError as err: + if err.code == 501: + module.fail_json(msg=SYSTEM_NOT_SUPPORTED_ERROR_MSG, error_info=json.load(err)) + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py new file mode 100644 index 
00000000..cae5d8d6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py @@ -0,0 +1,544 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_smart_fabric_uplink +short_description: Create, modify or delete a uplink for a fabric on OpenManage Enterprise Modular +version_added: "2.1.0" +description: This module allows to create, modify or delete an uplink for a fabric. +extends_documentation_fragment: + - dellemc.openmanage.omem_auth_options +options: + state: + description: + - C(present) + - Creates a new uplink with the provided I(name). + - Modifies an existing uplink with the provided I(name). + - C(absent) – Deletes the uplink with the provided I(name). + - I(WARNING) Delete operation can impact the network infrastructure. + choices: [present, absent] + default: present + type: str + fabric_name: + type: str + description: Provide the I(fabric_name) of the fabric for which the uplink is to be configured. + required: true + name: + type: str + description: Provide the I(name) of the uplink to be created, modified or deleted. + required: true + new_name: + type: str + description: Provide the new I(new_name) for the uplink. + description: + type: str + description: Provide a short description for the uplink to be created or modified. + uplink_type: + description: + - Specify the uplink type. + - I(NOTE) The uplink type cannot be changed for an existing uplink. 
+ choices: ['Ethernet', 'FCoE', 'FC Gateway', 'FC Direct Attach', 'Ethernet - No Spanning Tree'] + type: str + ufd_enable: + description: + - "Add or Remove the uplink to the Uplink Failure Detection (UFD) group. The UFD group identifies the loss of + connectivity to the upstream switch and notifies the servers that are connected to the switch. During an uplink + failure, the switch disables the corresponding downstream server ports. The downstream servers can then select + alternate connectivity routes, if available." + - "I(WARNING) The firmware version of the I/O Module running the Fabric Manager must support this configuration + feature. If not, uplink creation will be successful with an appropriate error message in response." + choices: ['Enabled', 'Disabled'] + type: str + primary_switch_service_tag: + description: Service tag of the primary switch. + type: str + primary_switch_ports: + description: + - The IOM slots to be connected to the primary switch. + - I(primary_switch_service_tag) is mandatory for this option. + type: list + elements: str + secondary_switch_service_tag: + description: Service tag of the secondary switch. + type: str + secondary_switch_ports: + description: + - The IOM slots to be connected to the secondary switch. + - I(secondary_switch_service_tag) is mandatory for this option. + type: list + elements: str + tagged_networks: + description: VLANs to be associated with the uplink I(name). + type: list + elements: str + untagged_network: + description: Specify the name of the VLAN to be added as untagged to the uplink. + type: str +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Create an Uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "CREATED from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Enabled" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/13 + - ethernet1/1/14 + tagged_networks: + - vlan1 + - vlan3 + untagged_network: vlan2 + tags: create_uplink + +- name: Modify an existing uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + description: "Modified from OMAM" + uplink_type: "Ethernet" + ufd_enable: "Disabled" + primary_switch_service_tag: "DEF1234" + primary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + secondary_switch_service_tag: "TUV1234" + secondary_switch_ports: + - ethernet1/2/13 + - ethernet1/2/14 + tagged_networks: + - vlan11 + - vlan33 + untagged_network: vlan22 + tags: modify_uplink + +- name: Delete an Uplink + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + fabric_name: "fabric1" + name: "uplink1" + tags: delete_uplink + +- name: Modify an Uplink name + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + new_name: "uplink2" + tags: modify_uplink_name + +- name: Modify Uplink ports + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + 
password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "uplink1" + description: "uplink ports modified" + primary_switch_service_tag: "ABC1234" + primary_switch_ports: + - ethernet1/1/6 + - ethernet1/1/7 + secondary_switch_service_tag: "XYZ1234" + secondary_switch_ports: + - ethernet1/1/9 + - ethernet1/1/10 + tags: modify_ports + +- name: Modify Uplink networks + dellemc.openmanage.ome_smart_fabric_uplink: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + fabric_name: "fabric1" + name: "create1" + description: "uplink networks modified" + tagged_networks: + - vlan4 + tags: modify_networks +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the uplink operation. + returned: always + sample: "Successfully modified the uplink." +uplink_id: + type: str + description: Returns the ID when an uplink is created or modified. + returned: when I(state=present) + sample: "ddc3d260-fd71-46a1-97f9-708e12345678" +additional_info: + type: dict + description: Additional details of the fabric operation. + returned: when I(state=present) and additional information present in response. + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to configure the Uplink Failure Detection mode on the uplink because the firmware + version of the I/O Module running the Fabric Manager does not support the configuration feature.", + "MessageArgs": [], + "MessageId": "CDEV7151", + "RelatedProperties": [], + "Resolution": "Update the firmware version of the I/O Module running the Fabric Manager and retry + the operation. For information about the recommended I/O Module firmware versions, see the + OpenManage Enterprise-Modular User's Guide available on the support site.", + "Severity": "Informational" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information." + } + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CGEN1006", + "RelatedProperties": [], + "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide + for more information about resource URI and its properties." + } + ] + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.common.dict_transformations import recursive_diff +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_item_and_list + +FABRIC_URI = "NetworkService/Fabrics" +UPLINKS_URI = "NetworkService/Fabrics('{fabric_id}')/Uplinks" +UPLINK_URI = "NetworkService/Fabrics('{fabric_id}')/Uplinks('{uplink_id}')" +APPLICABLE_NETWORKS = "NetworkService/Fabrics('{fabric_id}')/NetworkService.GetApplicableUplinkNetworks" +APPLICABLE_UNTAGGED = "NetworkService/Fabrics('{fabric_id}')/NetworkService.GetApplicableUplinkUntaggedNetworks" +IOM_DEVICES = "DeviceService/Devices?$filter=Type%20eq%204000" +PORT_INFO = "DeviceService/Devices({device_id})/InventoryDetails('portInformation')" +MEDIA_TYPES = "NetworkService/UplinkTypes" +VLAN_CONFIG = "NetworkConfigurationService/Networks" +# Messages +CHECK_MODE_MSG = "Changes found to be applied." 
+NO_CHANGES_MSG = "No changes found to be applied to the uplink configuration." +SAME_SERVICE_TAG_MSG = "Primary and Secondary service tags must not be the same." + + +def get_item_id(rest_obj, name, uri, key='Name', attr='Id', value='value'): + resp = rest_obj.invoke_request('GET', uri) + tlist = [] + if resp.success and resp.json_data.get(value): + tlist = resp.json_data.get(value, []) + for xtype in tlist: + if xtype.get(key, "") == name: + return xtype.get(attr), tlist + return 0, tlist + + +def get_all_uplink_ports(uplinks): + portlist = [] + for uplink in uplinks: + portlist = portlist + uplink.get("Ports") + return portlist + + +def validate_ioms(module, rest_obj, uplinks): + uplinkports = get_all_uplink_ports(uplinks) + payload_ports = [] + occupied_ports = [] + used_ports = [] + for idx in uplinkports: + used_ports.append(idx["Id"]) + iomsts = ("primary", "secondary") + for iom in iomsts: + prim_st = module.params.get(iom + "_switch_service_tag") + if prim_st: + prim_ports = list(str(port).strip() for port in module.params.get(iom + "_switch_ports")) + id, ioms = get_item_id(rest_obj, prim_st, IOM_DEVICES, key="DeviceServiceTag") + if not id: + module.fail_json(msg="Device with service tag {0} does not exist.".format(prim_st)) + resp = rest_obj.invoke_request("GET", PORT_INFO.format(device_id=id)) + port_info_data = resp.json_data.get("InventoryInfo", []) + port_info_list = [] + for port in port_info_data: + if port.get("SubPorts"): + for subport in port.get("SubPorts"): + port_info_list.append(subport["PortNumber"]) + else: + port_info_list.append(port["PortNumber"]) + # All ports are listed but with "OpticsType": "NotPresent" are shown on UI. 
+ non_exist_ports = [] + for port in prim_ports: + if port not in port_info_list: + non_exist_ports.append(port) + st_port = prim_st + ':' + port + payload_ports.append(st_port) + if st_port in used_ports: + occupied_ports.append(st_port) + if non_exist_ports: + module.fail_json(msg="{0} Port Numbers {1} does not exist for IOM {2}." + .format(iom, (",".join(set(non_exist_ports))), prim_st)) + if occupied_ports: + module.fail_json(msg="Ports {0} are already occupied.".format(",".join(set(occupied_ports)))) + return payload_ports + + +def validate_networks(module, rest_obj, fabric_id, media_id): + resp = rest_obj.invoke_request('POST', APPLICABLE_NETWORKS.format(fabric_id=fabric_id), + data={"UplinkType": media_id}) + vlans = [] + if resp.json_data.get('ApplicableUplinkNetworks'): + vlans = resp.json_data.get('ApplicableUplinkNetworks', []) + vlan_payload = [] + vlan_dict = {} + for vlan in vlans: + vlan_dict[vlan["Name"]] = vlan["Id"] + networks = list(str(net).strip() for net in module.params.get("tagged_networks")) + invalids = [] + for ntw in networks: + if vlan_dict.get(ntw): + vlan_payload.append(vlan_dict.get(ntw)) + else: + invalids.append(ntw) + if invalids: + module.fail_json(msg="Networks with names {0} are not applicable or valid.".format(",".join(set(invalids)))) + return vlan_payload + + +def validate_native_vlan(module, rest_obj, fabric_id, media_id): + resp = rest_obj.invoke_request('POST', APPLICABLE_UNTAGGED.format(fabric_id=fabric_id), + data={"UplinkType": media_id}) + vlans = [] + if resp.json_data.get('ApplicableUplinkNetworks'): + vlans = resp.json_data.get('ApplicableUplinkNetworks', []) + vlan_id = 0 + vlan_name = module.params.get("untagged_network") + for vlan in vlans: + if vlan["Name"] == vlan_name: + vlan_id = vlan["VlanMaximum"] # considering tagged vlans take the 'Id' + break + if not vlan_id: + module.fail_json(msg="Native VLAN name {0} is not applicable or valid.".format(vlan_name)) + return vlan_id + + +def create_uplink(module, 
rest_obj, fabric_id, uplinks): + mparams = module.params + mandatory_parmas = ["name", "uplink_type", "tagged_networks"] + for prm in mandatory_parmas: + if not mparams.get(prm): + module.fail_json(msg="Mandatory parameter {0} not provided for uplink creation.".format(prm)) + media_id, mtypes = get_item_id(rest_obj, mparams["uplink_type"], MEDIA_TYPES) + if not media_id: + module.fail_json(msg="Uplink Type {0} does not exist.".format(mparams["uplink_type"])) + if mparams.get("primary_switch_service_tag") or mparams.get("secondary_switch_service_tag"): + if mparams.get("primary_switch_service_tag") == mparams.get("secondary_switch_service_tag"): + module.fail_json(msg=SAME_SERVICE_TAG_MSG) + payload_port_list = validate_ioms(module, rest_obj, uplinks) + else: + module.fail_json(msg="Provide port details.") + tagged_networks = validate_networks(module, rest_obj, fabric_id, media_id) + create_payload = { + "Name": mparams["name"], + "MediaType": mparams["uplink_type"], + "Ports": [{"Id": port} for port in payload_port_list], + "Networks": [{"Id": net} for net in tagged_networks] + } + if mparams.get("untagged_network"): + untagged_id = validate_native_vlan(module, rest_obj, fabric_id, media_id) + create_payload["NativeVLAN"] = untagged_id + if mparams.get("ufd_enable"): + create_payload["UfdEnable"] = mparams.get("ufd_enable") + if mparams.get("description"): + create_payload["Description"] = mparams.get("description") + if module.check_mode: + module.exit_json(changed=True, msg=CHECK_MODE_MSG) + resp = rest_obj.invoke_request("POST", UPLINKS_URI.format(fabric_id=fabric_id), data=create_payload) + uplink_id = resp.json_data + if isinstance(resp.json_data, dict): + uplink_id, tmp = get_item_id(rest_obj, mparams["name"], UPLINKS_URI.format(fabric_id=fabric_id)) + if not uplink_id: + uplink_id = "" + module.exit_json(changed=True, msg="Successfully created the uplink.", uplink_id=uplink_id, + additional_info=resp.json_data) + module.exit_json(changed=True, 
msg="Successfully created the uplink.", uplink_id=uplink_id) + + +def delete_uplink(module, rest_obj, fabric_id, uplink_id): + if module.check_mode: + module.exit_json(changed=True, msg=CHECK_MODE_MSG) + rest_obj.invoke_request("DELETE", UPLINK_URI.format(fabric_id=fabric_id, uplink_id=uplink_id)) + module.exit_json(msg="Successfully deleted the uplink.", changed=True) + + +def modify_uplink(module, rest_obj, fabric_id, uplink, uplinks): + mparams = module.params + pload_keys = ["Id", "Name", "Description", "MediaType", "NativeVLAN", "UfdEnable", "Ports", "Networks"] + modify_payload = dict((pload_key, uplink.get(pload_key)) for pload_key in pload_keys) + port_list = list(port["Id"] for port in modify_payload["Ports"]) + modify_payload["Ports"] = sorted(list(set(port_list))) + network_list = list(network["Id"] for network in modify_payload["Networks"]) + modify_payload["Networks"] = sorted(network_list) + modify_data = {} + if mparams.get("new_name"): + modify_data["Name"] = mparams.get("new_name") + if mparams.get("description"): + modify_data["Description"] = mparams.get("description") + if mparams.get("ufd_enable"): + modify_data["UfdEnable"] = mparams.get("ufd_enable") + if mparams.get("uplink_type"): + if mparams.get("uplink_type") != uplink.get("MediaType"): + module.fail_json(msg="Uplink Type cannot be modified.") + modify_data["MediaType"] = mparams["uplink_type"] + if mparams.get("primary_switch_service_tag") or mparams.get("secondary_switch_service_tag"): + if mparams.get("primary_switch_service_tag") == mparams.get("secondary_switch_service_tag"): + module.fail_json(msg=SAME_SERVICE_TAG_MSG) + payload_port_list = validate_ioms(module, rest_obj, uplinks) + modify_data["Ports"] = sorted(list(set(payload_port_list))) + media_id, mtypes = get_item_id(rest_obj, uplink.get("MediaType"), MEDIA_TYPES) + if mparams.get("tagged_networks") and media_id: + tagged_networks = validate_networks(module, rest_obj, fabric_id, media_id) + modify_data["Networks"] = 
sorted(tagged_networks) + if mparams.get("untagged_network") and media_id: + untagged_id = validate_native_vlan(module, rest_obj, fabric_id, media_id) + modify_data["NativeVLAN"] = untagged_id + diff = recursive_diff(modify_data, modify_payload) + if diff and diff[0]: + modify_payload.update(diff[0]) + if module.check_mode: + module.exit_json(changed=True, msg=CHECK_MODE_MSG) + modify_payload["Ports"] = list({"Id": port} for port in modify_payload["Ports"]) + modify_payload["Networks"] = list({"Id": net} for net in modify_payload["Networks"]) + resp = rest_obj.invoke_request("PUT", UPLINK_URI.format(fabric_id=fabric_id, uplink_id=uplink['Id']), + data=modify_payload) + if isinstance(resp.json_data, dict): + module.exit_json(changed=True, msg="Successfully modified the uplink.", uplink_id=uplink['Id'], + additional_info=resp.json_data) + module.exit_json(changed=True, msg="Successfully modified the uplink.", uplink_id=uplink['Id']) + module.exit_json(msg=NO_CHANGES_MSG) + + +def main(): + specs = { + "state": {"choices": ['present', 'absent'], "default": "present"}, + "fabric_name": {"required": True, "type": "str"}, + "name": {"required": True, "type": "str"}, + "new_name": {"type": "str"}, + "description": {"type": "str"}, + "uplink_type": { + "choices": ['Ethernet', 'FCoE', 'FC Gateway', 'FC Direct Attach', 'Ethernet - No Spanning Tree']}, + "ufd_enable": {"choices": ['Enabled', 'Disabled']}, + "primary_switch_service_tag": {"type": "str"}, + "primary_switch_ports": {"type": "list", "elements": "str"}, + "secondary_switch_service_tag": {"type": "str"}, + "secondary_switch_ports": {"type": "list", "elements": "str"}, + "tagged_networks": {"type": "list", "elements": "str"}, + "untagged_network": {"type": "str"} + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[['state', 'present', + ('new_name', 'description', 'uplink_type', 'ufd_enable', + 'primary_switch_service_tag', 'primary_switch_ports', 
'secondary_switch_service_tag', + 'secondary_switch_ports', 'tagged_networks', 'untagged_network',), True]], + required_together=[["primary_switch_service_tag", "primary_switch_ports"], + ["secondary_switch_service_tag", "secondary_switch_ports"]], + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + fabric_id, fabrics = get_item_id(rest_obj, module.params["fabric_name"], FABRIC_URI) + if not fabric_id: + module.fail_json(msg="Fabric with name {0} does not exist.".format(module.params["fabric_name"])) + uplink, uplinks = get_item_and_list(rest_obj, module.params["name"], + UPLINKS_URI.format(fabric_id=fabric_id) + '?$expand=Ports,Networks') + if module.params["state"] == "present": + if uplink: + uplinks.remove(uplink) + modify_uplink(module, rest_obj, fabric_id, uplink, uplinks) + create_uplink(module, rest_obj, fabric_id, uplinks) + else: + if uplink: + delete_uplink(module, rest_obj, fabric_id, uplink['Id']) + if module.check_mode: + module.exit_json(msg=NO_CHANGES_MSG) + module.exit_json(msg="Uplink {0} does not exist.".format(module.params["name"])) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py new file mode 100644 index 00000000..8c5fa98b --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py @@ -0,0 +1,993 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_template +short_description: Create, modify, deploy, delete, export, import and clone a template on OpenManage Enterprise +version_added: "2.0.0" +description: "This module creates, modifies, deploys, deletes, exports, imports and clones a template on +OpenManage Enterprise." +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + command: + description: + - C(create) creates a new template. + - C(modify) modifies an existing template. + - C(deploy) creates a template-deployment job. + - C(delete) deletes an existing template. + - C(export) exports an existing template. + - C(import) creates a template from a specified configuration text in SCP XML format. + - C(clone) creates a clone of a existing template. + choices: [create, modify, deploy, delete, export, import, clone] + default: create + aliases: ['state'] + type: str + template_id: + description: + - ID of the existing template. + - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export). + - This option is mutually exclusive with I(template_name). + type: int + template_name: + description: + - Name of the existing template. + - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export). + - This option is mutually exclusive with I(template_id). + type: str + device_id: + description: + - >- + Specify the list of targeted device ID(s) when I(command) is C(deploy). When I (command) is C(create), + specify the ID of a single device. + - Either I(device_id) or I(device_service_tag) is mandatory or both can be applicable. 
+ type: list + elements: int + default: [] + device_service_tag: + description: + - >- + Specify the list of targeted device service tags when I (command) is C(deploy). When I(command) is C(create), + specify the service tag of a single device. + - Either I(device_id) or I(device_service_tag) is mandatory or both can be applicable. + type: list + elements: str + default: [] + device_group_names: + description: + - Specify the list of groups when I (command) is C(deploy). + - Provide at least one of the mandatory options I(device_id), I(device_service_tag), or I(device_group_names). + type: list + elements: str + default: [] + template_view_type: + description: + - Select the type of view of the OME template. + - This is applicable when I(command) is C(create),C(clone) and C(import). + choices: [Deployment, Compliance, Inventory, Sample, None] + type: str + default: Deployment + attributes: + type: dict + description: + - >- + Payload data for the template operations. All the variables in this option are added as payload for C(create), + C(modify), C(deploy), C(import), and C(clone) operations. It takes the following attributes. + - >- + Attributes: List of dictionaries of attributes (if any) to be modified in the deployment template. This is + applicable when I(command) is C(deploy) and C(modify). Use the I(Id) If the attribute Id is available. + If not, use the comma separated I (DisplayName). For more details about using the I(DisplayName), + see the example provided. + - >- + Name: Name of the template. This is mandatory when I(command) is C(create), C(import), C(clone), and + optional when I(command) is C(modify). + - >- + Description: Description for the template. This is applicable when I(command) is C(create) or C(modify). + - >- + Fqdds: This allows to create a template using components from a specified reference server. 
One or more, of the + following values must be specified in a comma-separated string: iDRAC, System, BIOS, NIC, LifeCycleController, + RAID, and EventFilters. If none of the values are specified, the default value 'All' is selected. + This is applicable when I (command) is C(create). + - >- + Options: Options to control device shutdown or end power state post template deployment. This is applicable + for C(deploy) operation. + - >- + Schedule: Provides options to schedule the deployment task immediately, or at a specified time. This is + applicable when I(command) is C(deploy). + - >- + NetworkBootIsoModel: Payload to specify the ISO deployment details. This is applicable when I(command) is + C(deploy). + - >- + Content: The XML content of template. This is applicable when I(command) is C(import). + - >- + Type: Template type ID, indicating the type of device for which configuration is supported, such as chassis + and servers. This is applicable when I(command) is C(import). + - >- + TypeId: Template type ID, indicating the type of device for which configuration is supported, such as chassis + and servers. This is applicable when I(command) is C(create). + - >- + Refer OpenManage Enterprise API Reference Guide for more details. +requirements: + - "python >= 3.8.6" +author: "Jagadeesh N V (@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Create a template from a reference device + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + device_id: 25123 + attributes: + Name: "New Template" + Description: "New Template description" + +- name: Modify template name, description, and attribute value + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + # Attributes to be modified in the template. + # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + - Id: 1234 + Value: "Test Attribute" + IsIgnored: false + +- name: Modify template name, description, and attribute using detailed view + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "modify" + template_id: 12 + attributes: + Name: "New Custom Template" + Description: "Custom Template Description" + Attributes: + # Enter the comma separated string as appearing in the Detailed view on GUI + # NIC -> NIC.Integrated.1-1-1 -> NIC Configuration -> Wake On LAN1 + - DisplayName: 'NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN' + Value: Enabled + IsIgnored: false + # System -> LCD Configuration -> LCD 1 User Defined String for LCD + - DisplayName: 'System, LCD Configuration, LCD 1 User Defined String for LCD' + Value: LCD str by OMAM + IsIgnored: false + +- name: Deploy template on multiple devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + 
device_service_tag: + - 'SVTG123' + - 'SVTG456' + +- name: Deploy template on groups + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_group_names: + - server_group_1 + - server_group_2 + +- name: Deploy template on multiple devices along with the attributes values to be modified on the target devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + attributes: + # Device specific attributes to be modified during deployment. + # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails + # This section is optional + Attributes: + # specific device where attribute to be modified at deployment run-time. + # The DeviceId should be mentioned above in the 'device_id' section. + # Service tags not allowed. + - DeviceId: 12765 + Attributes: + - Id : 15645 + Value : "0.0.0.0" + IsIgnored : false + - DeviceId: 10173 + Attributes: + - Id : 18968, + Value : "hostname-1" + IsIgnored : false + +- name: Deploy template and Operating System (OS) on multiple devices + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. 
+ # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + +- name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed, +install OS using its image" + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + - 10173 + device_service_tag: + - 'SVTG123' + - 'SVTG456' + attributes: + Attributes: + - DeviceId: 12765 + Attributes: + - Id : 15645 + Value : "0.0.0.0" + IsIgnored : false + - DeviceId: 10173 + Attributes: + - Id : 18968, + Value : "hostname-1" + IsIgnored : false + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "NFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + +- name: Delete template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "delete" + template_id: 12 + +- name: Export a template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_id: 12 + +# Start of example to export template to a local xml file +- name: Export template to a local xml file + 
dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "export" + template_name: "my_template" + register: result +- name: Save template into a file + ansible.builtin.copy: + content: "{{ result.Content}}" + dest: "/path/to/exported_template.xml" +# End of example to export template to a local xml file + +- name: Clone a template + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "clone" + template_id: 12 + attributes: + Name: "New Cloned Template Name" + +- name: Import template from XML content + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + # Template Type from TemplateService/TemplateTypes + Type: 2 + # xml string content + Content: "\n\nTrue\nClear\n + \n\nReady + \nNo\n\n + \nReady\n + No\n\n\n" + +- name: Import template from local XML file + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + attributes: + Name: "Imported Template Name" + Type: 2 + Content: "{{ lookup('ansible.builtin.file', '/path/to/xmlfile') }}" + +- name: "Deploy template and Operating System (OS) on multiple devices." + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "deploy" + template_id: 12 + device_id: + - 12765 + device_service_tag: + - 'SVTG123' + attributes: + # Include this to install OS on the devices. 
+ # This section is optional + NetworkBootIsoModel: + BootToNetwork: true + ShareType: "CIFS" + IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours + IsoPath: "/home/iso_path/filename.iso" + ShareDetail: + IpAddress: "192.168.0.2" + ShareName: "sharename" + User: "share_user" + Password: "share_password" + Options: + EndHostPowerState: 1 + ShutdownType: 0 + TimeToWaitBeforeShutdown: 300 + Schedule: + RunLater: true + RunNow: false + +- name: Create a compliance template from reference device + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "create" + device_service_tag: + - "SVTG123" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Description: "Configuration Compliance Template" + Fqdds: "BIOS" + +- name: Import a compliance template from XML file + dellemc.openmanage.ome_template: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "import" + template_view_type: "Compliance" + attributes: + Name: "Configuration Compliance" + Content: "{{ lookup('ansible.builtin.file', './test.xml') }}" + Type: 2 +''' + +RETURN = r''' +--- +msg: + description: Overall status of the template operation. + returned: always + type: str + sample: "Successfully created a template with ID 23" +return_id: + description: ID of the template for C(create), C(modify), C(import) and C(clone) or task created in case of C(deploy). + returned: success, when I(command) is C(create), C(modify), C(import), C(clone) and C(deploy) + type: int + sample: 12 +TemplateId: + description: ID of the template for C(export). + returned: success, when I(command) is C(export) + type: int + sample: 13 +Content: + description: XML content of the exported template. This content can be written to a xml file. 
+ returned: success, when I(command) is C(export) + type: str + sample: "\n\nTrue\nClear\n\n + \nReady\nNo + \n\n\nReady + \nNo\n\n" +devices_assigned: + description: Mapping of devices with the templates already deployed on them. + returned: I(command) is C(deploy) + type: dict + sample: { + "10362": 28, + "10312": 23 + } +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." + } + ] + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key + + +TEMPLATES_URI = "TemplateService/Templates" +TEMPLATE_PATH = "TemplateService/Templates({template_id})" +TEMPLATE_ACTION = "TemplateService/Actions/TemplateService.{op}" +TEMPLATE_ATTRIBUTES = "TemplateService/Templates({template_id})/AttributeDetails" +DEVICE_URI = "DeviceService/Devices" +GROUP_URI = "GroupService/Groups" +PROFILE_URI = "ProfileService/Profiles" +SEPRTR = ',' +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +TEMPLATE_NAME_EXISTS = "Template with name '{name}' already exists." +DEPLOY_DEV_ASSIGNED = "The device(s) '{dev}' have been assigned the template(s) '{temp}' " \ + "respectively. 
Please unassign the profiles from the devices." + + +def get_profiles(rest_obj): + try: + resp = rest_obj.invoke_request('GET', PROFILE_URI) + profile_list = resp.json_data.get("value") + except Exception: + profile_list = [] + return profile_list + + +def get_group_devices_all(rest_obj, uri): + total_items = [] + next_link = uri + while next_link: + resp = rest_obj.invoke_request('GET', next_link) + data = resp.json_data + total_items.extend(data.get("value", [])) + next_link_list = str(data.get('@odata.nextLink', '')).split('/api') + next_link = next_link_list[-1] + return total_items + + +def get_group(rest_obj, module, group_name): + query_param = {"$filter": "Name eq '{0}'".format(group_name)} + group_req = rest_obj.invoke_request("GET", GROUP_URI, query_param=query_param) + for grp in group_req.json_data.get('value'): + if grp['Name'] == group_name: + return grp + module.fail_json(msg="Group name '{0}' is invalid. Please provide a valid group name.".format(group_name)) + + +def get_group_details(rest_obj, module): + group_name_list = module.params.get('device_group_names') + device_ids = [] + for group_name in group_name_list: + group = get_group(rest_obj, module, group_name) + group_uri = GROUP_URI + "({0})/Devices".format(group['Id']) + group_device_list = get_group_devices_all(rest_obj, group_uri) + device_ids.extend([dev['Id'] for dev in group_device_list]) + return device_ids + + +def get_device_ids(module, rest_obj): + """Getting the list of device ids filtered from the device inventory.""" + target_ids = [] + if module.params.get('device_service_tag') or module.params.get('device_id'): + # device_list = get_group_devices_all(rest_obj, DEVICE_URI) + device_list = rest_obj.get_all_report_details(DEVICE_URI)['report_list'] + device_tag_id_map = dict([(device.get('DeviceServiceTag'), device.get('Id')) for device in device_list]) + device_id = module.params.get('device_id') + invalid_ids = set(device_id) - set(device_tag_id_map.values()) + if invalid_ids: + 
fail_module(module, msg="Unable to complete the operation because the entered target device" + " id(s) '{0}' are invalid.".format(",".join(list(map(str, set(invalid_ids)))))) + target_ids.extend(device_id) + service_tags = module.params.get('device_service_tag') + invalid_tags = set(service_tags) - set(device_tag_id_map.keys()) + if invalid_tags: + fail_module(module, msg="Unable to complete the operation because the entered target service" + " tag(s) '{0}' are invalid.".format(",".join(set(invalid_tags)))) + for tag in service_tags: # append ids for service tags + target_ids.append(device_tag_id_map.get(tag)) + if module.params.get('device_group_names'): + target_ids.extend(get_group_details(rest_obj, module)) + return list(set(target_ids)) # set to eliminate duplicates + + +def get_view_id(rest_obj, viewstr): + resp = rest_obj.invoke_request('GET', "TemplateService/TemplateViewTypes") + if resp.success and resp.json_data.get('value'): + tlist = resp.json_data.get('value', []) + for xtype in tlist: + if xtype.get('Description', "") == viewstr: + return xtype.get('Id') + viewmap = {"Deployment": 2, "Compliance": 1, "Inventory": 3, "Sample": 4, "None": 0} + return viewmap.get(viewstr) + + +def get_type_id_valid(rest_obj, typeid): + resp = rest_obj.invoke_request('GET', "TemplateService/TemplateTypes") + if resp.success and resp.json_data.get('value'): + tlist = resp.json_data.get('value', []) + for xtype in tlist: + if xtype.get('Id') == typeid: + return True + return False + + +def get_template_by_name(template_name, module, rest_obj): + template = {} + template_path = TEMPLATES_URI + query_param = {"$filter": "Name eq '{0}'".format(template_name)} + template_req = rest_obj.invoke_request("GET", template_path, query_param=query_param) + for each in template_req.json_data.get('value'): + if each['Name'] == template_name: + template = each + break + return template + + +def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map, adv_list): + if 
isinstance(subgroup, list): + for each_sub in subgroup: + nprfx = "{0}{1}{2}".format(prefix, SEPRTR, each_sub.get("DisplayName")) + if each_sub.get("SubAttributeGroups"): + recurse_subattr_list(each_sub.get("SubAttributeGroups"), nprfx, attr_detailed, attr_map, adv_list) + else: + for attr in each_sub.get('Attributes'): + attr['prefix'] = nprfx + # case sensitive, remove whitespaces for optim + constr = "{0}{1}{2}".format(nprfx, SEPRTR, attr['DisplayName']) + if constr in adv_list: + attr_detailed[constr] = attr['AttributeId'] + attr_map[attr['AttributeId']] = attr + + +def get_subattr_all(attr_dtls, adv_list): + attr_detailed = {} + attr_map = {} + for each in attr_dtls: + recurse_subattr_list(each.get('SubAttributeGroups'), each.get('DisplayName'), attr_detailed, attr_map, adv_list) + return attr_detailed, attr_map + + +def attributes_check(module, rest_obj, inp_attr, template_id): + diff = 0 + try: + resp = rest_obj.invoke_request("GET", TEMPLATE_ATTRIBUTES.format(template_id=template_id)) + attr_dtls = resp.json_data + disp_adv_list = inp_attr.get("Attributes", {}) + adv_list = [] + for attr in disp_adv_list: + if attr.get("DisplayName"): + split_k = str(attr.get("DisplayName")).split(SEPRTR) + trimmed = map(str.strip, split_k) + n_k = SEPRTR.join(trimmed) + adv_list.append(n_k) + attr_detailed, attr_map = get_subattr_all(attr_dtls.get('AttributeGroups'), adv_list) + payload_attr = inp_attr.get("Attributes", []) + rem_attrs = [] + for attr in payload_attr: + if attr.get("DisplayName"): + split_k = str(attr.get("DisplayName")).split(SEPRTR) + trimmed = map(str.strip, split_k) + n_k = SEPRTR.join(trimmed) + id = attr_detailed.get(n_k, "") + attr['Id'] = id + attr.pop("DisplayName", None) + else: + id = attr.get('Id') + if id: + ex_val = attr_map.get(id, {}) + if not ex_val: + rem_attrs.append(attr) + continue + if attr.get('Value') != ex_val.get("Value") or attr.get('IsIgnored') != ex_val.get("IsIgnored"): + diff = diff + 1 + for rem in rem_attrs: + 
payload_attr.remove(rem) + # module.exit_json(attr_detailed=attr_detailed, inp_attr=disp_adv_list, payload_attr=payload_attr, adv_list=adv_list) + except Exception: + diff = 1 + return diff + + +def get_create_payload(module, rest_obj, deviceid, view_id): + create_payload = {"Fqdds": "All", + "ViewTypeId": view_id} + attrib_dict = module.params.get("attributes").copy() + if isinstance(attrib_dict, dict): + typeid = attrib_dict.get("Type") if attrib_dict.get("Type") else attrib_dict.get("TypeId") + if typeid: + create_payload["TypeId"] = typeid + attrib_dict.pop("Type", None) # remove if exists as it is not required for create payload + create_payload.update(attrib_dict) + template = get_template_by_name(attrib_dict.get("Name"), module, rest_obj) + if template: + module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=attrib_dict.get("Name"))) + create_payload["SourceDeviceId"] = int(deviceid) + return create_payload + + +def get_modify_payload(module, rest_obj, template_dict): + modify_payload = {} + attrib_dict = module.params.get("attributes") + attrib_dict['Id'] = template_dict.get('Id') + modify_payload["Name"] = template_dict["Name"] + diff = 0 + if attrib_dict.get("Name", template_dict["Name"]) != template_dict["Name"]: + template = get_template_by_name(attrib_dict.get("Name"), module, rest_obj) + if template: + module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=attrib_dict.get("Name"))) + modify_payload["Name"] = attrib_dict.get("Name") + diff = diff + 1 + modify_payload["Description"] = template_dict["Description"] + diff = diff + apply_diff_key(attrib_dict, modify_payload, ["Description"]) + # check attributes + if attrib_dict.get("Attributes"): + diff = diff + attributes_check(module, rest_obj, attrib_dict, template_dict.get('Id')) + + if not diff: + module.exit_json(msg=NO_CHANGES_MSG) + if isinstance(attrib_dict, dict): + modify_payload.update(attrib_dict) + # module.exit_json(attrib_dict=attrib_dict, modify_payload=modify_payload) + return 
modify_payload + + +def get_deploy_payload(module_params, deviceidlist, template_id): + deploy_payload = {} + if isinstance(module_params.get("attributes"), dict): + deploy_payload.update(module_params.get("attributes")) + deploy_payload["Id"] = template_id + deploy_payload["TargetIds"] = deviceidlist + return deploy_payload + + +def get_import_payload(module, rest_obj, view_id): + attrib_dict = module.params.get("attributes").copy() + import_payload = {} + import_payload["Name"] = attrib_dict.pop("Name") + template = get_template_by_name(import_payload["Name"], module, rest_obj) + if template: + module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=import_payload["Name"])) + import_payload["ViewTypeId"] = view_id + import_payload["Type"] = 2 + typeid = attrib_dict.get("Type") if attrib_dict.get("Type") else attrib_dict.get("TypeId") + if typeid: + if get_type_id_valid(rest_obj, typeid): + import_payload["Type"] = typeid # Type is mandatory for import + else: + fail_module(module, msg="Type provided for 'import' operation is invalid") + import_payload["Content"] = attrib_dict.pop("Content") + if isinstance(attrib_dict, dict): + attrib_dict.pop("TypeId", None) # remove if exists as it is not required for import payload + import_payload.update(attrib_dict) + return import_payload + + +def get_clone_payload(module, rest_obj, template_id, view_id): + attrib_dict = module.params.get("attributes").copy() + clone_payload = {} + clone_payload["SourceTemplateId"] = template_id + clone_payload["NewTemplateName"] = attrib_dict.pop("Name") + template = get_template_by_name(clone_payload["NewTemplateName"], module, rest_obj) + if template: + module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=clone_payload["NewTemplateName"])) + clone_payload["ViewTypeId"] = view_id + if isinstance(attrib_dict, dict): + clone_payload.update(attrib_dict) + return clone_payload + + +def get_template_by_id(module, rest_obj, template_id): + path = TEMPLATE_PATH.format(template_id=template_id) + 
template_req = rest_obj.invoke_request("GET", path) + if template_req.success: + return template_req.json_data + else: + fail_module(module, msg="Unable to complete the operation because the" + " requested template is not present.") + + +def get_template_details(module, rest_obj): + id = module.params.get('template_id') + query_param = {"$filter": "Id eq {0}".format(id)} + srch = 'Id' + if not id: + id = module.params.get('template_name') + query_param = {"$filter": "Name eq '{0}'".format(id)} + srch = 'Name' + template = {} + resp = rest_obj.invoke_request('GET', TEMPLATES_URI, query_param=query_param) + if resp.success and resp.json_data.get('value'): + tlist = resp.json_data.get('value', []) + for xtype in tlist: + if xtype.get(srch) == id: + template = xtype + return template + + +def _get_resource_parameters(module, rest_obj): + command = module.params.get("command") + rest_method = 'POST' + payload = {} + template = get_template_details(module, rest_obj) + template_id = template.get('Id') + # template_name = template.get('Name') + if command not in ["import", "create", "delete"] and not template: + fail_module(module, msg="Enter a valid template_name or template_id") + if command == "create": + devid_list = get_device_ids(module, rest_obj) + if len(devid_list) != 1: + fail_module(module, msg="Create template requires only one reference device") + view_id = get_view_id(rest_obj, module.params['template_view_type']) + payload = get_create_payload(module, rest_obj, devid_list[0], view_id) + path = TEMPLATES_URI + elif command == 'import': + view_id = get_view_id(rest_obj, module.params['template_view_type']) + path = TEMPLATE_ACTION.format(op="Import") + payload = get_import_payload(module, rest_obj, view_id) + elif command == "delete": + if not template: + module.exit_json(msg=NO_CHANGES_MSG) + path = TEMPLATE_PATH.format(template_id=template_id) + rest_method = 'DELETE' + elif command == "modify": + path = TEMPLATE_PATH.format(template_id=template_id) + 
template_dict = get_template_by_id(module, rest_obj, template_id) + payload = get_modify_payload(module, rest_obj, template_dict) + rest_method = 'PUT' + elif command == "export": + path = TEMPLATE_ACTION.format(op="Export") + payload = {'TemplateId': template_id} + elif command == "deploy": + devid_list = get_device_ids(module, rest_obj) + if not devid_list: + fail_module(module, msg="There are no devices provided for deploy operation") + profile_list = get_profiles(rest_obj) + dev_temp_map = {} + for prof in profile_list: + target = prof["TargetId"] + if prof["ProfileState"] > 0 and target in devid_list: + if template_id == prof['TemplateId']: # already same template deployed + devid_list.remove(target) + else: + dev_temp_map[prof["TargetId"]] = prof['TemplateId'] + if dev_temp_map: + module.exit_json(devices_assigned=dev_temp_map, + msg=DEPLOY_DEV_ASSIGNED.format(dev=','.join(map(str, dev_temp_map.keys())), + temp=','.join(map(str, dev_temp_map.values())))) + if not devid_list: + module.exit_json(msg=NO_CHANGES_MSG) + path = TEMPLATE_ACTION.format(op="Deploy") + payload = get_deploy_payload(module.params, devid_list, template_id) + elif command == "clone": + view_id = get_view_id(rest_obj, module.params['template_view_type']) + path = TEMPLATE_ACTION.format(op="Clone") + payload = get_clone_payload(module, rest_obj, template_id, view_id) + if module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + return path, payload, rest_method + + +def _validate_inputs(module): + """validates input parameters""" + command = module.params.get("command") + if command in ["create", "deploy"]: + dev_id = module.params["device_id"] + dev_st = module.params["device_service_tag"] + if None in dev_id or None in dev_st: + fail_module(module, msg="Argument device_id or device_service_tag has null values") + attrib_dict = {} + if module.params.get("attributes"): + attrib_dict = module.params.get("attributes") + if command in ["import", "clone", "create"]: + if not 
attrib_dict.get("Name"): + fail_module(module, msg="Argument 'Name' required in attributes for {0} operation".format(command)) + if command == "import": + if not attrib_dict.get("Content"): + fail_module(module, msg="Argument 'Content' required in attributes for {0} operation".format(command)) + + +def password_no_log(attributes): + if isinstance(attributes, dict): + netdict = attributes.get("NetworkBootIsoModel") + if isinstance(netdict, dict): + sharedet = netdict.get("ShareDetail") + if isinstance(sharedet, dict) and 'Password' in sharedet: + sharedet['Password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + + +def fail_module(module, **failmsg): + password_no_log(module.params.get("attributes")) + module.fail_json(**failmsg) + + +def exit_module(module, response): + password_no_log(module.params.get("attributes")) + resp = None + my_change = True + command = module.params.get('command') + result = {} + if command in ["create", "modify", "deploy", "import", "clone"]: + result["return_id"] = response.json_data + resp = result["return_id"] + if command == 'deploy' and result["return_id"] == 0: + result["failed"] = True + command = 'deploy_fail' + my_change = False + if command == 'export': + my_change = False + result = response.json_data + msg_dict = {'create': "Successfully created a template with ID {0}".format(resp), + 'modify': "Successfully modified the template with ID {0}".format(resp), + 'deploy': "Successfully created the template-deployment job with ID {0}".format(resp), + 'deploy_fail': 'Failed to deploy template.', + 'delete': "Deleted successfully", + 'export': "Exported successfully", + 'import': "Imported successfully", + 'clone': "Cloned successfully"} + module.exit_json(msg=msg_dict.get(command), changed=my_change, **result) + + +def main(): + specs = { + "command": {"required": False, "default": "create", "aliases": ['state'], + "choices": ['create', 'modify', 'deploy', 'delete', 'export', 'import', 'clone']}, + "template_id": {"required": False, 
"type": 'int'}, + "template_name": {"required": False, "type": 'str'}, + "template_view_type": {"required": False, "default": 'Deployment', + "choices": ['Deployment', 'Compliance', 'Inventory', 'Sample', 'None']}, + "device_id": {"required": False, "type": 'list', "default": [], "elements": 'int'}, + "device_service_tag": {"required": False, "type": 'list', "default": [], "elements": 'str'}, + "device_group_names": {"required": False, "type": 'list', "default": [], "elements": 'str'}, + "attributes": {"required": False, "type": 'dict'}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + required_if=[ + ['command', 'create', ['attributes']], + ['command', 'modify', ['attributes']], + ['command', 'import', ['attributes']], + ['command', 'modify', ['template_id', 'template_name'], True], + ['command', 'delete', ['template_id', 'template_name'], True], + ['command', 'export', ['template_id', 'template_name'], True], + ['command', 'clone', ['template_id', 'template_name'], True], + ['command', 'deploy', ['template_id', 'template_name'], True], + ['command', 'deploy', ['device_id', 'device_service_tag', 'device_group_names'], True], + ], + mutually_exclusive=[["template_id", "template_name"]], + supports_check_mode=True) + + try: + _validate_inputs(module) + with RestOME(module.params, req_session=True) as rest_obj: + path, payload, rest_method = _get_resource_parameters(module, rest_obj) + # module.exit_json(payload=payload, path=path) + resp = rest_obj.invoke_request(rest_method, path, data=payload) + if resp.success: + exit_module(module, resp) + except HTTPError as err: + fail_module(module, msg=str(err), error_info=json.load(err)) + except URLError as err: + password_no_log(module.params.get("attributes")) + module.exit_json(msg=str(err), unreachable=True) + except (IOError, SSLError, SSLValidationError, ConnectionError, TypeError, ValueError, KeyError, OSError) as err: + fail_module(module, msg=str(err)) + + +if __name__ == 
'__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py new file mode 100644 index 00000000..701874f7 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_template_identity_pool +short_description: Attach or detach an identity pool to a requested template on OpenManage Enterprise +version_added: "2.0.0" +description: This module allows to- + - Attach an identity pool to a requested template on OpenManage Enterprise. + - Detach an identity pool from a requested template on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + template_name: + description: Name of the template to which an identity pool is attached or detached. + type: str + required: true + identity_pool_name: + description: Name of the identity pool. + - To attach an identity pool to a template, provide the name of the identity pool. + - This option is not applicable when detaching an identity pool from a template. + type: str +requirements: + - "python >= 3.8.6" +author: "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Attach an identity pool to a template + dellemc.openmanage.ome_template_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name + identity_pool_name: identity_pool_name + +- name: Detach an identity pool from a template + dellemc.openmanage.ome_template_identity_pool: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_name: template_name +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall identity pool status of the attach or detach operation. + returned: always + sample: Successfully attached identity pool to template. +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "GEN1234", + "RelatedProperties": [], + "Message": "Unable to process the request because an error occurred.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Retry the operation. If the issue persists, contact your system administrator." 
+ } + ] + } + } +''' + +import json +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ssl import SSLError + +CONFIG_URI = "TemplateService/Actions/TemplateService.UpdateNetworkConfig" +TEMPLATE_URI = "TemplateService/Templates" +IDENTITY_URI = "IdentityPoolService/IdentityPools" +TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({template_id})/Views(4)/AttributeViewDetails" +KEY_ATTR_NAME = 'DisplayName' +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." + + +def get_template_vlan_info(rest_obj, template_id): + nic_bonding_tech = "" + try: + resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(template_id=template_id)) + if resp.success: + nic_model = resp.json_data.get('AttributeGroups', []) + for xnic in nic_model: + if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology": + nic_bonding_list = xnic.get("Attributes", []) + for xbnd in nic_bonding_list: + if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology": + nic_bonding_tech = xbnd.get('Value') + except Exception: + nic_bonding_tech = "" + return nic_bonding_tech + + +def get_template_id(rest_obj, module): + """Get template id based on requested template name.""" + template_name = module.params["template_name"] + query_param = {"$filter": "Name eq '{0}'".format(template_name)} + template_req = rest_obj.invoke_request("GET", TEMPLATE_URI, query_param=query_param) + for each in template_req.json_data.get('value'): + if each['Name'] == template_name: + template = each + break + else: + module.fail_json(msg="Unable to complete the operation because the requested template" + " with name '{0}' is not present.".format(template_name)) + return template + + +def 
get_identity_id(rest_obj, module): + """Get identity pool id based on requested identity pool name.""" + identity_name = module.params["identity_pool_name"] + resp = rest_obj.get_all_report_details(IDENTITY_URI) + for each in resp["report_list"]: + if each['Name'] == identity_name: + identity_id = each['Id'] + break + else: + module.fail_json(msg="Unable to complete the operation because the requested identity" + " pool with name '{0}' is not present.".format(identity_name)) + return identity_id + + +def main(): + specs = { + "template_name": {"required": True, "type": "str"}, + "identity_pool_name": {"required": False, "type": "str"}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True + ) + try: + with RestOME(module.params, req_session=True) as rest_obj: + template = get_template_id(rest_obj, module) + template_id = template["Id"] + identity_id, message = 0, "Successfully detached identity pool from template." + if module.params["identity_pool_name"] is not None: + identity_id = get_identity_id(rest_obj, module) + message = "Successfully attached identity pool to template." 
+ nic_bonding_tech = get_template_vlan_info(rest_obj, template_id) + payload = {"TemplateId": template_id, "IdentityPoolId": identity_id, "BondingTechnology": nic_bonding_tech} + if template["IdentityPoolId"] == identity_id: + module.exit_json(changed=False, msg=NO_CHANGES_FOUND) + if module.check_mode: + module.exit_json(changed=True, msg=CHANGES_FOUND) + resp = rest_obj.invoke_request("POST", CONFIG_URI, data=payload) + if resp.status_code == 200: + module.exit_json(msg=message, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (ValueError, TypeError, ConnectionError, SSLError, SSLValidationError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py new file mode 100644 index 00000000..e233c5ac --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_template_info +short_description: Retrieves template details from OpenManage Enterprise +version_added: "2.0.0" +description: + - This module retrieves the list and details of all the templates on OpenManage Enterprise. 
+extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + template_id: + description: Unique Id of the template. + type: int + system_query_options: + description: Options for pagination of the output. + type: dict + suboptions: + filter: + description: Filter records by the supported values. + type: str +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). + +''' + +EXAMPLES = r''' +--- +- name: Retrieve basic details of all templates + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Retrieve details of a specific template identified by its template ID + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 1 + +- name: Get filtered template info based on name + dellemc.openmanage.ome_template_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "Name eq 'new template'" +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall template facts status. + returned: on error + sample: "Failed to fetch the template facts" +template_info: + type: dict + description: Details of the templates. 
+ returned: success + sample: { + "192.168.0.1": { + "CreatedBy": "system", + "CreationTime": "1970-01-31 00:00:56.372144", + "Description": "Tune workload for Performance Optimized Virtualization", + "HasIdentityAttributes": false, + "Id": 1, + "IdentityPoolId": 0, + "IsBuiltIn": true, + "IsPersistencePolicyValid": false, + "IsStatelessAvailable": false, + "LastUpdatedBy": null, + "LastUpdatedTime": "1970-01-31 00:00:56.372144", + "Name": "iDRAC Enable Performance Profile for Virtualization", + "SourceDeviceId": 0, + "Status": 0, + "TaskId": 0, + "TypeId": 2, + "ViewTypeId": 4 + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +def _get_query_parameters(module_params): + """Builds query parameter. 
+ + :return: dict + :example: {"$filter": Name eq 'template name'} + """ + system_query_param = module_params.get("system_query_options") + query_param = {} + if system_query_param: + query_param = dict([("$" + k, v) for k, v in system_query_param.items() if v is not None]) + return query_param + + +def main(): + specs = { + "template_id": {"type": 'int', "required": False}, + "system_query_options": {"required": False, "type": 'dict', + "options": {"filter": {"type": 'str', "required": False}} + }, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[['template_id', 'system_query_options']], + supports_check_mode=True + ) + template_uri = "TemplateService/Templates" + try: + with RestOME(module.params, req_session=True) as rest_obj: + query_param = None + if module.params.get("template_id") is not None: + # Fetch specific template + template_id = module.params.get("template_id") + template_path = "{0}({1})".format(template_uri, template_id) + elif module.params.get("system_query_options") is not None: + # Fetch all the templates based on Name + query_param = _get_query_parameters(module.params) + template_path = template_uri + else: + # Fetch all templates + template_path = template_uri + resp = rest_obj.invoke_request('GET', template_path, query_param=query_param) + template_facts = resp.json_data + if resp.status_code == 200: + module.exit_json(template_info={module.params["hostname"]: template_facts}) + else: + module.fail_json(msg="Failed to fetch the template facts") + except HTTPError as err: + module.fail_json(msg=json.load(err)) + except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py new file mode 
100644 index 00000000..987a8b61 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py @@ -0,0 +1,448 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_template_network_vlan +short_description: Set tagged and untagged vlans to native network card supported by a template on OpenManage Enterprise +version_added: "2.0.0" +description: "This module allows to set tagged and untagged vlans to native network card supported by a template +on OpenManage Enterprise." +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + template_name: + description: + - Name of the template. + - It is mutually exclusive with I(template_id). + type: str + template_id: + description: + - Id of the template. + - It is mutually exclusive with I(template_name). + type: int + nic_identifier: + description: Display name of NIC port in the template for VLAN configuration. + required: true + type: str + propagate_vlan: + description: + - To deploy the modified VLAN settings immediately without rebooting the server. + - This option will be applied only when there are changes to the VLAN configuration. + default: true + type: bool + version_added: 3.4.0 + untagged_networks: + description: List of untagged networks and their corresponding NIC ports. + elements: dict + type: list + suboptions: + port: + description: NIC port number of the untagged VLAN. + required: true + type: int + untagged_network_id: + description: + - ID of the untagged VLAN + - Enter 0 to clear the untagged VLAN from the port. 
+ - This option is mutually exclusive with I(untagged_network_name) + - To get the VLAN network ID use the API U( https://I(hostname)/api/NetworkConfigurationService/Networks) + type: int + untagged_network_name: + description: + - name of the vlan for untagging + - provide 0 for clearing the untagging for this I(port) + - This parameter is mutually exclusive with I(untagged_network_id) + type: str + tagged_networks: + description: List of tagged VLANs and their corresponding NIC ports. + type: list + elements: dict + suboptions: + port: + description: NIC port number of the tagged VLAN + required: true + type: int + tagged_network_ids: + description: + - List of IDs of the tagged VLANs + - Enter [] to remove the tagged VLAN from a port. + - List of I(tagged_network_ids) is combined with list of I(tagged_network_names) when adding tagged VLANs to a port. + - To get the VLAN network ID use the API U( https://I(hostname)/api/NetworkConfigurationService/Networks) + type: list + elements: int + tagged_network_names: + description: + - List of names of tagged VLANs + - Enter [] to remove the tagged VLAN from a port. + - List of I(tagged_network_names) is combined with list of I(tagged_network_ids) when adding tagged VLANs to a port. + type: list + elements: str +requirements: + - "python >= 3.8.6" +author: + - "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Add tagged or untagged VLANs to a template using VLAN ID and name + dellemc.openmanage.ome_template_network_vlan: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + - port: 1 + untagged_network_id: 127656 + - port: 2 + untagged_network_name: vlan2 + tagged_networks: + - port: 1 + tagged_network_ids: + - 12767 + - 12768 + - port: 4 + tagged_network_ids: + - 12767 + - 12768 + tagged_network_names: + - vlan3 + - port: 2 + tagged_network_names: + - vlan4 + - vlan1 + +- name: Clear the tagged and untagged VLANs from a template + dellemc.openmanage.ome_template_network_vlan: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + template_id: 78 + nic_identifier: NIC Slot 4 + untagged_networks: + # For removing the untagged VLANs for the port 1 and 2 + - port: 1 + untagged_network_id: 0 + - port: 2 + untagged_network_name: 0 + tagged_networks: + # For removing the tagged VLANs for port 1, 4 and 2 + - port: 1 + tagged_network_ids: [] + - port: 4 + tagged_network_ids: [] + tagged_network_names: [] + - port: 2 + tagged_network_names: [] +''' + +RETURN = r''' +--- +msg: + type: str + description: Overall status of the template vlan operation. + returned: always + sample: "Successfully applied the network settings to template." +error_info: + description: Details of the HTTP Error. + returned: on HTTP error + type: dict + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the request because + TemplateId does not exist or is not applicable for the + resource URI.", + "MessageArgs": [ + "TemplateId" + ], + "MessageId": "CGEN1004", + "RelatedProperties": [], + "Resolution": "Check the request resource URI. 
Refer to + the OpenManage Enterprise-Modular User's Guide for more + information about resource URI and its properties.", + "Severity": "Critical" + } + ], + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template +UPDATE_NETWORK_CONFIG = "TemplateService/Actions/TemplateService.UpdateNetworkConfig" +TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({0})/Views({1}" \ + ")/AttributeViewDetails" +VLAN_NETWORKS = "NetworkConfigurationService/Networks?$top=9999" +TEMPLATE_VIEW = "TemplateService/Templates" # Add ?$top=9999 if not query +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +SUCCESS_MSG = "Successfully applied the network settings to the template." 
# Attribute-group key names used by the template network-hierarchy view.
KEY_ATTR_NAME = 'DisplayName'
SUB_GRP_ATTR_NAME = 'SubAttributeGroups'
GRP_ATTR_NAME = 'Attributes'
GRP_NAME_ID_ATTR_NAME = 'GroupNameId'
CUSTOM_ID_ATTR_NAME = 'CustomId'


def get_template_details(module, rest_obj):
    """Return the template matching I(template_id) (preferred) or I(template_name).

    Fails the module when no exact match is found.
    """
    template_ref = module.params.get('template_id')
    query_param = {"$filter": "Id eq {0}".format(template_ref)}
    srch = 'Id'
    if not template_ref:
        template_ref = module.params.get('template_name')
        query_param = {"$filter": "Name eq '{0}'".format(template_ref)}
        srch = 'Name'
    resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
    if resp.success and resp.json_data.get('value'):
        tlist = resp.json_data.get('value', [])
        for xtype in tlist:
            # The OData filter may match loosely; confirm the exact Id/Name.
            if xtype.get(srch) == template_ref:
                return xtype
    module.fail_json(msg="Template with {0} '{1}' not found.".format(srch, template_ref))


def get_vlan_name_id_map(rest_obj):
    """Return a dict mapping VLAN network Name -> Id from OME."""
    vlan_map = {}
    resp = rest_obj.invoke_request('GET', VLAN_NETWORKS)
    if resp.success and resp.json_data.get('value'):
        for network in resp.json_data.get('value', []):
            vlan_map[network["Name"]] = network["Id"]
    return vlan_map


def get_template_vlan_info(module, rest_obj, template_id):
    """Collect per-port VLAN data from the template's network-hierarchy view.

    :return: tuple of (port -> component id, port -> untagged VLAN id,
        port -> tagged VLAN id list, port -> NIC-bonding-enabled value,
        NIC bonding technology string).
    """
    port_id_map = {}
    port_untagged_map = {}
    port_tagged_map = {}
    port_nic_bond_map = {}
    nic_bonding_tech = ""
    # Fix: initialize so a response without a "NICModel" group fails cleanly
    # via the nic_found check below instead of raising NameError.
    nic_group = []
    resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(
        template_id, NETWORK_HIERARCHY_VIEW))
    if resp.success:
        nic_id = module.params.get("nic_identifier")
        nic_model = resp.json_data.get('AttributeGroups', [])
        for xnic in nic_model:
            if xnic.get(KEY_ATTR_NAME) == "NICModel":
                nic_group = xnic.get('SubAttributeGroups', [])
            if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology":
                nic_bonding_list = xnic.get("Attributes", [])
                for xbnd in nic_bonding_list:
                    if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
                        nic_bonding_tech = xbnd.get('Value')
        nic_found = False
        for nic in nic_group:
            if nic_id == nic.get(KEY_ATTR_NAME):
                nic_found = True
                for port in nic.get(SUB_GRP_ATTR_NAME):  # ports
                    for partition in port.get(SUB_GRP_ATTR_NAME):  # partitions
                        for attribute in partition.get(GRP_ATTR_NAME):  # attributes
                            if attribute.get(CUSTOM_ID_ATTR_NAME) != 0:
                                port_number = port.get(GRP_NAME_ID_ATTR_NAME)
                                port_id_map[port_number] = attribute.get(CUSTOM_ID_ATTR_NAME)
                                if attribute.get(KEY_ATTR_NAME).lower() == "vlan untagged":
                                    port_untagged_map[port_number] = int(attribute['Value'])
                                if attribute.get(KEY_ATTR_NAME).lower() == "vlan tagged":
                                    port_tagged_map[port_number] = []
                                    if attribute['Value']:
                                        # Value is a comma-separated id string, e.g. "10, 20".
                                        port_tagged_map[port_number] = \
                                            list(map(int, (attribute['Value']).replace(" ", "").split(",")))
                                if attribute.get(KEY_ATTR_NAME).lower() == "nic bonding enabled":
                                    port_nic_bond_map[port_number] = attribute['Value']
        if not nic_found:
            module.fail_json(msg="NIC with name '{0}' not found for template with id {1}".format(nic_id, template_id))
    return port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech


def compare_nested_dict(modify_setting_payload, existing_setting_payload):
    """compare existing and requested setting values of identity pool in case of modify operations
    if both are same return True"""
    for key, val in modify_setting_payload.items():
        if existing_setting_payload.get(key) is None:
            return False
        elif isinstance(val, dict):
            if not compare_nested_dict(val, existing_setting_payload.get(key)):
                return False
        elif val != existing_setting_payload.get(key):
            return False
    return True


def get_vlan_payload(module, rest_obj, untag_dict, tagged_dict):
    """Build the UpdateNetworkConfig payload.

    Exits with NO_CHANGES_MSG when the request matches the template's current
    VLAN state, and with CHANGES_FOUND in check mode.
    """
    payload = {}
    template = get_template_details(module, rest_obj)
    payload["TemplateId"] = template["Id"]
    payload["IdentityPoolId"] = template["IdentityPoolId"]
    # VlanAttributes
    port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech =\
        get_template_vlan_info(module, rest_obj, template['Id'])
    payload["BondingTechnology"] = nic_bonding_tech
    payload["PropagateVlan"] = module.params.get('propagate_vlan')
    untag_equal_dict = compare_nested_dict(untag_dict, port_untagged_map)
    tag_equal_dict = compare_nested_dict(tagged_dict, port_tagged_map)
    if untag_equal_dict and tag_equal_dict:
        module.exit_json(msg=NO_CHANGES_MSG)
    vlan_attributes = []
    for pk in port_id_map:
        mdict = {}
        if pk in untag_dict or pk in tagged_dict:
            # Requested value wins; otherwise keep the template's current value.
            mdict["Untagged"] = untag_dict.pop(pk, port_untagged_map.get(pk))
            mdict["Tagged"] = tagged_dict.pop(pk, port_tagged_map.get(pk))
            mdict["ComponentId"] = port_id_map.get(pk)
            mdict["IsNicBonded"] = port_nic_bond_map.get(pk)
        if mdict:
            vlan_attributes.append(mdict)
    # Any ports left over were not present on the NIC — reject them.
    if untag_dict:
        module.fail_json(msg="Invalid port(s) {0} found for untagged VLAN".format(untag_dict.keys()))
    if tagged_dict:
        module.fail_json(msg="Invalid port(s) {0} found for tagged VLAN".format(tagged_dict.keys()))
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    payload["VlanAttributes"] = vlan_attributes
    return payload


def get_key(val, my_dict):
    """Return the first key in *my_dict* whose value equals *val*, else None."""
    for key, value in my_dict.items():
        if val == value:
            return key
    return None


def validate_vlans(module, rest_obj):
    """Validate requested tagged/untagged VLANs against the VLANs defined on OME.

    :return: tuple of (port -> untagged VLAN id, port -> tagged VLAN id list).
    """
    vlan_name_id_map = get_vlan_name_id_map(rest_obj)
    vlan_name_id_map["0"] = 0  # "0"/0 means "clear the untagged VLAN"
    tagged_list = module.params.get("tagged_networks")
    untag_list = module.params.get("untagged_networks")
    untag_dict = {}
    if untag_list:
        for utg in untag_list:
            p = utg["port"]
            if utg.get("untagged_network_id") is not None:
                if p in untag_dict:
                    module.fail_json(msg="port {0} is repeated for "
                                         "untagged_network_id".format(p))
                vlan = utg.get("untagged_network_id")
                if vlan not in vlan_name_id_map.values():
                    module.fail_json(msg="untagged_network_id: {0} is not a "
                                         "valid vlan id for port {1}".format(vlan, p))
                untag_dict[p] = vlan
            if utg.get("untagged_network_name"):
                vlan = utg.get("untagged_network_name")
                if vlan in vlan_name_id_map:
                    if p in untag_dict:
                        module.fail_json(msg="port {0} is repeated for "
                                             "untagged_network_name".format(p))
                    untag_dict[p] = vlan_name_id_map.get(vlan)
                else:
                    module.fail_json(msg="{0} is not a valid vlan name for port {1}".format(vlan, p))
    vlan_name_id_map.pop("0")
    tagged_dict = {}
    if tagged_list:
        for tg in tagged_list:
            p = tg["port"]
            tg_list = []
            empty_list = False
            tgnids = tg.get("tagged_network_ids")
            if isinstance(tgnids, list):
                if len(tgnids) == 0:
                    # An explicit empty list clears the tagged VLANs on the port.
                    empty_list = True
                for vl in tgnids:
                    if vl not in vlan_name_id_map.values():
                        module.fail_json(msg="{0} is not a valid vlan id "
                                             "port {1}".format(vl, p))
                    tg_list.append(vl)
            tgnames = tg.get("tagged_network_names")
            if isinstance(tgnames, list):
                if len(tgnames) == 0:
                    empty_list = True
                for vln in tgnames:
                    if vln not in vlan_name_id_map:
                        module.fail_json(msg="{0} is not a valid vlan name "
                                             "port {1}".format(vln, p))
                    tg_list.append(vlan_name_id_map.get(vln))
            if not tg_list and not empty_list:
                module.fail_json(msg="No tagged_networks provided or valid tagged_networks not found for port {0}"
                                 .format(p))
            tagged_dict[p] = list(set(tg_list))  # Will not report duplicates
    for k, v in untag_dict.items():
        if v in tagged_dict.get(k, []):
            module.fail_json(msg="vlan {0}('{1}') cannot be in both tagged and untagged list for port {2}".
                             format(v, get_key(v, vlan_name_id_map), k))
    return untag_dict, tagged_dict


def main():
    """Module entry point: validate inputs and apply VLAN settings to the template."""
    port_untagged_spec = {"port": {"required": True, "type": "int"},
                          "untagged_network_id": {"type": "int"},
                          "untagged_network_name": {"type": "str"}}
    port_tagged_spec = {"port": {"required": True, "type": "int"},
                        "tagged_network_ids": {"type": "list", "elements": "int"},
                        "tagged_network_names": {"type": "list", "elements": "str"}}
    specs = {
        "template_name": {"required": False, "type": "str"},
        "template_id": {"required": False, "type": "int"},
        "nic_identifier": {"required": True, "type": "str"},
        "untagged_networks": {"required": False, "type": "list", "elements": "dict", "options": port_untagged_spec,
                              "mutually_exclusive": [("untagged_network_id", "untagged_network_name")]},
        "tagged_networks": {"required": False, "type": "list", "elements": "dict", "options": port_tagged_spec},
        "propagate_vlan": {"type": "bool", "default": True}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[("template_id", "template_name"),
                         ("untagged_networks", "tagged_networks")],
        mutually_exclusive=[("template_id", "template_name")],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            untag_dict, tagged_dict = validate_vlans(module, rest_obj)
            payload = get_vlan_payload(module, rest_obj, untag_dict, tagged_dict)
            resp = rest_obj.invoke_request("POST", UPDATE_NETWORK_CONFIG, data=payload)
            if resp.success:
                module.exit_json(msg=SUCCESS_MSG, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == "__main__":
    main()
a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py new file mode 100644 index 00000000..c768b4ca --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_user +short_description: Create, modify or delete a user on OpenManage Enterprise +version_added: "2.0.0" +description: This module creates, modifies or deletes a user on OpenManage Enterprise. +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + state: + type: str + description: + - C(present) creates a user in case the I(UserName) provided inside I(attributes) does not exist. + - C(present) modifies a user in case the I(UserName) provided inside I(attributes) exists. + - C(absent) deletes an existing user. + choices: [present, absent] + default: present + user_id: + description: + - Unique ID of the user to be deleted. + - Either I(user_id) or I(name) is mandatory for C(absent) operation. + type: int + name: + type: str + description: + - Unique Name of the user to be deleted. + - Either I(user_id) or I(name) is mandatory for C(absent) operation. + attributes: + type: dict + default: {} + description: + - >- + Payload data for the user operations. It can take the following attributes for C(present). + - >- + UserTypeId, DirectoryServiceId, Description, Name, Password, UserName, RoleId, Locked, Enabled. + - >- + OME will throw error if required parameter is not provided for operation. 
+ - >- + Refer OpenManage Enterprise API Reference Guide for more details. +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module does not support C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Create user with required parameters + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + attributes: + UserName: "user1" + Password: "UserPassword" + RoleId: "10" + Enabled: True + +- name: Create user with all parameters + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + attributes: + UserName: "user2" + Description: "user2 description" + Password: "UserPassword" + RoleId: "10" + Enabled: True + DirectoryServiceId: 0 + UserTypeId: 1 + Locked: False + Name: "user2" + +- name: Modify existing user + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + attributes: + UserName: "user3" + RoleId: "10" + Enabled: True + Description: "Modify user Description" + +- name: Delete existing user using id + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + user_id: 1234 + +- name: Delete existing user using name + dellemc.openmanage.ome_user: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + name: "name" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the user operation. + returned: always + type: str + sample: "Successfully created a User" +user_status: + description: Details of the user operation, when I(state) is C(present). + returned: When I(state) is C(present). 
+ type: dict + sample: + { + "Description": "Test user creation", + "DirectoryServiceId": 0, + "Enabled": true, + "Id": "61546", + "IsBuiltin": false, + "Locked": false, + "Name": "test", + "Password": null, + "PlainTextPassword": null, + "RoleId": "10", + "UserName": "test", + "UserTypeId": 1 + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +def _validate_inputs(module): + """both user_id and name are not acceptable in case of state is absent""" + state = module.params['state'] + user_id = module.params.get('user_id') + name = module.params.get('name') + if state != 'present' and (user_id is None and name is None): + fail_module(module, msg="One of the following 'user_id' or 'name' " + "option is required for state 'absent'") + + +def get_user_id_from_name(rest_obj, name): + """Get the account id using account name""" + user_id = None + if name is not None: + resp = rest_obj.invoke_request('GET', 'AccountService/Accounts') + if resp.success: + for user in resp.json_data.get('value'): + if 'UserName' in user and user['UserName'] == name: + return user['Id'] + return user_id + + +def _get_resource_parameters(module, rest_obj): + state = module.params["state"] + payload = module.params.get("attributes") + if state == "present": + name = payload.get('UserName') + user_id = get_user_id_from_name(rest_obj, name) + if user_id is not None: + payload.update({"Id": user_id}) + path = "AccountService/Accounts('{user_id}')".format(user_id=user_id) + method = 'PUT' + else: + path = "AccountService/Accounts" + method = 'POST' + else: + user_id = module.params.get("user_id") + if user_id is None: + name = module.params.get('name') + user_id = 
get_user_id_from_name(rest_obj, name) + if user_id is None: + fail_module(module, msg="Unable to get the account because the specified account " + "does not exist in the system.") + path = "AccountService/Accounts('{user_id}')".format(user_id=user_id) + method = 'DELETE' + return method, path, payload + + +def password_no_log(attributes): + if isinstance(attributes, dict) and 'Password' in attributes: + attributes['Password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + + +def fail_module(module, **failmsg): + password_no_log(module.params.get("attributes")) + module.fail_json(**failmsg) + + +def exit_module(module, response, http_method): + password_no_log(module.params.get("attributes")) + msg_dict = {'POST': "Successfully created a User", + 'PUT': "Successfully modified a User", + 'DELETE': "Successfully deleted the User"} + state_msg = msg_dict[http_method] + if response.status_code != 204: + module.exit_json(msg=state_msg, changed=True, user_status=response.json_data) + else: + # For delete operation no response content is returned + module.exit_json(msg=state_msg, changed=True) + + +def main(): + specs = { + "state": {"required": False, "type": 'str', "default": "present", + "choices": ['present', 'absent']}, + "user_id": {"required": False, "type": 'int'}, + "name": {"required": False, "type": 'str'}, + "attributes": {"required": False, "type": 'dict'}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[['user_id', 'name'], ], + required_if=[['state', 'present', ['attributes']], ], + supports_check_mode=False) + + try: + _validate_inputs(module) + if module.params.get("attributes") is None: + module.params["attributes"] = {} + with RestOME(module.params, req_session=True) as rest_obj: + method, path, payload = _get_resource_parameters(module, rest_obj) + resp = rest_obj.invoke_request(method, path, data=payload) + if resp.success: + exit_module(module, resp, method) + except HTTPError as err: + 
fail_module(module, msg=str(err), user_status=json.load(err)) + except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err: + fail_module(module, msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py new file mode 100644 index 00000000..b42f180f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ome_user_info +short_description: Retrieves details of all accounts or a specific account on OpenManage Enterprise +version_added: "2.0.0" +description: + - "This module retrieves the list and basic details of all accounts or details of a specific account on + OpenManage Enterprise." +extends_documentation_fragment: + - dellemc.openmanage.ome_auth_options +options: + account_id: + description: Unique Id of the account. + type: int + system_query_options: + description: Options for filtering the output. + type: dict + suboptions: + filter: + description: Filter records for the supported values. + type: str +requirements: + - "python >= 3.8.6" +author: "Jagadeesh N V(@jagadeeshnv)" +notes: + - Run this module from a system that has direct access to DellEMC OpenManage Enterprise. + - This module supports C(check_mode). 
+''' + +EXAMPLES = r''' +--- +- name: Retrieve basic details of all accounts + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + +- name: Retrieve details of a specific account identified by its account ID + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + account_id: 1 + +- name: Get filtered user info based on user name + dellemc.openmanage.ome_user_info: + hostname: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + system_query_options: + filter: "UserName eq 'test'" +''' + +RETURN = r''' +--- +msg: + type: str + description: Over all status of fetching user facts. + returned: on error + sample: "Unable to retrieve the account details." +user_info: + type: dict + description: Details of the user. + returned: success + sample: { + "192.168.0.1": { + "Id": "1814", + "UserTypeId": 1, + "DirectoryServiceId": 0, + "Description": "user name description", + "Name": "user_name", + "Password": null, + "UserName": "user_name", + "RoleId": "10", + "Locked": false, + "IsBuiltin": true, + "Enabled": true + } + } +''' + +import json +from ssl import SSLError +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +def _get_query_parameters(module_params): + """Builds query parameter. 
+ + :return: dict + :example: {"$filter": UserName eq 'user name'} + """ + system_query_param = module_params.get("system_query_options") + query_param = {} + if system_query_param: + query_param = dict([("$" + k, v) for k, v in system_query_param.items() if v is not None]) + return query_param + + +def main(): + specs = { + "account_id": {"type": 'int', "required": False}, + "system_query_options": {"required": False, "type": 'dict', "options": { + "filter": {"type": 'str', "required": False}, + }}, + } + specs.update(ome_auth_params) + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[ + ('account_id', 'system_query_options') + ], + supports_check_mode=True + ) + account_uri = "AccountService/Accounts" + query_param = None + try: + with RestOME(module.params, req_session=True) as rest_obj: + if module.params.get("account_id") is not None: + # Fetch specific account + account_id = module.params.get("account_id") + account_path = "{0}('{1}')".format(account_uri, account_id) + elif module.params.get("system_query_options") is not None: + # Fetch all the user based on UserName + query_param = _get_query_parameters(module.params) + account_path = account_uri + else: + # Fetch all users + account_path = account_uri + resp = rest_obj.invoke_request('GET', account_path, query_param=query_param) + user_facts = resp.json_data + user_exists = True + if "value" in user_facts and len(user_facts["value"]) == 0: + user_exists = False + # check for 200 status as GET only returns this for success + if resp.status_code == 200 and user_exists: + module.exit_json(user_info={module.params["hostname"]: user_facts}) + else: + module.fail_json(msg="Unable to retrieve the account details.") + except HTTPError as err: + module.fail_json(msg=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err: + module.fail_json(msg=str(err)) + + +if 
__name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py new file mode 100644 index 00000000..c0a0fc47 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ +# see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt +# + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: redfish_event_subscription +short_description: Manage Redfish Subscriptions +version_added: "4.1.0" +description: + This module allows to add or delete Redfish Event subscriptions. +extends_documentation_fragment: + - dellemc.openmanage.redfish_auth_options +options: + destination: + description: + - The HTTPS URI of the destination to send events. + - HTTPS is required. + type: str + required: True + event_type: + description: + - Specifies the event type to be subscribed. + - C(Alert) used to subscribe for alert. + - C(MetricReport) used to subscribe for the metrics report. + type: str + default: Alert + choices: [Alert, MetricReport] + event_format_type: + description: + - Specifies the format type of the event to be subscribed. + - C(Event) used to subscribe for Event format type. + - C(MetricReport) used to subscribe for the metrics report format type. + type: str + default: Event + choices: [Event, MetricReport] + state: + description: + - C(present) adds new event subscription. + - C(absent) deletes event subscription with the specified I(destination). 
+ type: str + default: present + choices: ["present", "absent"] +requirements: + - "python >= 3.8.6" +author: + - "Trevor Squillario (@TrevorSquillario)" + - "Sachin Apagundi (@sachin-apa)" +notes: + - I(event_type) needs to be C(MetricReport) and I(event_format_type) needs to be C(MetricReport) for metrics + subscription. + - I(event_type) needs to be C(Alert) and I(event_format_type) needs to be C(Event) for event subscription. + - Modifying a subscription is not supported. + - Context is always set to RedfishEvent. + - This module supports C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Add Redfish metric subscription + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://192.168.1.100:8188" + event_type: MetricReport + event_format_type: MetricReport + state: present + +- name: Add Redfish alert subscription + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + event_type: Alert + event_format_type: Event + state: present + +- name: Delete Redfish subscription with a specified destination + redfish_event_subscription: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + destination: "https://server01.example.com:8188" + state: absent +""" + +RETURN = """ +--- +msg: + description: Overall status of the task. + returned: always + type: str + sample: Successfully added the subscription. 
+status: + description: Returns subscription object created + returned: on adding subscription successfully + type: dict + sample: { + "@Message.ExtendedInfo": [ + { + "Message": "The resource has been created successfully", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.7.Created", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + }, + { + "Message": "A new resource is successfully created.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.2.SYS414", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + "Severity": "Informational" + } + ], + "Actions": { + "#EventDestination.ResumeSubscription": { + "target": "/redfish/v1/EventService/Subscriptions/5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a/Actions/EventDestination.ResumeSubscription" + } + }, + "Context": "RedfishEvent", + "DeliveryRetryPolicy": "RetryForever", + "Description": "Event Subscription Details", + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "Event", + "EventTypes": [ + "Alert" + ], + "EventTypes@odata.count": 1, + "HttpHeaders": [], + "HttpHeaders@odata.count": 0, + "Id": "5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a", + "MetricReportDefinitions": [], + "MetricReportDefinitions@odata.count": 0, + "Name": "EventSubscription 5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a", + "OriginResources": [], + "OriginResources@odata.count": 0, + "Protocol": "Redfish", + "Status": { + "Health": "OK", + "HealthRollup": "OK", + "State": "Enabled" + }, + "SubscriptionType": "RedfishEvent" + } +error_info: + type: dict + description: Details of http error. 
+ returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the operation because the JSON data format entered is invalid.", + "Resolution": "Do the following and the retry the operation: + 1) Enter the correct JSON data format and retry the operation. + 2) Make sure that no syntax error is present in JSON data format. + 3) Make sure that a duplicate key is not present in JSON data format.", + "Severity": "Critical" + }, + { + "Message": "The request body submitted was malformed JSON and + could not be parsed by the receiving service.", + "Resolution": "Ensure that the request body is valid JSON and resubmit the request.", + "Severity": "Critical" + } + ], + "code": "Base.1.2.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } +""" + +import json +import os +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +DESTINATION_INVALID = "The Parameter destination must have an HTTPS destination. The HTTP destination is not allowed" +SUBSCRIPTION_EXISTS = "No changes found to be applied." +SUBSCRIPTION_DELETED = "Successfully deleted the subscription." +SUBSCRIPTION_UNABLE_DEL = "Unable to delete the subscription." +SUBSCRIPTION_UNABLE_ADD = "Unable to add a subscription." +SUBSCRIPTION_ADDED = "Successfully added the subscription." +DESTINATION_MISMATCH = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 


def get_subscription_payload():
    """Return a template payload for a Redfish event subscription.

    The caller overwrites ``Destination``, ``EventFormatType`` and
    ``EventTypes`` before submitting; ``Context`` and ``SubscriptionType``
    are always ``RedfishEvent`` for this module.
    """
    payload = {
        "Destination": "https://192.168.1.100:8188",
        "EventFormatType": "MetricReport",
        "Context": "RedfishEvent",
        "Protocol": "Redfish",
        "EventTypes": ["MetricReport"],
        "SubscriptionType": "RedfishEvent"
    }
    return payload


def get_subscription(obj, destination):
    """Return the existing subscription whose Destination matches, else None.

    :param obj: open Redfish session object.
    :param destination: HTTPS destination URI to look for.
    :return: dict describing the matching subscription, or None.
    """
    url = "{0}{1}".format(obj.root_uri, "EventService/Subscriptions")
    list_resp = obj.invoke_request("GET", url)
    list_subscriptions = list_resp.json_data["Members"]
    for list_subscription in list_subscriptions:
        # The member id is the last path segment of the @odata.id URI.
        # (renamed from `id` to avoid shadowing the builtin)
        member_id = os.path.basename(list_subscription.get('@odata.id'))
        detail_json = get_subscription_details(obj, member_id)
        if detail_json and detail_json["Destination"] == destination:
            subscription = get_subscription_payload()
            subscription["Id"] = detail_json["Id"]
            # Copy the attributes this module cares about from the live object.
            for key in ("Destination", "EventFormatType", "Context",
                        "Protocol", "EventTypes", "SubscriptionType"):
                subscription[key] = detail_json[key]
            return subscription
    return None


def get_subscription_details(obj, id):
    """GET a single subscription by id.

    :param obj: open Redfish session object.
    :param id: subscription identifier (last segment of its @odata.id).
    :return: parsed JSON body on success, otherwise None.
    """
    detail_url = "{0}{1}".format(obj.root_uri, "EventService/Subscriptions/%s" % id)
    detail_resp = obj.invoke_request("GET", detail_url)
    # Check the response status before trusting the parsed body.
    if detail_resp.success:
        return detail_resp.json_data
    return None


def create_subscription(obj, module):
    """POST a new subscription built from the module parameters.

    Exits early (changed=True) in check mode without contacting the service.
    :return: the POST response object.
    """
    payload = get_subscription_payload()
    payload["Destination"] = module.params["destination"]
    payload["EventFormatType"] = module.params["event_format_type"]
    payload["EventTypes"] = [module.params["event_type"]]
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    resp = obj.invoke_request("POST", "{0}{1}".format(obj.root_uri, "EventService/Subscriptions"), data=payload)
    return resp


def delete_subscription(obj, id):
    """DELETE the subscription with the given id and return the response."""
    resp = obj.invoke_request("DELETE", "{0}{1}".format(obj.root_uri, "EventService/Subscriptions/%s" % id))
    return resp


def _validate_inputs(module):
    """Fail the module unless the destination URI uses HTTPS."""
    inp_destination = module.params['destination']
    if not inp_destination.startswith("https"):
        module.fail_json(msg=DESTINATION_INVALID)


def _get_formatted_payload(obj, existing_payload):
    """Return the payload with Redfish metadata (@odata.*) keys stripped."""
    return obj.strip_substr_dict(existing_payload)


def main():
    """Module entry point: reconcile the requested subscription state."""
    specs = {
        "destination": {"required": True, "type": "str"},
        "event_type": {"type": "str", "default": "Alert", "choices": ['Alert', 'MetricReport']},
        "event_format_type": {"type": "str", "default": "Event",
                              "choices": ['Event', 'MetricReport']},
        "state": {"type": "str", "default": "present", "choices": ['present', 'absent']},
    }
    specs.update(redfish_auth_params)

    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        _validate_inputs(module)
        with Redfish(module.params, req_session=True) as obj:
            subscription = get_subscription(obj, module.params["destination"])
            if subscription:
                if module.params["state"] == "present":
                    # A matching subscription already exists; nothing to do.
                    module.exit_json(msg=SUBSCRIPTION_EXISTS, changed=False)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True, msg=CHANGES_FOUND)
                    delete_resp = delete_subscription(obj, subscription["Id"])
                    if delete_resp.success:
                        module.exit_json(msg=SUBSCRIPTION_DELETED, changed=True)
                    else:
                        module.fail_json(msg=SUBSCRIPTION_UNABLE_DEL)
            else:
                if module.params["state"] == "present":
                    create_resp = create_subscription(obj, module)
                    if create_resp.success:
                        module.exit_json(msg=SUBSCRIPTION_ADDED, changed=True,
                                         status=_get_formatted_payload(obj, create_resp.json_data))
                    else:
                        module.fail_json(msg=SUBSCRIPTION_UNABLE_ADD)
                else:
                    # Nothing matches the destination, so state=absent is a no-op.
                    module.exit_json(msg=DESTINATION_MISMATCH, changed=False)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
        # URLError is intentionally not repeated here; it is handled above.
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
+ type: str + default: HTTP + choices: ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"] +requirements: + - "python >= 3.8.6" + - "urllib3" +author: + - "Felix Stephen (@felixs88)" +notes: + - Run this module from a system that has direct access to Redfish APIs. + - This module does not support C(check_mode). +""" + +EXAMPLES = """ +--- +- name: Update the firmware from a single executable file available in a HTTP protocol + dellemc.openmanage.redfish_firmware: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + image_uri: "http://192.168.0.2/firmware_repo/component.exe" + transfer_protocol: "HTTP" + +- name: Update the firmware from a single executable file available in a local path + dellemc.openmanage.redfish_firmware: + baseuri: "192.168.0.1" + username: "user_name" + password: "user_password" + ca_path: "/path/to/ca_cert.pem" + image_uri: "/home/firmware_repo/component.exe" +""" + +RETURN = """ +--- +msg: + description: Overall status of the firmware update task. + returned: always + type: str + sample: Successfully submitted the firmware update task. +task: + description: Returns ID and URI of the created task. + returned: success + type: dict + sample: { + "id": "JID_XXXXXXXXXXXX", + "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX" + } +error_info: + type: dict + description: Details of http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the operation because the JSON data format entered is invalid.", + "Resolution": "Do the following and the retry the operation: + 1) Enter the correct JSON data format and retry the operation. + 2) Make sure that no syntax error is present in JSON data format. 
+ 3) Make sure that a duplicate key is not present in JSON data format.", + "Severity": "Critical" + }, + { + "Message": "The request body submitted was malformed JSON and + could not be parsed by the receiving service.", + "Resolution": "Ensure that the request body is valid JSON and resubmit the request.", + "Severity": "Critical" + } + ], + "code": "Base.1.2.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information." + } + } +""" + + +import json +import os +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +try: + from urllib3.fields import RequestField + from urllib3.filepost import encode_multipart_formdata + HAS_LIB = True +except ImportError: + HAS_LIB = False + +UPDATE_SERVICE = "UpdateService" +JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}" + + +def _encode_form_data(payload_file): + """Encode multipart/form-data for file upload.""" + fields = [] + f_name, f_data, f_type = payload_file.get("file") + f_binary = f_data.read() + req_field = RequestField(name="file", data=f_binary, filename=f_name) + req_field.make_multipart(content_type=f_type) + fields.append(req_field) + data, content_type = encode_multipart_formdata(fields) + return data, content_type + + +def _get_update_service_target(obj, module): + """Returns all the URI which is required for firmware update dynamically.""" + action_resp = obj.invoke_request("GET", "{0}{1}".format(obj.root_uri, UPDATE_SERVICE)) + action_attr = action_resp.json_data["Actions"] + protocol = module.params["transfer_protocol"] + update_uri = None + push_uri = action_resp.json_data.get('HttpPushUri') + inventory_uri = 
action_resp.json_data.get('FirmwareInventory').get('@odata.id') + if "#UpdateService.SimpleUpdate" in action_attr: + update_service = action_attr.get("#UpdateService.SimpleUpdate") + proto = update_service.get("TransferProtocol@Redfish.AllowableValues") + if isinstance(proto, list) and protocol in proto and 'target' in update_service: + update_uri = update_service.get('target') + else: + module.fail_json(msg="Target firmware version does not support {0} protocol.".format(protocol)) + if update_uri is None or push_uri is None or inventory_uri is None: + module.fail_json(msg="Target firmware version does not support redfish firmware update.") + return str(inventory_uri), str(push_uri), str(update_uri) + + +def firmware_update(obj, module): + """Firmware update using single binary file from Local path or HTTP location.""" + image_path = module.params.get("image_uri") + trans_proto = module.params["transfer_protocol"] + inventory_uri, push_uri, update_uri = _get_update_service_target(obj, module) + if image_path.startswith("http"): + payload = {"ImageURI": image_path, "TransferProtocol": trans_proto} + update_status = obj.invoke_request("POST", update_uri, data=payload) + else: + resp_inv = obj.invoke_request("GET", inventory_uri) + with open(os.path.join(image_path), "rb") as img_file: + binary_payload = {"file": (image_path.split(os.sep)[-1], img_file, "multipart/form-data")} + data, ctype = _encode_form_data(binary_payload) + headers = {"If-Match": resp_inv.headers.get("etag")} + headers.update({"Content-Type": ctype}) + upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False, + api_timeout=100) + if upload_status.status_code == 201: + payload = {"ImageURI": upload_status.headers.get("location")} + update_status = obj.invoke_request("POST", update_uri, data=payload) + else: + update_status = upload_status + return update_status + + +def main(): + specs = { + "image_uri": {"required": True, "type": "str"}, + "transfer_protocol": 
{"type": "str", "default": "HTTP", + "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]}, + } + specs.update(redfish_auth_params) + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=False) + if not HAS_LIB: + module.fail_json(msg=missing_required_lib("urllib3")) + try: + message = "Failed to submit the firmware update task." + with Redfish(module.params, req_session=True) as obj: + status = firmware_update(obj, module) + if status.success: + message = "Successfully submitted the firmware update task." + task_uri = status.headers.get("Location") + job_id = task_uri.split("/")[-1] + module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True) + module.fail_json(msg=message, error_info=json.loads(status)) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py new file mode 100644 index 00000000..23094b15 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.1 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: redfish_powerstate +short_description: Manage device power state +version_added: "2.1.0" +description: + - This module allows to manage the different power states of the specified device. +extends_documentation_fragment: + - dellemc.openmanage.redfish_auth_options +options: + resource_id: + description: + - The unique identifier of the device being managed. + For example- U(https:///redfish/v1/Systems/). + - This option is mandatory for I(base_uri) with multiple devices. + - To get the device details, use the API U(https:///redfish/v1/Systems). + required: False + type: str + reset_type: + description: + - This option resets the device. + - If C(ForceOff), Turns off the device immediately. + - If C(ForceOn), Turns on the device immediately. + - If C(ForceRestart), Turns off the device immediately, and then restarts the device. + - If C(GracefulRestart), Performs graceful shutdown of the device, and then restarts the device. + - If C(GracefulShutdown), Performs a graceful shutdown of the device, and the turns off the device. + - If C(Nmi), Sends a diagnostic interrupt to the device. This is usually a non-maskable interrupt + (NMI) on x86 device. + - If C(On), Turns on the device. + - If C(PowerCycle), Performs power cycle on the device. + - If C(PushPowerButton), Simulates the pressing of a physical power button on the device. + - When a power control operation is performed, which is not supported on the device, an error message is displayed + with the list of operations that can be performed. 
+ required: True + type: str + choices: ["ForceOff", "ForceOn", "ForceRestart", "GracefulRestart", "GracefulShutdown", + "Nmi", "On", "PowerCycle", "PushPowerButton"] +requirements: + - "python >= 3.8.6" +author: + - "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to Redfish APIs. + - This module supports C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Manage power state of the first device + dellemc.openmanage.redfish_powerstate: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + reset_type: "On" + +- name: Manage power state of a specified device + dellemc.openmanage.redfish_powerstate: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + reset_type: "ForceOff" + resource_id: "System.Embedded.1" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the reset operation. + returned: always + type: str + sample: "Successfully performed the reset type operation 'On'." +error_info: + type: dict + description: Details of the HTTP error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to complete the operation because the resource + /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset entered in not found.", + "MessageArgs": [ + "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset" + ], + "MessageArgs@odata.count": 1, + "MessageId": "IDRAC.2.1.SYS403", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "Enter the correct resource and retry the operation. + For information about valid resource, + see the Redfish Users Guide available on the support site.", + "Severity": "Critical" + }, + ], + "code": "Base.1.5.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information" + } +} +''' + +import json +import re +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +powerstate_map = {} + + +def fetch_power_uri_resource(module, session_obj): + try: + resource_id = module.params.get("resource_id") + static_resource_id_resource = None + if resource_id: + static_resource_id_resource = "{0}{1}{2}".format(session_obj.root_uri, "Systems/", resource_id) + error_message1 = "The target device does not support the system reset feature" \ + " using Redfish API." + system_uri = "{0}{1}".format(session_obj.root_uri, "Systems") + system_resp = session_obj.invoke_request("GET", system_uri) + system_members = system_resp.json_data.get("Members") + if len(system_members) > 1 and static_resource_id_resource is None: + module.fail_json(msg="Multiple devices exists in the system, but option 'resource_id' is not specified.") + if system_members: + resource_id_list = [system_id["@odata.id"] for system_id in system_members if "@odata.id" in system_id] + system_id_res = static_resource_id_resource or resource_id_list[0] + if system_id_res in resource_id_list: + system_id_res_resp = session_obj.invoke_request("GET", system_id_res) + system_id_res_data = system_id_res_resp.json_data + action_id_res = system_id_res_data.get("Actions") + if action_id_res: + current_state = system_id_res_data["PowerState"] + power_uri = action_id_res['#ComputerSystem.Reset']['target'] + allowable_enums = action_id_res['#ComputerSystem.Reset']['ResetType@Redfish.AllowableValues'] + powerstate_map.update( + {'power_uri': power_uri, 'allowable_enums': allowable_enums, 'current_state': current_state}) + else: + module.fail_json(msg=error_message1) + else: + 
error_message2 = "Invalid device Id '{0}' is provided".format(resource_id) + module.fail_json(msg=error_message2) + else: + module.fail_json(msg=error_message1) + except HTTPError as err: + if err.code in [404, 405]: + module.fail_json(msg=error_message1, + error_info=json.load(err)) + raise err + + +def is_change_applicable_for_power_state(current_power_state, apply_power_state): + """ checks if changes are applicable or not for current system state + :param current_power_state: Current power state + :type current_power_state: str + :param apply_power_state: Required power state + :type apply_power_state: str + :return: boolean True if changes is applicable + """ + on_states = ["On", "PoweringOn"] + off_states = ["Off", "PoweringOff"] + + reset_map_apply = { + ("On", "ForceOn",): off_states, + ("PushPowerButton",): on_states + off_states, + ("ForceOff", "ForceRestart", "GracefulRestart", "GracefulShutdown", "Nmi", "PowerCycle",): on_states + } + is_reset_applicable = False + for apply_states, applicable_states in reset_map_apply.items(): + if apply_power_state in apply_states: + if current_power_state in applicable_states: + is_reset_applicable = True + break + break + return is_reset_applicable + + +def is_valid_reset_type(reset_type, allowable_enum, module): + if reset_type not in allowable_enum: + res_list = re.findall('[A-Z][^A-Z]*', reset_type) + lw_reset_type = " ".join([word.lower() for word in res_list]) + error_msg = "The target device does not support a" \ + " {0} operation.The acceptable values for device reset types" \ + " are {1}.".format(lw_reset_type, ", ".join(allowable_enum)) + module.fail_json(msg=error_msg) + + +def run_change_power_state(redfish_session_obj, module): + """ + Apply reset type to system + Keyword arguments: + redfish_session_obj -- session handle + module -- Ansible module obj + """ + apply_reset_type = module.params["reset_type"] + fetch_power_uri_resource(module, redfish_session_obj) + is_valid_reset_type(apply_reset_type, 
powerstate_map["allowable_enums"], module) + current_power_state = powerstate_map["current_state"] + reset_flag = is_change_applicable_for_power_state(current_power_state, apply_reset_type) + if module.check_mode is True: + if reset_flag is True: + module.exit_json(msg="Changes found to be applied.", changed=True) + else: + module.exit_json(msg="No Changes found to be applied.", changed=False) + + if reset_flag is True: + payload = {"ResetType": apply_reset_type} + power_uri = powerstate_map["power_uri"] + reset_resp = redfish_session_obj.invoke_request("POST", power_uri, data=payload) + if reset_resp.success: + module.exit_json(msg="Successfully performed the reset type operation" + " '{0}'.".format(apply_reset_type), changed=True) + else: + module.exit_json(msg="Unable to perform the reset type operation '{0}'.".format(apply_reset_type), + changed=False) + else: + module.exit_json(msg="The device is already powered {0}.".format(current_power_state.lower()), changed=False) + + +def main(): + specs = { + "resource_id": {"required": False, "type": "str"}, + "reset_type": {"required": True, "type": "str", + "choices": ['ForceOff', 'ForceOn', 'ForceRestart', 'GracefulRestart', + 'GracefulShutdown', 'Nmi', 'On', 'PowerCycle', 'PushPowerButton']}, + } + specs.update(redfish_auth_params) + + module = AnsibleModule( + argument_spec=specs, + supports_check_mode=True) + try: + with Redfish(module.params) as redfish_obj: + run_change_power_state(redfish_obj, module) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except URLError as err: + module.exit_json(msg=str(err), unreachable=True) + except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err: + module.fail_json(msg=str(err)) + except Exception as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py 
b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py new file mode 100644 index 00000000..ce02b4c0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py @@ -0,0 +1,633 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: redfish_storage_volume +short_description: Manages the storage volume configuration +version_added: "2.1.0" +description: + - This module allows to create, modify, initialize, or delete a single storage volume. +extends_documentation_fragment: + - dellemc.openmanage.redfish_auth_options +options: + controller_id: + description: + - Fully Qualified Device Descriptor (FQDD) of the storage controller. + - For example- RAID.Slot.1-1. + - This option is mandatory when I(state) is C(present) while creating a volume. + type: str + volume_id: + description: + - FQDD of existing volume. + - For example- Disk.Virtual.4:RAID.Slot.1-1. + - This option is mandatory in the following scenarios, + - >- + I(state) is C(present), when updating a volume. + - >- + I(state) is C(absent), when deleting a volume. + - >- + I(command) is C(initialize), when initializing a volume. + type: str + state: + description: + - >- + C(present) creates a storage volume for the specified I (controller_id), or modifies the storage volume for the + specified I (volume_id). + "Note: Modification of an existing volume properties depends on drive and controller capabilities". + - C(absent) deletes the volume for the specified I(volume_id). 
+ type: str + choices: [present, absent] + command: + description: + - C(initialize) initializes an existing storage volume for a specified I(volume_id). + type: str + choices: [initialize] + volume_type: + description: + - One of the following volume types must be selected to create a volume. + - >- + C(Mirrored) The volume is a mirrored device. + - >- + C(NonRedundant) The volume is a non-redundant storage device. + - >- + C(SpannedMirrors) The volume is a spanned set of mirrored devices. + - >- + C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant + information. + - >- + C(StripedWithParity) The volume is a device which uses parity to retain redundant information. + type: str + choices: [NonRedundant, Mirrored, StripedWithParity, SpannedMirrors, SpannedStripesWithParity] + name: + description: + - Name of the volume to be created. + - Only applicable when I(state) is C(present). + type: str + drives: + description: + - FQDD of the Physical disks. + - For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1. + - Only applicable when I(state) is C(present) when creating a new volume. + type: list + elements: str + block_size_bytes: + description: + - Block size in bytes.Only applicable when I(state) is C(present). + type: int + capacity_bytes: + description: + - Volume size in bytes. + - Only applicable when I(state) is C(present). + type: str + optimum_io_size_bytes: + description: + - Stripe size value must be in multiples of 64 * 1024. + - Only applicable when I(state) is C(present). + type: int + encryption_types: + description: + - The following encryption types can be selected. + - C(ControllerAssisted) The volume is encrypted by the storage controller entity. + - C(NativeDriveEncryption) The volume utilizes the native drive encryption capabilities + of the drive hardware. + - C(SoftwareAssisted) The volume is encrypted by the software running + on the system or the operating system. 
+ - Only applicable when I(state) is C(present). + type: str + choices: [NativeDriveEncryption, ControllerAssisted, SoftwareAssisted] + encrypted: + description: + - Indicates whether volume is currently utilizing encryption or not. + - Only applicable when I(state) is C(present). + type: bool + oem: + description: + - Includes OEM extended payloads. + - Only applicable when I(state) is I(present). + type: dict + initialize_type: + description: + - Initialization type of existing volume. + - Only applicable when I(command) is C(initialize). + type: str + choices: [Fast, Slow] + default: Fast + +requirements: + - "python >= 3.8.6" +author: "Sajna Shetty(@Sajna-Shetty)" +notes: + - Run this module from a system that has direct access to Redfish APIs. + - This module supports C(check_mode). + - This module always reports changes when I(name) and I(volume_id) are not specified. + Either I(name) or I(volume_id) is required to support C(check_mode). +''' + +EXAMPLES = r''' +--- +- name: Create a volume with supported options + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_type: "Mirrored" + name: "VD0" + controller_id: "RAID.Slot.1-1" + drives: + - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1 + - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1 + block_size_bytes: 512 + capacity_bytes: 299439751168 + optimum_io_size_bytes: 65536 + encryption_types: NativeDriveEncryption + encrypted: true + +- name: Create a volume with minimum options + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + controller_id: "RAID.Slot.1-1" + volume_type: "NonRedundant" + drives: + - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1 + +- name: Modify a volume's encryption type settings + dellemc.openmanage.redfish_storage_volume: + baseuri: 
"192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "present" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + encryption_types: "ControllerAssisted" + encrypted: true + +- name: Delete an existing volume + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + state: "absent" + volume_id: "Disk.Virtual.5:RAID.Slot.1-1" + +- name: Initialize an existing volume + dellemc.openmanage.redfish_storage_volume: + baseuri: "192.168.0.1" + username: "username" + password: "password" + ca_path: "/path/to/ca_cert.pem" + command: "initialize" + volume_id: "Disk.Virtual.6:RAID.Slot.1-1" + initialize_type: "Slow" +''' + +RETURN = r''' +--- +msg: + description: Overall status of the storage configuration operation. + returned: always + type: str + sample: "Successfully submitted create volume task." +task: + type: dict + description: Returns ID and URI of the created task. + returned: success + sample: { + "id": "JID_XXXXXXXXXXXXX", + "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX" + } +error_info: + type: dict + description: Details of a http error. + returned: on http error + sample: { + "error": { + "@Message.ExtendedInfo": [ + { + "Message": "Unable to perform configuration operations because a + configuration job for the device already exists.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.1.6.STOR023", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "Wait for the current job for the device to complete + or cancel the current job before attempting more configuration + operations on the device.", + "Severity": "Informational" + } + ], + "code": "Base.1.2.GeneralError", + "message": "A general error has occurred. 
See ExtendedInfo for more information" + } + } +''' + +import json +import copy +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + + +VOLUME_INITIALIZE_URI = "{storage_base_uri}/Volumes/{volume_id}/Actions/Volume.Initialize" +DRIVES_URI = "{storage_base_uri}/Drives/{driver_id}" +CONTROLLER_URI = "{storage_base_uri}/{controller_id}" +SETTING_VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}/Settings" +CONTROLLER_VOLUME_URI = "{storage_base_uri}/{controller_id}/Volumes" +VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}" +storage_collection_map = {} +CHANGES_FOUND = "Changes found to be applied." +NO_CHANGES_FOUND = "No changes found to be applied." + + +def fetch_storage_resource(module, session_obj): + try: + system_uri = "{0}{1}".format(session_obj.root_uri, "Systems") + system_resp = session_obj.invoke_request("GET", system_uri) + system_members = system_resp.json_data.get("Members") + if system_members: + system_id_res = system_members[0]["@odata.id"] + system_id_res_resp = session_obj.invoke_request("GET", system_id_res) + system_id_res_data = system_id_res_resp.json_data.get("Storage") + if system_id_res_data: + storage_collection_map.update({"storage_base_uri": system_id_res_data["@odata.id"]}) + else: + module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.") + else: + module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.") + except HTTPError as err: + if err.code in [404, 405]: + module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.", + error_info=json.load(err)) + raise err + except (URLError, 
SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def volume_payload(module): + params = module.params + drives = params.get("drives") + capacity_bytes = params.get("capacity_bytes") + physical_disks = [] + oem = params.get("oem") + encrypted = params.get("encrypted") + encryption_types = params.get("encryption_types") + if capacity_bytes: + capacity_bytes = int(capacity_bytes) + if drives: + storage_base_uri = storage_collection_map["storage_base_uri"] + physical_disks = [{"@odata.id": DRIVES_URI.format(storage_base_uri=storage_base_uri, + driver_id=drive_id)} for drive_id in drives] + + raid_mapper = { + "Name": params.get("name"), + "VolumeType": params.get("volume_type"), + "BlockSizeBytes": params.get("block_size_bytes"), + "CapacityBytes": capacity_bytes, + "OptimumIOSizeBytes": params.get("optimum_io_size_bytes"), + "Drives": physical_disks + } + raid_payload = dict([(k, v) for k, v in raid_mapper.items() if v]) + if oem: + raid_payload.update(params.get("oem")) + if encrypted is not None: + raid_payload.update({"Encrypted": encrypted}) + if encryption_types: + raid_payload.update({"EncryptionTypes": [encryption_types]}) + + return raid_payload + + +def check_physical_disk_exists(module, drives): + """ + validation to check if physical disks(drives) available for the specified controller + """ + specified_drives = module.params.get("drives") + if specified_drives: + existing_drives = [] + specified_controller_id = module.params.get("controller_id") + if drives: + for drive in drives: + drive_uri = drive['@odata.id'] + drive_id = drive_uri.split("/")[-1] + existing_drives.append(drive_id) + else: + module.fail_json(msg="No Drive(s) are attached to the specified " + "Controller Id: {0}.".format(specified_controller_id)) + invalid_drives = list(set(specified_drives) - set(existing_drives)) + if invalid_drives: + invalid_drive_msg = ",".join(invalid_drives) + module.fail_json(msg="Following Drive(s) {0} are not attached to the 
" + "specified Controller Id: {1}.".format(invalid_drive_msg, specified_controller_id)) + return True + + +def check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message): + """ + common validation to check if , specified volume or controller id exist in the system or not + """ + try: + resp = session_obj.invoke_request('GET', uri) + return resp + except HTTPError as err: + if err.code == 404: + if module.check_mode: + return err + module.fail_json(msg=err_message) + raise err + except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: + raise err + + +def check_controller_id_exists(module, session_obj): + """ + Controller availability Validation + """ + specified_controller_id = module.params.get("controller_id") + uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=specified_controller_id) + err_message = "Specified Controller {0} does " \ + "not exist in the System.".format(specified_controller_id) + resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message) + if resp.success: + return check_physical_disk_exists(module, resp.json_data["Drives"]) + else: + module.fail_json(msg="Failed to retrieve the details of the specified Controller Id " + "{0}.".format(specified_controller_id)) + + +def check_volume_id_exists(module, session_obj, volume_id): + """ + validation to check if volume id is valid in case of modify, delete, initialize operation + """ + uri = VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], volume_id=volume_id) + err_message = "Specified Volume Id {0} does not exist in the System.".format(volume_id) + resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message) + return resp + + +def check_initialization_progress(module, session_obj, volume_id): + """ + validation check if any operation is running in specified volume id. 
+ """ + operations = [] + resp = check_volume_id_exists(module, session_obj, volume_id) + if resp.success: + operations = resp.json_data["Operations"] + return operations + + +def perform_storage_volume_action(method, uri, session_obj, action, payload=None): + """ + common request call for raid creation update delete and initialization + """ + try: + resp = session_obj.invoke_request(method, uri, data=payload) + task_uri = resp.headers["Location"] + return get_success_message(action, task_uri) + except (HTTPError, URLError, SSLValidationError, ConnectionError, + TypeError, ValueError) as err: + raise err + + +def check_mode_validation(module, session_obj, action, uri): + volume_id = module.params.get('volume_id') + name = module.params.get("name") + block_size_bytes = module.params.get("block_size_bytes") + capacity_bytes = module.params.get("capacity_bytes") + optimum_io_size_bytes = module.params.get("optimum_io_size_bytes") + encryption_types = module.params.get("encryption_types") + encrypted = module.params.get("encrypted") + volume_type = module.params.get("volume_type") + drives = module.params.get("drives") + if name is None and volume_id is None and module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + if action == "create" and name is not None: + volume_resp = session_obj.invoke_request("GET", uri) + volume_resp_data = volume_resp.json_data + if volume_resp_data.get("Members@odata.count") == 0 and module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif 0 < volume_resp_data.get("Members@odata.count"): + for mem in volume_resp_data.get("Members"): + mem_resp = session_obj.invoke_request("GET", mem["@odata.id"]) + if mem_resp.json_data["Name"] == name: + volume_id = mem_resp.json_data["Id"] + break + if name is not None and module.check_mode and volume_id is None: + module.exit_json(msg=CHANGES_FOUND, changed=True) + if volume_id is not None: + resp = session_obj.invoke_request("GET", SETTING_VOLUME_ID_URI.format( + 
storage_base_uri=storage_collection_map["storage_base_uri"], + volume_id=volume_id)) + resp_data = resp.json_data + exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"], + "CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"], + "EncryptionTypes": resp_data["EncryptionTypes"][0], + "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "VolumeType": resp_data["VolumeType"]} + exit_value_filter = dict([(k, v) for k, v in exist_value.items() if v is not None]) + cp_exist_value = copy.deepcopy(exit_value_filter) + req_value = {"Name": name, "BlockSizeBytes": block_size_bytes, + "Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes, + "VolumeType": volume_type, "EncryptionTypes": encryption_types} + if capacity_bytes is not None: + req_value["CapacityBytes"] = int(capacity_bytes) + req_value_filter = dict([(k, v) for k, v in req_value.items() if v is not None]) + cp_exist_value.update(req_value_filter) + exist_drive, req_drive = [], [] + if resp_data["Links"]: + exist_drive = [disk["@odata.id"].split("/")[-1] for disk in resp_data["Links"]["Drives"]] + if drives is not None: + req_drive = sorted(drives) + diff_changes = [bool(set(exit_value_filter.items()) ^ set(cp_exist_value.items())) or + bool(set(exist_drive) ^ set(req_drive))] + if module.check_mode and any(diff_changes) is True: + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif (module.check_mode and any(diff_changes) is False) or \ + (not module.check_mode and any(diff_changes) is False): + module.exit_json(msg=NO_CHANGES_FOUND) + return None + + +def perform_volume_create_modify(module, session_obj): + """ + perform volume creation and modification for state present + """ + specified_controller_id = module.params.get("controller_id") + volume_id = module.params.get("volume_id") + if specified_controller_id is not None: + check_controller_id_exists(module, session_obj) + uri = 
CONTROLLER_VOLUME_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], + controller_id=specified_controller_id) + method = "POST" + action = "create" + else: + resp = check_volume_id_exists(module, session_obj, volume_id) + if resp.success: + uri = SETTING_VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], + volume_id=volume_id) + method = "PATCH" + action = "modify" + payload = volume_payload(module) + check_mode_validation(module, session_obj, action, uri) + if not payload: + module.fail_json(msg="Input options are not provided for the {0} volume task.".format(action)) + return perform_storage_volume_action(method, uri, session_obj, action, payload) + + +def perform_volume_deletion(module, session_obj): + """ + perform volume deletion for state absent + """ + volume_id = module.params.get("volume_id") + if volume_id: + resp = check_volume_id_exists(module, session_obj, volume_id) + if hasattr(resp, "success") and resp.success and not module.check_mode: + uri = VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], volume_id=volume_id) + method = "DELETE" + return perform_storage_volume_action(method, uri, session_obj, "delete") + elif hasattr(resp, "success") and resp.success and module.check_mode: + module.exit_json(msg=CHANGES_FOUND, changed=True) + elif hasattr(resp, "code") and resp.code == 404 and module.check_mode: + module.exit_json(msg=NO_CHANGES_FOUND) + else: + module.fail_json(msg="'volume_id' option is a required property for deleting a volume.") + + +def perform_volume_initialization(module, session_obj): + """ + perform volume initialization for command initialize + """ + specified_volume_id = module.params.get("volume_id") + if specified_volume_id: + operations = check_initialization_progress(module, session_obj, specified_volume_id) + if operations: + operation_message = "Cannot perform the configuration operations because a " \ + "configuration job for the device already 
exists." + operation_name = operations[0].get("OperationName") + percentage_complete = operations[0].get("PercentageComplete") + if operation_name and percentage_complete: + operation_message = "Cannot perform the configuration operation because the configuration job '{0}'" \ + " in progress is at '{1}' percentage.".format(operation_name, percentage_complete) + module.fail_json(msg=operation_message) + else: + method = "POST" + uri = VOLUME_INITIALIZE_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], + volume_id=specified_volume_id) + payload = {"InitializeType": module.params["initialize_type"]} + return perform_storage_volume_action(method, uri, session_obj, "initialize", payload) + else: + module.fail_json(msg="'volume_id' option is a required property for initializing a volume.") + + +def configure_raid_operation(module, session_obj): + """ + configure raid action based on state and command input + """ + module_params = module.params + state = module_params.get("state") + command = module_params.get("command") + if state is not None and state == "present": + return perform_volume_create_modify(module, session_obj) + elif state is not None and state == "absent": + return perform_volume_deletion(module, session_obj) + elif command is not None and command == "initialize": + return perform_volume_initialization(module, session_obj) + + +def get_success_message(action, task_uri): + """ + message for different types of raid actions + """ + msg = "Successfully submitted {0} volume task.".format(action) + status_message = {"msg": msg} + if task_uri is not None: + task_id = task_uri.split("/")[-1] + status_message.update({"task_uri": task_uri, "task_id": task_id}) + return status_message + + +def validate_inputs(module): + """ + validation check for state and command input for null values. 
+ """ + module_params = module.params + state = module_params.get("state") + command = module_params.get("command") + if state is None and command is None: + module.fail_json(msg="Either state or command should be provided to further actions.") + elif state == "present" and\ + module_params.get("controller_id") is None and\ + module_params.get("volume_id") is None: + module.fail_json(msg="When state is present, either controller_id or" + " volume_id must be specified to perform further actions.") + + +def main(): + specs = { + "state": {"type": "str", "required": False, "choices": ['present', 'absent']}, + "command": {"type": "str", "required": False, "choices": ['initialize']}, + "volume_type": {"type": "str", "required": False, + "choices": ['NonRedundant', 'Mirrored', + 'StripedWithParity', 'SpannedMirrors', + 'SpannedStripesWithParity']}, + "name": {"required": False, "type": "str"}, + "controller_id": {"required": False, "type": "str"}, + "drives": {"elements": "str", "required": False, "type": "list"}, + "block_size_bytes": {"required": False, "type": "int"}, + "capacity_bytes": {"required": False, "type": "str"}, + "optimum_io_size_bytes": {"required": False, "type": "int"}, + "encryption_types": {"type": "str", "required": False, + "choices": ['NativeDriveEncryption', 'ControllerAssisted', 'SoftwareAssisted']}, + "encrypted": {"required": False, "type": "bool"}, + "volume_id": {"required": False, "type": "str"}, + "oem": {"required": False, "type": "dict"}, + "initialize_type": {"type": "str", "required": False, "choices": ['Fast', 'Slow'], "default": "Fast"}, + } + + specs.update(redfish_auth_params) + + module = AnsibleModule( + argument_spec=specs, + mutually_exclusive=[['state', 'command']], + required_one_of=[['state', 'command']], + required_if=[['command', 'initialize', ['volume_id']], + ['state', 'absent', ['volume_id']], ], + supports_check_mode=True) + + try: + validate_inputs(module) + with Redfish(module.params, req_session=True) as session_obj: 
+ fetch_storage_resource(module, session_obj) + status_message = configure_raid_operation(module, session_obj) + task_status = {"uri": status_message.get("task_uri"), "id": status_message.get("task_id")} + module.exit_json(msg=status_message["msg"], task=task_status, changed=True) + except HTTPError as err: + module.fail_json(msg=str(err), error_info=json.load(err)) + except (URLError, SSLValidationError, ConnectionError, ImportError, ValueError, + RuntimeError, TypeError, OSError, SSLError) as err: + module.fail_json(msg=str(err)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/openmanage/requirements.txt b/ansible_collections/dellemc/openmanage/requirements.txt new file mode 100644 index 00000000..604f7ba2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/requirements.txt @@ -0,0 +1,2 @@ +omsdk +netaddr>=0.7.19 \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/requirements.yml b/ansible_collections/dellemc/openmanage/requirements.yml new file mode 100644 index 00000000..6440db74 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/requirements.yml @@ -0,0 +1,2 @@ +collections: + - name: dellemc.openmanage diff --git a/ansible_collections/dellemc/openmanage/tests/.gitignore b/ansible_collections/dellemc/openmanage/tests/.gitignore new file mode 100644 index 00000000..8c8e7569 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/.gitignore @@ -0,0 +1,4 @@ +output/ +# Unit test / coverage reports +htmlcov/ +.tox/ \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/README.md b/ansible_collections/dellemc/openmanage/tests/README.md new file mode 100644 index 00000000..f66cdd59 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/README.md @@ -0,0 +1,54 @@ +### Overview +Dell EMC OpenManage Ansible Modules unit test scripts are located under + [unit](./tests/unit) directory. 
+ +### Implementing the unit tests +Any contribution must have an associated unit test. This section covers the + tests that need to be carried out. +* The unit tests are required for each new resource, bug fix, or enhancement. They must cover what is being submitted. +* The name of the test modules should start with the prefix "test_" in + addition to the tested module name. For example: test_ome_user + +### Prerequisites +* Dell EMC OpenManage collections - to install run `ansible-galaxy collection + install dellemc.openmanage` +* To run the unit tests for iDRAC modules, install OpenManage Python Software Development Kit (OMSDK) using +`pip install omsdk --upgrade` or from [Dell EMC OpenManage Python SDK](https://github.com/dell/omsdk) + +### Executing unit tests +You can execute them manually by using any tool of your choice, like `pytest` or `ansible-test`. + +#### Executing with `ansible-test` +* Clone [Ansible repository](https://github.com/ansible/ansible) from GitHub to local $ANSIBLE_DIR. +* Copy `compat` directory from the cloned repository path + `$ANSIBLE_DIR/test/units/` to the location of the installed Dell EMC OpenManage collection `$ANSIBLE_COLLECTIONS_PATHS/ansible_collections/dellemc/openmanage/tests/unit`. 
+* Copy `utils.py` file from `$ANSIBLE_DIR/test/units/modules` tests location to the location of the installed collection `$ANSIBLE_COLLECTIONS_PATHS/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules` +* Edit the copied `utils.py` to refer the above `compat` package as below: +```python + from units.compat import unittest + + # Replace the above lines in utils.py as below + + from ansible_collections.dellemc.openmanage.tests.unit.compat import unittest +``` +* To install `ansible-test` requirements use + ``` + ansible-test units --requirements + ``` +* To perform a test, run the following command + ``` + ansible-test units -vvv + ``` +* To run any specific module use the below command, + ``` + ansible-test units idrac_server_config_profile + ``` +See [here](https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html#testing-units) for more details on unit-testing. + +#### Executing with `pytest` + +See [here](https://docs.pytest.org/en/stable/). + +### Acceptance criteria +The code coverage of new module should be more than 90%. +Execute code coverage with `pytest` as explained [here](https://pytest-cov.readthedocs.io/en/latest/reporting.html). 
\ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/__init__.py b/ansible_collections/dellemc/openmanage/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/tests/requirements.txt b/ansible_collections/dellemc/openmanage/tests/requirements.txt new file mode 100644 index 00000000..3ea8227f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/requirements.txt @@ -0,0 +1,9 @@ +omsdk +pytest +pytest-xdist==2.5.0 +mock +pytest-mock +pytest-cov +# pytest-ansible==2.0.1 +coverage==4.5.4 +netaddr>=0.7.19 diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..f6fec0eb --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt @@ -0,0 +1,3 @@ +tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip +plugins/modules/idrac_attributes.py compile-2.6!skip +plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..f6fec0eb --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt @@ -0,0 +1,3 @@ +tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip +plugins/modules/idrac_attributes.py compile-2.6!skip +plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..f6fec0eb --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt @@ -0,0 +1,3 @@ 
+tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip +plugins/modules/idrac_attributes.py compile-2.6!skip +plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..9d8f3ba1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt @@ -0,0 +1,7 @@ +plugins/modules/dellemc_get_firmware_inventory.py validate-modules:deprecation-mismatch +plugins/modules/dellemc_get_firmware_inventory.py validate-modules:invalid-documentation +plugins/modules/dellemc_get_system_inventory.py validate-modules:deprecation-mismatch +plugins/modules/dellemc_get_system_inventory.py validate-modules:invalid-documentation +tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip +plugins/modules/idrac_attributes.py compile-2.6!skip +plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file diff --git a/ansible_collections/dellemc/openmanage/tests/unit/__init__.py b/ansible_collections/dellemc/openmanage/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/__init__.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/__init__.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py new file mode 100644 index 00000000..fc0f0be5 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2022 Dell Inc. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. +# Other trademarks may be trademarks of their respective owners. +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME +from mock import MagicMock +import json + +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.' + + +class TestRestOME(object): + + @pytest.fixture + def ome_response_mock(self, mocker): + set_method_result = {'json_data': {}} + response_class_mock = mocker.patch( + MODULE_UTIL_PATH + 'ome.OpenURLResponse', + return_value=set_method_result) + response_class_mock.success = True + response_class_mock.status_code = 200 + return response_class_mock + + @pytest.fixture + def mock_response(self): + mock_response = MagicMock() + mock_response.getcode.return_value = 200 + mock_response.headers = mock_response.getheaders.return_value = {'X-Auth-Token': 'token_id'} + mock_response.read.return_value = json.dumps({"value": "data"}) + return mock_response + + def test_invoke_request_with_session(self, mock_response, mocker): + mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = True + with RestOME(module_params, req_session) as obj: + response = 
obj.invoke_request("/testpath", "GET") + assert response.status_code == 200 + assert response.json_data == {"value": "data"} + assert response.success is True + + def test_invoke_request_without_session(self, mock_response, mocker): + mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = False + with RestOME(module_params, req_session) as obj: + response = obj.invoke_request("/testpath", "GET") + assert response.status_code == 200 + assert response.json_data == {"value": "data"} + assert response.success is True + + def test_invoke_request_without_session_with_header(self, mock_response, mocker): + mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = False + with RestOME(module_params, req_session) as obj: + response = obj.invoke_request("/testpath", "POST", headers={"application": "octstream"}) + assert response.status_code == 200 + assert response.json_data == {"value": "data"} + assert response.success is True + + def test_invoke_request_with_session_connection_error(self, mocker, mock_response): + mock_response.success = False + mock_response.status_code = 500 + mock_response.json_data = {} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = True + with pytest.raises(ConnectionError): + with RestOME(module_params, req_session) as obj: + obj.invoke_request("/testpath", "GET") + + @pytest.mark.parametrize("exc", [URLError, SSLValidationError, ConnectionError]) + def test_invoke_request_error_case_handling(self, exc, mock_response, mocker): + open_url_mock = mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + 
return_value=mock_response) + open_url_mock.side_effect = exc("test") + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = False + with pytest.raises(exc) as e: + with RestOME(module_params, req_session) as obj: + obj.invoke_request("/testpath", "GET") + + def test_invoke_request_http_error_handling(self, mock_response, mocker): + open_url_mock = mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + open_url_mock.side_effect = HTTPError('http://testhost.com/', 400, + 'Bad Request Error', {}, None) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + req_session = False + with pytest.raises(HTTPError) as e: + with RestOME(module_params, req_session) as obj: + obj.invoke_request("/testpath", "GET") + + def test_get_all_report_details(self, mock_response, mocker): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 50, "value": list(range(51))} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(module_params, True) as obj: + reports = obj.get_all_report_details("DeviceService/Devices") + assert reports == {"resp_obj": mock_response, "report_list": list(range(51))} + + def test_get_report_list_error_case(self, mock_response, mocker): + mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + invoke_obj = mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + side_effect=HTTPError('http://testhost.com/', 400, 'Bad Request Error', {}, None)) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with pytest.raises(HTTPError) as e: + with RestOME(module_params, False) as obj: + 
obj.get_all_report_details("DeviceService/Devices") + + @pytest.mark.parametrize("query_param", [ + {"inp": {"$filter": "UserName eq 'admin'"}, "out": "%24filter=UserName%20eq%20%27admin%27"}, + {"inp": {"$top": 1, "$skip": 2, "$filter": "JobType/Id eq 8"}, "out": + "%24top=1&%24skip=2&%24filter=JobType%2FId%20eq%208"}, + {"inp": {"$top": 1, "$skip": 3}, "out": "%24top=1&%24skip=3"} + ]) + def test_build_url(self, query_param, mocker): + """builds complete url""" + base_uri = 'https://192.168.0.1:443/api' + path = "AccountService/Accounts" + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME._get_base_url', + return_value=base_uri) + inp = query_param["inp"] + out = query_param["out"] + url = RestOME(module_params=module_params)._build_url(path, query_param=inp) + assert url == base_uri + "/" + path + "?" + out + assert "+" not in url + + def test_get_job_type_id(self, mock_response, mocker): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 50, "value": [{"Name": "PowerChange", "Id": 11}]} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + jobtype_name = "PowerChange" + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(module_params, True) as obj: + job_id = obj.get_job_type_id(jobtype_name) + assert job_id == 11 + + def test_get_job_type_id_null_case(self, mock_response, mocker): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 50, "value": [{"Name": "PowerChange", "Id": 11}]} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + jobtype_name = "FirmwareUpdate" + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with 
RestOME(module_params, True) as obj: + job_id = obj.get_job_type_id(jobtype_name) + assert job_id is None + + def test_get_device_id_from_service_tag_ome_case01(self, mocker, mock_response): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 1, "value": [{"Name": "xyz", "Id": 11}]} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + ome_default_args = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(ome_default_args, True) as obj: + details = obj.get_device_id_from_service_tag("xyz") + assert details["Id"] == 11 + assert details["value"] == {"Name": "xyz", "Id": 11} + + def test_get_device_id_from_service_tag_ome_case02(self, mocker, mock_response): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 0, "value": []} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + ome_default_args = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(ome_default_args, True) as obj: + details = obj.get_device_id_from_service_tag("xyz") + assert details["Id"] is None + assert details["value"] == {} + + def test_get_all_items_with_pagination(self, mock_response, mocker): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = {"@odata.count": 50, "value": list(range(51))} + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(module_params, True) as obj: + reports = obj.get_all_items_with_pagination("DeviceService/Devices") + assert reports == {"total_count": 50, "value": list(range(51))} + + def test_get_all_items_with_pagination_error_case(self, mock_response, mocker): + 
mocker.patch(MODULE_UTIL_PATH + 'ome.open_url', + return_value=mock_response) + invoke_obj = mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + side_effect=HTTPError('http://testhost.com/', 400, 'Bad Request Error', {}, None)) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with pytest.raises(HTTPError) as e: + with RestOME(module_params, False) as obj: + obj.get_all_items_with_pagination("DeviceService/Devices") + + def test_get_device_type(self, mock_response, mocker): + mock_response.success = True + mock_response.status_code = 200 + mock_response.json_data = { + "@odata.context": "/api/$metadata#Collection(DeviceService.DeviceType)", + "@odata.count": 5, + "value": [ + { + "@odata.type": "#DeviceService.DeviceType", + "DeviceType": 1000, + "Name": "SERVER", + "Description": "Server Device" + }, + { + "@odata.type": "#DeviceService.DeviceType", + "DeviceType": 2000, + "Name": "CHASSIS", + "Description": "Chassis Device" + }, + { + "@odata.type": "#DeviceService.DeviceType", + "DeviceType": 3000, + "Name": "STORAGE", + "Description": "Storage Device" + }, + { + "@odata.type": "#DeviceService.DeviceType", + "DeviceType": 4000, + "Name": "NETWORK_IOM", + "Description": "NETWORK IO Module Device" + }, + { + "@odata.type": "#DeviceService.DeviceType", + "DeviceType": 8000, + "Name": "STORAGE_IOM", + "Description": "Storage IOM Device" + } + ] + } + mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request', + return_value=mock_response) + module_params = {'hostname': '192.168.0.1', 'username': 'username', + 'password': 'password', "port": 443} + with RestOME(module_params, False) as obj: + type_map = obj.get_device_type() + assert type_map == {1000: "SERVER", 2000: "CHASSIS", 3000: "STORAGE", + 4000: "NETWORK_IOM", 8000: "STORAGE_IOM"} diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/__init__.py 
class AnsibleFailJSonException(Exception):
    """Stand-in for the exception raised by a mocked ``fail_json`` call.

    Captures the failure message plus any extra keyword arguments so that
    tests can assert on exactly what a module reported.
    """

    def __init__(self, msg, **kwargs):
        # Record the details first, then delegate to Exception so that
        # args[0] and str(exc) still expose the message normally.
        self.fail_msg = msg
        self.fail_kwargs = kwargs
        super(AnsibleFailJSonException, self).__init__(msg)
module_args, check_mode=False): + """[workaround]: generic exception handling in module will + be caught here and extracted the result for exit_json case""" + module_args.update({'_ansible_check_mode': check_mode}) + set_module_args(module_args) + result = {} + try: + with pytest.raises(AnsibleExitJson) as ex: + self.module.main() + except Exception as err: + result = ast.literal_eval(err.args[0]['msg']) + return result + + def get_module_mock(self, params=None, check_mode=False): + if params is None: + params = {} + + def fail_func(msg, **kwargs): + raise AnsibleFailJSonException(msg, **kwargs) + + module = MagicMock() + module.fail_json.side_effect = fail_func + module.exit_json.side_effect = fail_func + module.params = params + module.check_mode = check_mode + return module diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py new file mode 100644 index 00000000..e6f9ae46 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2022 Dell Inc. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. +# Other trademarks may be trademarks of their respective owners. +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible.module_utils import basic +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import set_module_args, exit_json, \ + fail_json, AnsibleFailJson, AnsibleExitJson +from mock import MagicMock + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
@pytest.fixture
def ome_connection_mock(mocker, ome_response_mock):
    """Patch RestOME inside ome_device_info and return the context-managed mock."""
    patched_class = mocker.patch(MODULE_PATH + 'ome_device_info.RestOME')
    # The module uses "with RestOME(...) as rest_obj", so grab what __enter__ yields.
    connection = patched_class.return_value.__enter__.return_value
    connection.invoke_request.return_value = ome_response_mock
    return connection
return module + + +@pytest.fixture +def default_ome_args(): + return {"hostname": "hostname", "username": "username", "password": "password"} diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py new file mode 100644 index 00000000..0386269e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py @@ -0,0 +1,237 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_eventing +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock, PropertyMock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestConfigureEventing(FakeAnsibleModule): + module = dellemc_configure_idrac_eventing + + @pytest.fixture + def idrac_configure_eventing_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).create_share_obj = Mock(return_value="Status") + type(idrac_obj).set_liason_share = Mock(return_value="Status") + return idrac_obj + + @pytest.fixture + def idrac_file_manager_config_eventing_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.file_share_manager') + except 
AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def is_changes_applicable_eventing_mock(self, mocker): + try: + changes_applicable_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.config_mgr') + except AttributeError: + changes_applicable_obj = MagicMock() + obj = MagicMock() + changes_applicable_obj.is_change_applicable.return_value = obj + return changes_applicable_obj + + @pytest.fixture + def idrac_connection_configure_eventing_mock(self, mocker, idrac_configure_eventing_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_eventing.iDRACConnection', + return_value=idrac_configure_eventing_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_eventing_mock + return idrac_configure_eventing_mock + + def test_main_configure_eventing_success_case01(self, idrac_connection_configure_eventing_mock, idrac_default_args, + mocker, idrac_file_manager_config_eventing_mock): + idrac_default_args.update({"share_name": None, 'share_password': None, "destination_number": 1, + "destination": "1.1.1.1", 'share_mnt': None, 'share_user': None}) + message = {'msg': 'Successfully configured the idrac eventing settings.', + 'eventing_status': {"Id": "JID_12345123456", "JobState": "Completed"}, + 'changed': True} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=message) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully configured the iDRAC eventing settings." + status_msg = {"Status": "Success", "Message": "No changes found to commit!"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "No changes found to commit!" + + def test_run_idrac_eventing_config_success_case01(self, idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock, idrac_default_args, + is_changes_applicable_eventing_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, "destination": "1.1.1.1", + "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4, + "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test", + "enable_alerts": "Enabled", "authentication": "Enabled", + "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname", + "password": "pwd"}) + message = {"changes_applicable": True, "message": "Changes found to commit!"} + idrac_connection_configure_eventing_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert "Changes found to commit!" 
== ex.value.args[0] + + def test_run_idrac_eventing_config_success_case02(self, idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, "destination": "1.1.1.1", + "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4, + "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test", + "enable_alerts": "Enabled", "authentication": "Enabled", + "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname", + "password": "pwd"}) + message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True, + "Status": "Success"} + idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert result['message'] == 'changes found to commit!' 
+ + def test_run_idrac_eventing_config_success_case03(self, idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, + "destination": "1.1.1.1", "snmp_v3_username": "snmpuser", + "snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled", + "address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled", + "authentication": "Enabled", "smtp_ip_address": "192.168.0.1", "smtp_port": 443, + "username": "uname", "password": "pwd"}) + message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert result["Message"] == 'No changes found to commit!' 
+ + def test_run_idrac_eventing_config_success_case04(self, idrac_connection_configure_eventing_mock, + idrac_default_args, idrac_file_manager_config_eventing_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, "destination": "1.1.1.1", + "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4, + "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test", + "enable_alerts": "Enabled", "authentication": "Enabled", + "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname", + "password": "pwd"}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "Success"} + idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert result['Message'] == 'No changes were applied' + + def test_run_idrac_eventing_config_success_case05(self, idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": None, "destination": None, + "snmp_v3_username": None, "snmp_trap_state": None, "alert_number": None, + "email_alert_state": None, "address": None, "custom_message": None, + "enable_alerts": None, "authentication": None, + "smtp_ip_address": None, "smtp_port": None, "username": None, + "password": None}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "Success"} + obj = MagicMock() + idrac_connection_configure_eventing_mock.config_mgr = obj + type(obj).configure_snmp_trap_destination = 
PropertyMock(return_value=message) + type(obj).configure_email_alerts = PropertyMock(return_value=message) + type(obj).configure_idrac_alerts = PropertyMock(return_value=message) + type(obj).configure_smtp_server_settings = PropertyMock(return_value=message) + idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert result['Message'] == 'No changes were applied' + + def test_run_idrac_eventing_config_failed_case01(self, idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, "destination": "1.1.1.1", + "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4, + "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test", + "enable_alerts": "Enabled", "authentication": "Enabled", + "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname", + "password": "pwd"}) + message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}} + idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert ex.value.args[0] == 'status failed in checking Data' + + def test_run_idrac_eventing_config_failed_case02(self, idrac_connection_configure_eventing_mock, + idrac_default_args, idrac_file_manager_config_eventing_mock): + idrac_default_args.update({"share_name": 
None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, "destination": "1.1.1.1", + "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4, + "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test", + "enable_alerts": "Enabled", "authentication": "Enabled", + "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname", + "password": "pwd"}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "failed"} + idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert result['Message'] == 'No changes were applied' + + def test_run_idrac_eventing_config_failed_case03(self, idrac_connection_configure_eventing_mock, + idrac_default_args, idrac_file_manager_config_eventing_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "destination_number": 1, + "destination": "1.1.1.1", "snmp_v3_username": "snmpuser", + "snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled", + "address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled", + "authentication": "Enabled", "smtp_ip_address": "192.168.0.1", + "smtp_port": 443, "username": "uname", "password": "pwd"}) + message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}} + idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + 
self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module) + assert ex.value.args[0] == 'Failed to found changes' + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_configure_eventing_exception_handling_case(self, exc_type, mocker, idrac_default_args, + idrac_connection_configure_eventing_mock, + idrac_file_manager_config_eventing_mock): + idrac_default_args.update({"share_name": None, 'share_password': None, + 'share_mnt': None, 'share_user': None}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py new file mode 100644 index 00000000..2606a034 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_services +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestConfigServices(FakeAnsibleModule): + module = dellemc_configure_idrac_services + + @pytest.fixture + def idrac_configure_services_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).create_share_obj = Mock(return_value="servicesstatus") + type(idrac_obj).set_liason_share = Mock(return_value="servicestatus") + return idrac_obj + + @pytest.fixture + def idrac_file_manager_config_services_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_services.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def is_changes_applicable_mock_services(self, mocker): + try: + changes_applicable_mock = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_services.' 
+ 'config_mgr') + except AttributeError: + changes_applicable_mock = MagicMock() + obj = MagicMock() + changes_applicable_mock.is_change_applicable.return_value = obj + return changes_applicable_mock + + @pytest.fixture + def idrac_connection_configure_services_mock(self, mocker, idrac_configure_services_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_services.iDRACConnection', + return_value=idrac_configure_services_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_services_mock + return idrac_configure_services_mock + + def test_main_idrac_services_config_success_Case(self, idrac_connection_configure_services_mock, idrac_default_args, + mocker, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_services.run_idrac_services_config', return_value=message) + with pytest.raises(Exception) as ex: + self._run_module(idrac_default_args) + assert ex.value.args[0]['msg'] == "Failed to configure the iDRAC services." + status_msg = {"Status": "Success", "Message": "No changes found to commit!"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_configure_idrac_services.run_idrac_services_config', return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "No changes found to commit!" + status_msg = {"Status": "Failed"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_services.run_idrac_services_config', return_value=status_msg) + with pytest.raises(Exception) as ex: + self._run_module(idrac_default_args) + assert ex.value.args[0]['msg'] == "Failed to configure the iDRAC services." + + def test_run_idrac_services_config_success_case01(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock, + is_changes_applicable_mock_services): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": True, "message": "changes are applicable"} + idrac_connection_configure_services_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert ex.value.args[0] == "Changes found to commit!" 
+ + def test_run_idrac_services_config_success_case02(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": True, "message": "changes found to commit!", + "Status": "Success"} + idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert msg == {'changes_applicable': True, 'message': 'changes found to commit!', 'Status': 'Success'} + + def test_run_idrac_services_config_success_case03(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = 
message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert msg == {'changes_applicable': False, 'Message': 'No changes found to commit!', + 'changed': False, 'Status': 'Success'} + + def test_run_idrac_services_config_success_case04(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert msg == {'changes_applicable': False, 'Message': 'No changes found to commit!', + 'changed': False, 'Status': 'Success'} + + def test_run_idrac_services_config_success_case05(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": None, "http_port": None, + "https_port": None, "timeout": None, "ssl_encryption": None, + "tls_protocol": None, "snmp_enable": None, + "community_name": None, "snmp_protocol": None, 
"alert_port": None, + "discovery_port": None, "trap_format": None, + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_configure_services_mock.config_mgr.configure_web_server.return_value = message + idrac_connection_configure_services_mock.config_mgr.configure_snmp.return_value = message + idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert msg == {'changes_applicable': False, 'Message': 'No changes found to commit!', + 'changed': False, 'Status': 'Success'} + + def test_run_idrac_services_config_failed_case01(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1"}) + message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}} + idrac_connection_configure_services_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_configure_services_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert ex.value.args[0] == 'status failed in checking Data' + + def 
test_run_idrac_services_config_failed_case02(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1", + "ipmi_lan": {"community_name": "public"}}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "failed"} + idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert msg == {'changes_applicable': False, 'Message': 'No changes were applied', + 'changed': False, 'Status': 'failed'} + + def test_run_idrac_services_config_failed_case03(self, idrac_connection_configure_services_mock, + idrac_default_args, idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "enable_web_server": "Enabled", "http_port": 443, + "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher", + "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled", + "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445, + "discovery_port": 1000, "trap_format": "SNMPv1"}) + message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}} + idrac_connection_configure_services_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + 
idrac_connection_configure_services_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module) + assert ex.value.args[0] == "Failed to found changes" + + def test_main_idrac_configure_fail_case(self, mocker, idrac_default_args, idrac_connection_configure_services_mock, + idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None}) + message = {'changed': False, 'msg': {'Status': "failed", "message": "No changes found to commit!"}} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_configure_idrac_services.run_idrac_services_config', return_value=message) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_idrac_configure_services_exception_handling_case(self, exc_type, mocker, idrac_default_args, + idrac_connection_configure_services_mock, + idrac_file_manager_config_services_mock): + idrac_default_args.update({"share_name": None}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_configure_idrac_services.run_idrac_services_config', side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py new file mode 100644 index 00000000..657f89e4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_get_firmware_inventory +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, PropertyMock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestFirmware(FakeAnsibleModule): + module = dellemc_get_firmware_inventory + + @pytest.fixture + def idrac_firmware_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.update_mgr = idrac_obj + type(idrac_obj).InstalledFirmware = PropertyMock(return_value="msg") + return idrac_obj + + @pytest.fixture + def idrac_get_firmware_inventory_connection_mock(self, mocker, idrac_firmware_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_get_firmware_inventory.iDRACConnection', + return_value=idrac_firmware_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_mock + return idrac_firmware_mock + + def test_main_idrac_get_firmware_inventory_success_case01(self, idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware.return_value = {"Status": "Success"} + result = self._run_module(idrac_default_args) + assert result == {'ansible_facts': { + idrac_get_firmware_inventory_connection_mock.ipaddr: { + 'Firmware Inventory': idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware}}, + "changed": False} + + def test_run_get_firmware_inventory_success_case01(self, idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + obj2 = MagicMock() + idrac_get_firmware_inventory_connection_mock.update_mgr = obj2 + type(obj2).InstalledFirmware = PropertyMock(return_value="msg") + f_module = self.get_module_mock(params=idrac_default_args) + msg, err = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module) + assert msg == {'failed': False, + 'msg': idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware} + assert msg['failed'] is False + assert err is False + + def test_run_get_firmware_inventory_failed_case01(self, idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + f_module = self.get_module_mock(params=idrac_default_args) + error_msg = "Error in Runtime" + obj2 = MagicMock() + idrac_get_firmware_inventory_connection_mock.update_mgr = obj2 + type(obj2).InstalledFirmware = PropertyMock(side_effect=Exception(error_msg)) + msg, err = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module) + assert msg['failed'] is True + assert msg['msg'] == "Error: {0}".format(error_msg) + assert err is True + + def test_run_get_firmware_inventory_failed_case02(self, 
idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + message = {'Status': "Failed", "Message": "Fetched..."} + obj2 = MagicMock() + idrac_get_firmware_inventory_connection_mock.update_mgr = obj2 + type(obj2).InstalledFirmware = PropertyMock(return_value=message) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module) + assert result == ({'msg': {'Status': 'Failed', 'Message': 'Fetched...'}, 'failed': True}, False) + if "Status" in result[0]['msg']: + if not result[0]['msg']['Status'] == "Success": + assert result[0]['failed'] is True + + def test_main_idrac_get_firmware_inventory_faild_case01(self, idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + error_msg = "Error occurs" + obj2 = MagicMock() + idrac_get_firmware_inventory_connection_mock.update_mgr = obj2 + type(obj2).InstalledFirmware = PropertyMock(side_effect=Exception(error_msg)) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + assert result['msg'] == "Error: {0}".format(error_msg) + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_idrac_get_firmware_inventory_exception_handling_case(self, exc_type, mocker, + idrac_get_firmware_inventory_connection_mock, + idrac_default_args): + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_firmware_inventory.' 
+ 'run_get_firmware_inventory', side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py new file mode 100644 index 00000000..c398c9f8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_get_system_inventory +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, Mock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestSystemInventory(FakeAnsibleModule): + module = dellemc_get_system_inventory + + @pytest.fixture + def idrac_system_inventory_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.get_entityjson = idrac_obj + type(idrac_obj).get_json_device = Mock(return_value="msg") + return idrac_obj + + @pytest.fixture + def idrac_get_system_inventory_connection_mock(self, mocker, idrac_system_inventory_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_get_system_inventory.iDRACConnection', + return_value=idrac_system_inventory_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_system_inventory_mock + return idrac_system_inventory_mock + + def test_main_idrac_get_system_inventory_success_case01(self, idrac_get_system_inventory_connection_mock, mocker, + idrac_default_args): + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory', + return_value=({"msg": "Success"}, False)) + msg = self._run_module(idrac_default_args) + assert msg['changed'] is False + assert msg['ansible_facts'] == {idrac_get_system_inventory_connection_mock.ipaddr: + {'SystemInventory': "Success"}} + + def test_run_get_system_inventory_error_case(self, idrac_get_system_inventory_connection_mock, idrac_default_args, + mocker): + f_module = self.get_module_mock() + idrac_get_system_inventory_connection_mock.get_json_device = {"msg": "Success"} + result, err = self.module.run_get_system_inventory(idrac_get_system_inventory_connection_mock, f_module) + assert result["failed"] is True + assert err is True + + def test_main_error_case(self, idrac_get_system_inventory_connection_mock, idrac_default_args, mocker): + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory', + return_value=({"msg": "Failed"}, True)) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_exception_handling_case(self, exc_type, mocker, idrac_default_args, + idrac_get_system_inventory_connection_mock): + + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory', + side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert 
result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py new file mode 100644 index 00000000..1ae8b22c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_idrac_lc_attributes +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestLcAttributes(FakeAnsibleModule): + module = dellemc_idrac_lc_attributes + + @pytest.fixture + def idrac_lc_attributes_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).create_share_obj = Mock(return_value="Status") + type(idrac_obj).set_liason_share = Mock(return_value="Status") + return idrac_obj + + @pytest.fixture + def idrac_connection_lc_attribute_mock(self, mocker, idrac_lc_attributes_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_lc_attributes.iDRACConnection', + return_value=idrac_lc_attributes_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_lc_attributes_mock + return idrac_lc_attributes_mock + + @pytest.fixture + def idrac_file_manager_lc_attribute_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + def test_main_lc_attributes_success_case01(self, idrac_connection_lc_attribute_mock, + idrac_default_args, mocker, idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, 'share_password': None, + 'csior': 'Enabled', 'share_mnt': None, 'share_user': None}) + message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior', + return_value=message) + with pytest.raises(Exception) as ex: + self._run_module(idrac_default_args) + assert ex.value.args[0]['msg'] == "Failed to configure the iDRAC LC attributes." + status_msg = {"Status": "Success"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior', + return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully configured the iDRAC LC attributes." 
+ status_msg = {"Status": "Success", "Message": "No changes were applied"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior', + return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "No changes were applied" + + def test_run_setup_idrac_csior_success_case01(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": "csior"}) + message = {"changes_applicable": True, "message": "changes are applicable"} + idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert ex.value.args[0] == "Changes found to commit!" + status_msg = {"changes_applicable": False, "message": "no changes are applicable"} + idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = status_msg + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert ex.value.args[0] == "No changes found to commit!" 
+ + def test_run_setup_idrac_csior_success_case02(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": "scr"}) + message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True, + "Status": "Success"} + idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert msg == {'changes_applicable': True, 'message': 'changes found to commit!', + 'changed': True, 'Status': 'Success'} + + def test_run_setup_idrac_csior_success_case03(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": "scr"}) + message = {"changes_applicable": True, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert msg == {'changes_applicable': True, 'Message': 'No changes found to commit!', + 'changed': False, 'Status': 'Success'} + + def test_run_setup_csior_disable_case(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": 'Disabled'}) + message = {"changes_applicable": True} + obj = MagicMock() + idrac_connection_lc_attribute_mock.config_mgr = obj + 
type(obj).disable_csior = Mock(return_value=message) + idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert ex.value.args[0] == "Changes found to commit!" + + def test_run_setup_csior_enable_case(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "share_password": "sharepassword", "csior": 'Enabled'}) + message = {"changes_applicable": True} + obj = MagicMock() + idrac_connection_lc_attribute_mock.config_mgr = obj + type(obj).enable_csior = Mock(return_value='Enabled') + idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + with pytest.raises(Exception) as ex: + self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert ex.value.args[0] == "Changes found to commit!" 
+ + def test_run_setup_csior_failed_case01(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": "csior"}) + message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}} + idrac_connection_lc_attribute_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_lc_attribute_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert ex.value.args[0] == "status failed in checking Data" + + def test_run_setup_idrac_csior_failed_case03(self, idrac_connection_lc_attribute_mock, idrac_default_args, + idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "csior": "scr"}) + message = {"changes_applicable": False, "Message": "Failed to found changes", "changed": False, + "Status": "Failed", "failed": True} + idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module) + assert msg == {'changes_applicable': False, 'Message': 'Failed to found changes', + 'changed': False, 'Status': 'Failed', "failed": True} + assert msg['changed'] is False + assert msg['failed'] is True + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_lc_attribute_exception_handling_case(self, exc_type, mocker, idrac_connection_lc_attribute_mock, + idrac_default_args, idrac_file_manager_lc_attribute_mock): + idrac_default_args.update({"share_name": None, 
'share_password': None, + 'csior': 'Enabled', 'share_mnt': None, 'share_user': None}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior', + side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py new file mode 100644 index 00000000..c3a0dff1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py @@ -0,0 +1,437 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import os +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_idrac_storage_volume +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestStorageVolume(FakeAnsibleModule): + module = dellemc_idrac_storage_volume + + @pytest.fixture + def idrac_storage_volume_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).create_share_obj = Mock(return_value="servicesstatus") + type(idrac_obj).set_liason_share = Mock(return_value="servicestatus") + return idrac_obj + + @pytest.fixture + def 
idrac_connection_storage_volume_mock(self, mocker, idrac_storage_volume_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.iDRACConnection', + return_value=idrac_storage_volume_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_storage_volume_mock + return idrac_storage_volume_mock + + @pytest.fixture + def idrac_file_manager_storage_volume_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + def test_main_idrac_storage_volume_success_Case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"disk_cache_policy": "Default", "capacity": 12.4, "media_type": "HDD", + "number_dedicated_hot_spare": 1, "protocol": "SAS", "raid_init_operation": "None", + "raid_reset_config": True, "read_cache_policy": "ReadAhead", "span_depth": 4, + "span_length": 3, "state": "create", "stripe_size": 2, "volume_type": "RAID 0", + "write_cache_policy": "WriteThrough"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume._validate_options', return_value='state') + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_storage_volume.run_server_raid_config', return_value={"changes_applicable": True}) + msg = self._run_module(idrac_default_args) + assert msg == {'changed': True, 'msg': 'Successfully completed the create storage volume operation', + 'storage_status': {'changes_applicable': True}} + assert msg["msg"] == "Successfully completed the {0} storage volume operation".format("create") + + def test_main_idrac_storage_volume_fail_Case1(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"disk_cache_policy": "Default", "capacity": 12.4, "media_type": "HDD", + "number_dedicated_hot_spare": 1, "protocol": "SAS", "raid_init_operation": "None", + "raid_reset_config": True, "read_cache_policy": "ReadAhead", "span_depth": 4, + "span_length": 3, "state": "create", "stripe_size": 2, "volume_type": "RAID 0", + "write_cache_policy": "WriteThrough"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume._validate_options', return_value='state') + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.run_server_raid_config', return_value={"storage_status": "pressent"}) + result = self._run_module_with_fail_json(idrac_default_args) + assert result == {'failed': True, 'msg': 'Failed to perform storage operation'} + + def test_main_idrac_storage_volume_success_case01(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"disk_cache_policy": "Default", "capacity": 12.4, "media_type": "HDD", + "number_dedicated_hot_spare": 1, "protocol": "SAS", "raid_init_operation": "None", + "raid_reset_config": True, "read_cache_policy": "ReadAhead", "span_depth": 4, + "span_length": 3, "state": "create", "stripe_size": 2, "volume_type": "RAID 0", + "write_cache_policy": "WriteThrough"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_storage_volume._validate_options', return_value='state') + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.run_server_raid_config', return_value={"Status": "Success", + "changed": True}) + msg = self._run_module(idrac_default_args) + assert msg == {'changed': True, 'msg': 'Successfully completed the create storage volume operation', + 'storage_status': {'Status': 'Success', 'changed': True}} + + def test_main_idrac_storage_volume_success_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"disk_cache_policy": "Default", "capacity": 12.4, "media_type": "HDD", + "number_dedicated_hot_spare": 1, "protocol": "SAS", "raid_init_operation": "None", + "raid_reset_config": True, "read_cache_policy": "ReadAhead", "span_depth": 4, + "span_length": 3, "state": "create", "stripe_size": 2, "volume_type": "RAID 0", + "write_cache_policy": "WriteThrough"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume._validate_options', return_value='state') + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_storage_volume.run_server_raid_config', + return_value={"Status": "Success", "changed": False, "Message": "No changes found to commit!"}) + msg = self._run_module(idrac_default_args) + assert msg == {'changed': False, 'msg': 'No changes found to commit!', + 'storage_status': {'Message': 'No changes found to commit!', + 'Status': 'Success', + 'changed': False}} + + def test_main_idrac_storage_volume_success_case03(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"disk_cache_policy": "Default", "capacity": 12.4, + "media_type": "HDD", + "number_dedicated_hot_spare": 1, "protocol": "SAS", "raid_init_operation": "None", + "raid_reset_config": True, "read_cache_policy": "ReadAhead", "span_depth": 4, + "span_length": 3, "state": "create", "stripe_size": 2, "volume_type": "RAID 0", + "write_cache_policy": "WriteThrough"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume._validate_options', return_value='state') + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.run_server_raid_config', + return_value={"Status": "Success", "changed": True, "Message": "Nooo changes found to commit!"}) + msg = self._run_module(idrac_default_args) + assert msg['msg'] == "Successfully completed the create storage volume operation" + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError, TypeError]) + def test_main_idrac_storage_volume_exception_handling_case(self, exc_type, mocker, + idrac_connection_storage_volume_mock, + idrac_default_args): + idrac_default_args.update({"share_name": "sharename"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume._validate_options', side_effect=exc_type('test')) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_storage_volume.run_server_raid_config', side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True + # with pytest.raises(Exception) as exc: + # self._run_module_with_fail_json(idrac_default_args) + # assert exc.value.args[0] == "msg" + + def test_run_server_raid_config_create_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.view_storage', return_value="view") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.create_storage', return_value="create") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.delete_storage', return_value="delete") + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.run_server_raid_config(idrac_connection_storage_volume_mock, f_module) + assert result == 'create' + + def test_run_server_raid_config_view_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "view"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.view_storage', return_value="view") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.create_storage', return_value="create") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_idrac_storage_volume.delete_storage', return_value="delete") + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.run_server_raid_config(idrac_connection_storage_volume_mock, f_module) + assert result == 'view' + + def test_run_server_raid_config_delete_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "delete"}) + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.view_storage', return_value="view") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.create_storage', return_value="create") + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' + 'dellemc_idrac_storage_volume.delete_storage', return_value="delete") + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.run_server_raid_config(idrac_connection_storage_volume_mock, f_module) + assert result == 'delete' + + def test_validate_options_controller_id_error_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": ""}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Controller ID is required." == str(ex.value) + + def test_validate_options_capacity_error_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": -1.4}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." 
+ "error_handling_for_negative_num", return_value=("capacity", -3.4)) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert str(("capacity", -3.4)) == str(ex.value) + + def test_validate_options_strip_size_error_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": -1}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." + "error_handling_for_negative_num", return_value=("stripe_size", -1)) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert str(("stripe_size", -1)) == str(ex.value) + + def test_validate_options_volume_error_case01(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, "volumes": [{"drives": {"id": ["data"], + "location":[1]}}]}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Either {0} or {1} is allowed".format("id", "location") == str(ex.value) + + def test_validate_options_volume_error_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, "volumes": [{"drives": {}}]}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Drives must be defined for volume creation." 
== str(ex.value) + + def test_validate_create_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, + "volumes": [{"drives": {'data': ""}}]}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Either {0} or {1} should be specified".format("id", "location") == str(ex.value) + + def test_validate_create_success_case_volumes_stripe_size(self, idrac_connection_storage_volume_mock, + idrac_default_args, mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, + "volumes": [{"drives": {'location': [1]}, "stripe_size": -1}]}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." + "error_handling_for_negative_num", return_value=("stripe_size", -1)) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert str(("stripe_size", -1)) == str(ex.value) + + def test_validate_create_success_case_volumes_capacity(self, idrac_connection_storage_volume_mock, + idrac_default_args, mocker): + idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, + "volumes": [{"drives": {'location': [0]}, "capacity": -1.1}]}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." 
+ "error_handling_for_negative_num", return_value=("capacity", -1.1)) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert str(("capacity", -1.1)) == str(ex.value) + + def test_validate_option_delete_success_case01(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "delete", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, + "volumes": {"drives": {"Id": "", "location": ""}, "capacity": 1.4, + "stripe_size": 1}}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Virtual disk name is a required parameter for remove virtual disk operations." == str(ex.value) + + def test_validate_option_delete_success_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"share_name": "sharename", "state": "delete", "controller_id": "XYZ123", + "capacity": 1.4, "stripe_size": 1, + "volumes": None}) + with pytest.raises(ValueError) as ex: + self.module._validate_options(idrac_default_args) + assert "Virtual disk name is a required parameter for remove virtual disk operations." 
== str(ex.value) + + def test_error_handling_for_negative_num(self, idrac_connection_storage_volume_mock, idrac_default_args): + msg = self.module.error_handling_for_negative_num("capacity", -1.0) + assert msg == "{0} cannot be a negative number or zero,got {1}".format("capacity", -1.0) + + def test_set_liason_share_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + idrac_file_manager_storage_volume_mock): + idrac_default_args.update({"share_name": "sharename", "state": "delete", "share_path": "sharpath"}) + message = {"Status": 'Failed', "Data": {'Message': "Failed to set Liason share"}} + obj = MagicMock() + idrac_connection_storage_volume_mock.tempfile.gettempdir() + os.sep + idrac_connection_storage_volume_mock.file_share_manager.create_share_obj.return_value = message + idrac_connection_storage_volume_mock.config_mgr = obj + obj.set_liason_share = Mock(return_value=message) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.set_liason_share(idrac_connection_storage_volume_mock, f_module) + assert "Failed to set Liason share" == str(ex.value) + + def test_view_storage_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args): + idrac_default_args.update({"controller_id": "controller", "volume_id": "virtual_disk"}) + msg = {"Status": "Success"} + obj = MagicMock() + idrac_connection_storage_volume_mock.config_mgr.RaidHelper = obj + obj.view_storage = Mock(return_value=msg) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.view_storage(idrac_connection_storage_volume_mock, f_module) + assert result == {"Status": "Success"} + + def test_view_storage_failed_case(self, idrac_connection_storage_volume_mock, idrac_default_args): + idrac_default_args.update({"controller_id": "controller", "volume_id": "virtual_disk"}) + msg = {"Status": "Failed", "msg": "Failed to fetch storage details"} + obj = MagicMock() + 
idrac_connection_storage_volume_mock.config_mgr.RaidHelper = obj + obj.view_storage = Mock(return_value=msg) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.view_storage(idrac_connection_storage_volume_mock, f_module) + assert "Failed to fetch storage details" == str(ex.value) + + def test_delete_storage_case(self, idrac_connection_storage_volume_mock, idrac_default_args): + idrac_default_args.update({"volumes": [{"name": "nameofvolume"}]}) + msg = {"Status": "Success"} + obj = MagicMock() + idrac_connection_storage_volume_mock.config_mgr.RaidHelper = obj + obj.delete_virtual_disk = Mock(return_value=msg) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.delete_storage(idrac_connection_storage_volume_mock, f_module) + assert result == {"Status": "Success"} + + def test_create_storage_success_case01(self, idrac_connection_storage_volume_mock, idrac_default_args, mocker): + idrac_default_args.update({"volumes": {"name": "volume1"}, "controller_id": "x56y"}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." + "multiple_vd_config", return_value={"name": "volume1", "stripe_size": 1.3}) + obj = MagicMock() + idrac_connection_storage_volume_mock.config_mgr.RaidHelper = obj + obj.new_virtual_disk = Mock(return_value=[{"name": "volume1", "stripe_size": 1.3}]) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.create_storage(idrac_connection_storage_volume_mock, f_module) + assert result == [{'name': 'volume1', 'stripe_size': 1.3}] + + def test_create_storage_success_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, mocker): + idrac_default_args.update({"volumes": None, "controller_id": "x56y"}) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_storage_volume." 
+ "multiple_vd_config", return_value={"name": "volume1", "stripe_size": 1.3}) + obj = MagicMock() + idrac_connection_storage_volume_mock.config_mgr.RaidHelper = obj + obj.new_virtual_disk = Mock(return_value=[{"name": "volume1", "stripe_size": 1.3}]) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.create_storage(idrac_connection_storage_volume_mock, f_module) + assert result == [{'name': 'volume1', 'stripe_size': 1.3}] + + def test_multiple_vd_config_success_case(self, idrac_connection_storage_volume_mock, idrac_default_args, mocker): + idrac_default_args.update({"name": "name1", "media_type": 'HDD', "protocol": "SAS", "drives": None, + "capacity": 2, "raid_init_operation": 'Fast', 'raid_reset_config': True, + "span_depth": 1, "span_length": 1, "number_dedicated_hot_spare": 0, + "volume_type": 'RAID 0', "disk_cache_policy": "Default", + "write_cache_policy": "WriteThrough", "read_cache_policy": "NoReadAhead", + "stripe_size": 64 * 1024}) + result = self.module.multiple_vd_config({'name': 'volume1', 'stripe_size': 1.3, "capacity": 1, + "drives": {"id": "id", "location": "location"}}, "", + {"media_type": "HDD", "protocol": "NAS", "raid_init_operation": "Fast", + 'raid_reset_config': True, "span_depth": 1, "span_length": 1, + "number_dedicated_hot_spare": 0, "volume_type": 'RAID 0', + "disk_cache_policy": "Default", "write_cache_policy": "WriteThrough", + "read_cache_policy": "NoReadAhead", "stripe_size": 64 * 1024}) + assert result["mediatype"] == 'HDD' + + def test_multiple_vd_config_capacity_none_case(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"name": "name1", "media_type": 'HDD', "protocol": "SAS", "drives": {"id": ["id1"], + "location": [1]}, + "capacity": None, "raid_init_operation": 'Fast', 'raid_reset_config': True, + "span_depth": 1, "span_length": 1, "number_dedicated_hot_spare": 0, + "volume_type": 'RAID 0', "disk_cache_policy": "Default", "stripe_size": 64 * 
1024, + "write_cache_policy": "WriteThrough", "read_cache_policy": "NoReadAhead"}) + result = self.module.multiple_vd_config({"media_type": 'HDD', "protocol": "SAS", "drives": None, + "capacity": 2, "raid_init_operation": 'Fast', + 'raid_reset_config': True, "span_depth": 1, "span_length": 1, + "number_dedicated_hot_spare": 0, "volume_type": 'RAID 0', + "disk_cache_policy": "Default", "stripe_size": 64 * 1024, + "write_cache_policy": "WriteThrough", + "read_cache_policy": "NoReadAhead"}, "", {"protocol": "SAS"}) + assert result["mediatype"] == "HDD" + + def test_multiple_vd_config_capacity_none_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"name": "name1", "media_type": None, "protocol": "SAS", "drives": {"id": ["id1"]}, + "capacity": None, "raid_init_operation": None, 'raid_reset_config': True, + "span_depth": 1, "span_length": 1, "number_dedicated_hot_spare": 0, + "volume_type": 'RAID 0', "disk_cache_policy": "Default", "stripe_size": 64 * 1024, + "write_cache_policy": "WriteThrough", "read_cache_policy": "NoReadAhead"}) + result = self.module.multiple_vd_config({'name': 'volume1', 'stripe_size': 1.3, "capacity": 1, + "drives": {"id": ["id"]}}, "", + {"media_type": None, "protocol": "SAS", "raid_init_operation": None, + 'raid_reset_config': True, "span_depth": 1, "span_length": 1, + "number_dedicated_hot_spare": 0, "volume_type": 'RAID 0', + "disk_cache_policy": "Default", "write_cache_policy": "WriteThrough", + "read_cache_policy": "NoReadAhead", "stripe_size": 64 * 1024}) + assert result['Name'] == 'volume1' + + def test_multiple_vd_config_capacity_none_case1(self, idrac_connection_storage_volume_mock, idrac_default_args, + mocker): + idrac_default_args.update({"name": "name1", "media_type": 'HDD', "protocol": "SAS", "drives": {"id": ["id1"]}, + "capacity": None, "raid_init_operation": None, 'raid_reset_config': False, + "span_depth": 1, "span_length": 1, "number_dedicated_hot_spare": 0, + 
"volume_type": 'RAID 0', "disk_cache_policy": "Default", "stripe_size": 64 * 1024, + "write_cache_policy": "WriteThrough", "read_cache_policy": "NoReadAhead"}) + result = self.module.multiple_vd_config({"media_type": 'HDD', "protocol": "SAS", "drives": None, + "capacity": None, "raid_init_operation": None, + 'raid_reset_config': False, "span_depth": 1, "span_length": 1, + "number_dedicated_hot_spare": 0, "volume_type": 'RAID 0', + "disk_cache_policy": "Default", "stripe_size": 64 * 1024, + "write_cache_policy": "WriteThrough", + "read_cache_policy": "NoReadAhead"}, "", {"protocol": "NAS"}) + assert result["StripeSize"] == 65536 + + def test_multiple_vd_config_success_case02(self, idrac_connection_storage_volume_mock, idrac_default_args, mocker): + idrac_default_args.update({"name": "name1", "media_type": 'HDD', "protocol": "SAS", "drives": None, + "capacity": 2, "raid_init_operation": 'Fast', 'raid_reset_config': True, + "span_depth": 1, "span_length": 1, "number_dedicated_hot_spare": 0, + "volume_type": 'RAID 0', "disk_cache_policy": "Default", + "write_cache_policy": "WriteThrough", "read_cache_policy": "NoReadAhead", + "stripe_size": 64 * 1024}) + result = self.module.multiple_vd_config({'name': 'volume1', "capacity": 1, + "media_type": None, "protocol": None, + "raid_init_operation": "Fast", + 'raid_reset_config': False, "span_depth": 1, "span_length": 1, + "number_dedicated_hot_spare": 0, "volume_type": 'RAID 0', + "disk_cache_policy": "Default", "stripe_size": 64 * 1024, + "write_cache_policy": "WriteThrough", + "read_cache_policy": "NoReadAhead"}, "", {}) + assert result["StripeSize"] == 65536 diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py new file mode 100644 index 00000000..768c62bf --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_system_lockdown_mode +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, Mock +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestSysytemLockdownMode(FakeAnsibleModule): + module = dellemc_system_lockdown_mode + + @pytest.fixture + def idrac_system_lockdown_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + return idrac_obj + + @pytest.fixture + def idrac_file_manager_system_lockdown_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_system_lockdown_mode.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def idrac_connection_system_lockdown_mode_mock(self, mocker, idrac_system_lockdown_mock): + idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.' 
+ 'dellemc_system_lockdown_mode.iDRACConnection', + return_value=idrac_system_lockdown_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_system_lockdown_mock + return idrac_system_lockdown_mock + + def test_main_system_lockdown_mode_success_case01(self, idrac_connection_system_lockdown_mode_mock, mocker, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Enabled"}) + message = {"Status": "Success", "msg": "Lockdown mode of the system is configured.", + "changed": True, "system_lockdown_status": {"Status": "Success"}} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_system_lockdown_mode.run_system_lockdown_mode', + return_value=message) + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = message + result = self._run_module(idrac_default_args) + assert result["msg"] == "Lockdown mode of the system is configured." + + def test_main_system_lockdown_mode_fail_case(self, idrac_connection_system_lockdown_mode_mock, mocker, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Enabled"}) + message = {"Status": "Failed", "msg": "Failed to complete the lockdown mode operations.", + "system_lockdown_status": {}, "failed": True, "changed": False} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_system_lockdown_mode.run_system_lockdown_mode', + return_value=message) + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = message + with pytest.raises(Exception) as ex: + self._run_module_with_fail_json(idrac_default_args) + assert ex.value.args[0]['msg'] == "Failed to complete the lockdown mode operations." 
+ + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_exception_handling_case(self, exc_type, mocker, idrac_connection_system_lockdown_mode_mock, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Enabled"}) + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = {"Status": "Failed"} + mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_system_lockdown_mode.run_system_lockdown_mode', + side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True + + def test_run_system_lockdown_mode_success_case01(self, idrac_connection_system_lockdown_mode_mock, mocker, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Enabled", "share_mnt": None, "share_user": None}) + message = {"Status": "Success", "msg": "Lockdown mode of the system is configured.", + "changed": True, "system_lockdown_status": {"Status": "Success"}} + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = message + idrac_connection_system_lockdown_mode_mock.config_mgr.enable_system_lockdown.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module) + assert msg['msg'] == "Successfully completed the lockdown mode operations." 
+ + def test_run_system_lockdown_mode_failed_case01(self, idrac_connection_system_lockdown_mode_mock, mocker, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Disabled", "share_mnt": None, "share_user": None}) + message = {"Status": "failed"} + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = message + idrac_connection_system_lockdown_mode_mock.config_mgr.disable_system_lockdown.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module) + assert ex.value.args[0] == 'Failed to complete the lockdown mode operations.' + + def test_run_system_lockdown_mode_failed_case02(self, idrac_connection_system_lockdown_mode_mock, mocker, + idrac_file_manager_system_lockdown_mock, idrac_default_args): + idrac_default_args.update({"share_name": None, "share_password": None, + "lockdown_mode": "Enabled", "share_mnt": None, "share_user": None}) + message = {"Status": "Failed", "Message": "message inside data"} + idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = message + idrac_connection_system_lockdown_mode_mock.config_mgr.enable_system_lockdown.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module) + assert ex.value.args[0] == "message inside data" diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py new file mode 100644 index 00000000..d5c22523 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import os +import tempfile +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_attributes +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from mock import MagicMock + +SUCCESS_MSG = "Successfully updated the attributes." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +SYSTEM_ID = "System.Embedded.1" +MANAGER_ID = "iDRAC.Embedded.1" +LC_ID = "LifecycleController.Embedded.1" +IDRAC_URI = "/redfish/v1/Managers/{res_id}/Oem/Dell/DellAttributes/{attr_id}" +MANAGERS_URI = "/redfish/v1/Managers" +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_attributes.' +UTILS_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.utils.' 
+ + +@pytest.fixture +def idrac_redfish_mock_for_attr(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestIdracAttributes(FakeAnsibleModule): + module = idrac_attributes + + @pytest.fixture + def idrac_attributes_mock(self): + idrac_obj = MagicMock() + return idrac_obj + + @pytest.fixture + def idrac_connection_attributes_mock(self, mocker, idrac_attributes_mock): + idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI', + return_value=idrac_attributes_mock) + idrac_conn_mock.return_value.__enter__.return_value = idrac_attributes_mock + return idrac_conn_mock + + @pytest.mark.parametrize("params", [{"id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'}, + "uri_dict": + {"iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "response_attr": {"SNMP.1.AgentCommunity": "Disabled"}}]) + def test_get_response_attr(self, params, idrac_redfish_mock_for_attr, ome_response_mock): + ome_response_mock.success = params.get("success", True) + diff, response_attr = self.module.get_response_attr(idrac_redfish_mock_for_attr, params["id"], params["attr"], params["uri_dict"]) + assert response_attr.keys() == params["response_attr"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'public'}, + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": 
"/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "response_attr": {"SNMP.1.AgentCommunity": "public"}, + "mparams": {'idrac_attributes': {"SNMP.1.AgentCommunity": "public"} + } + }]) + def _test_fetch_idrac_uri_attr(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr =\ + self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, params["res_id"]) + assert idrac_response_attr.keys() == params["response_attr"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'}, + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "response_attr": {"ThermalSettings.1.ThermalProfile": "Sound Cap"}, + "mparams": {'system_attributes': {"ThermalSettings.1.ThermalProfile": "Sound Cap"} + }}]) + def _test_fetch_idrac_uri_attr_succes_case01(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = self.module.fetch_idrac_uri_attr( + idrac_redfish_mock_for_attr, f_module, params["res_id"]) + assert system_response_attr.keys() == params["response_attr"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": 
{'SNMP.1.AgentCommunity': 'Disabled'}, + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "response_attr": {"LCAttributes.1.AutoUpdate": "Enabled"}, + "mparams": {'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"} + }}]) + def _test_fetch_idrac_uri_attr_succes_case02(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = self.module.fetch_idrac_uri_attr( + idrac_redfish_mock_for_attr, f_module, params["res_id"]) + assert lc_response_attr.keys() == params["response_attr"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'}, + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "response_attr": {"SNMP.1.AgentCommunity": "Disabled"}, + "mparams": {'idrac_attributes': {"SNMP.1.AgentCommunity": "Enabled"} + }, + "system_response_attr": {}, + "lc_response_attr": {}, + "resp": { + "iDRAC": { + "@Message.ExtendedInfo": [ + { + "Message": "The request completed successfully.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.12.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + 
}, + { + "Message": "The operation successfully completed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.7.SYS413", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + "Severity": "Informational" + } + ] + } + }}]) + def test_update_idrac_attributes(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + resp = self.module.update_idrac_attributes(idrac_redfish_mock_for_attr, f_module, params["uri_dict"], + params["response_attr"], params["system_response_attr"], + params["lc_response_attr"]) + assert resp.keys() == params["resp"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "system_response_attr": {"ThermalSettings.1.ThermalProfile": "Sound Cap"}, + "mparams": {'system_attributes': {"ThermalSettings.1.ThermalProfile": "Sound Cap"} + }, + "idrac_response_attr": {}, + "lc_response_attr": {}, + "resp": { + "System": { + "@Message.ExtendedInfo": [ + { + "Message": "The request completed successfully.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.12.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + }, + { + "Message": "The operation successfully completed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.7.SYS413", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + 
"Severity": "Informational" + } + ] + } + }}]) + def test_update_idrac_attributes_case01(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + resp = self.module.update_idrac_attributes(idrac_redfish_mock_for_attr, f_module, params["uri_dict"], + params["idrac_response_attr"], params["system_response_attr"], + params["lc_response_attr"]) + assert resp.keys() == params["resp"].keys() + + @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "lc_response_attr": {"LCAttributes.1.AutoUpdate": "Enabled"}, + "mparams": { + 'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"} + }, + "idrac_response_attr": {}, + "system_response_attr": {}, + "resp": { + "Lifecycle Controller": { + "@Message.ExtendedInfo": [ + { + "Message": "The request completed successfully.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.12.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + }, + { + "Message": "The operation successfully completed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.7.SYS413", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + "Severity": "Informational" + } + ] + } + }}]) + def test_update_idrac_attributes_case02(self, params, idrac_redfish_mock_for_attr, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = 
self.get_module_mock(params=idrac_default_args) + resp = self.module.update_idrac_attributes(idrac_redfish_mock_for_attr, f_module, params["uri_dict"], + params["idrac_response_attr"], params["system_response_attr"], + params["lc_response_attr"]) + assert resp.keys() == params["resp"].keys() + + @pytest.mark.parametrize("params", + [{"json_data": {}, + "diff": 1, + "uri_dict": { + "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1", + "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1", + "LifecycleController.Embedded.1": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"}, + "system_response_attr": {"ThermalSettings.1.ThermalProfile": "Sound Cap"}, + "mparams": {'system_attributes': {"ThermalSettings.1.ThermalProfile": "Sound Cap"}}, + "idrac_response_attr": {}, + "lc_response_attr": {}, + "message": "Successfully updated the attributes." + }]) + def _test_idrac_attributes(self, params, idrac_connection_attributes_mock, idrac_default_args, mocker): + idrac_connection_attributes_mock.success = params.get("success", True) + idrac_connection_attributes_mock.json_data = params.get('json_data') + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + mocker.patch(UTILS_PATH + 'get_manager_res_id', return_value=MANAGER_ID) + mocker.patch(MODULE_PATH + 'fetch_idrac_uri_attr', return_value=(params["diff"], + params["uri_dict"], + params["idrac_response_attr"], + params["system_response_attr"], + params["lc_response_attr"])) + mocker.patch(MODULE_PATH + 'update_idrac_attributes', return_value=params["resp"]) + result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("exc_type", [HTTPError, URLError]) + def _test_main_idrac_attributes_exception_handling_case(self, 
exc_type, idrac_connection_attributes_mock, idrac_default_args, mocker): + idrac_default_args.update({'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"}}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError]: + mocker.patch( + MODULE_PATH + 'update_idrac_attributes', + side_effect=exc_type('test')) + else: + mocker.patch( + MODULE_PATH + 'update_idrac_attributes', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py new file mode 100644 index 00000000..3ea74c90 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py @@ -0,0 +1,587 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.2.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_bios +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from mock import MagicMock +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_bios.' + +BIOS_JOB_RUNNING = "BIOS Config job is running. Wait for the job to complete." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +SUCCESS_CLEAR = "Successfully cleared the pending BIOS attributes." +SUCCESS_COMPLETE = "Successfully applied the BIOS attributes update." +SCHEDULED_SUCCESS = "Successfully scheduled the job for the BIOS attributes update." +COMMITTED_SUCCESS = "Successfully committed changes. The job is in pending state. The changes will be applied {0}" +RESET_TRIGGERRED = "Reset BIOS action triggered successfully." +HOST_RESTART_FAILED = "Unable to restart the host. Check the host status and restart the host manually." +BIOS_RESET_TRIGGERED = "The BIOS reset action has been triggered successfully. The host reboot is complete." +BIOS_RESET_COMPLETE = "BIOS reset to defaults has been completed successfully." +BIOS_RESET_PENDING = "Pending attributes to be applied. " \ + "Clear or apply the pending changes before resetting the BIOS." +FORCE_BIOS_DELETE = "The BIOS configuration job is scheduled. Use 'force' to delete the job." +INVALID_ATTRIBUTES_MSG = "The values specified for the attributes are invalid." +UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported." 
+MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}." +MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \ + "provide a future time to schedule the maintenance window." + + +@pytest.fixture +def idrac_redfish_mock_for_bios(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestConfigBios(FakeAnsibleModule): + module = idrac_bios + + @pytest.fixture + def idrac_configure_bios_mock(self): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.config_mgr = idrac_obj + return idrac_obj + + @pytest.fixture + def idrac_connection_configure_bios_mock(self, mocker, idrac_configure_bios_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'iDRACConnection', + return_value=idrac_configure_bios_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_bios_mock + return idrac_configure_bios_mock + + @pytest.mark.parametrize("params", [ + {"json_data": {"Attributes": {}}, 'message': NO_CHANGES_MSG, + "success": True, 'mparams': {'clear_pending': True}}, + {"json_data": {"Attributes": {}}, 'message': NO_CHANGES_MSG, + "success": True, 'mparams': {'clear_pending': True}, "check_mode": True}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': BIOS_JOB_RUNNING, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": ("job1", "Running")}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': BIOS_JOB_RUNNING, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": ("job1", "Starting")}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': SUCCESS_CLEAR, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": 
("job1", "Scheduled")}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': CHANGES_MSG, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "check_mode": True}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': CHANGES_MSG, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": ("job1", "Scheduler"), "check_mode": True}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': SUCCESS_CLEAR, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": (None, "Scheduled")}, + {"json_data": {"Attributes": {"test": "value"}}, 'message': CHANGES_MSG, + "success": True, 'mparams': {'clear_pending': True}, + "check_scheduled_bios_job": (None, "Scheduled"), "check_mode": True}, + {"json_data": {"Attributes": {"test": "value"}, + "Members": [ + {"Id": "job_1", "JobType": "RAIDConfiguration", "JobState": "Scheduled"}, + {"Id": "job_1", "JobType": "BIOSConfiguration", "JobState": "Scheduled"}]}, + 'message': SUCCESS_CLEAR, + "success": True, 'mparams': {'clear_pending': True}}, + {"json_data": {"Attributes": {"test": "value"}, + "Members": [{"Id": "job_1", "JobType": "BIOSConfiguration", "JobState": "Running"}]}, + 'message': BIOS_JOB_RUNNING, + "success": True, 'mparams': {'clear_pending': True}}, + {"json_data": {"Attributes": {"test": "value"}, + "Members": [{"Id": "job_1", "JobType": "BIOSConfiguration", "JobState": "Starting"}]}, + 'message': BIOS_JOB_RUNNING, + "success": True, 'mparams': {'clear_pending': True}}, + ]) + def test_idrac_bios_clear_pending(self, params, idrac_redfish_mock_for_bios, ome_response_mock, idrac_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get('json_data') + mocks = ["get_pending_attributes", "check_scheduled_bios_job", "delete_scheduled_bios_job"] + for m in mocks: + if m in params: + mocker.patch(MODULE_PATH + m, 
return_value=params.get(m, {})) + idrac_default_args.update(params['mparams']) + result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False)) + assert result['status_msg'] == params['message'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"Attributes": {}}, 'message': BIOS_RESET_TRIGGERED, + "reset_host": True, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"Attributes": {"BootMode": "Uefi"}}, 'message': BIOS_RESET_PENDING, + "reset_host": True, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"DateTime": "2022-09-14T05:59:35-05:00", + "DateTimeLocalOffset": "-05:00", + "Members": [{"Created": "2022-09-14T05:59:20-05:00", "MessageId": "SYS1003"}, + {"Created": "2022-09-14T05:59:10-05:00", "MessageId": "UEFI0157"}, + {"Created": "2022-09-14T05:59:30-05:00", "MessageId": "SYS1002"}], + "Entries": { + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries" + }, + "Attributes": {}}, + 'message': BIOS_RESET_TRIGGERED, "reset_host": True, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"DateTime": "2022-09-14T05:59:35-05:00", + "DateTimeLocalOffset": "-05:00", + "Members": [{"Created": "2022-09-14T05:59:20-05:00", "MessageId": "SYS1003"}, + {"Created": "2022-09-14T05:59:10-05:00", "MessageId": "UEFI0157"}, + {"Created": "2022-09-14T05:59:40-05:00", "MessageId": "SYS1002"}], + "Entries": { + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries" + }, + "Attributes": {}}, + 'message': BIOS_RESET_COMPLETE, "reset_host": True, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"Attributes": {}}, 'message': CHANGES_MSG, + "reset_host": True, "check_mode": True, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"PowerState": "On"}, 'message': BIOS_RESET_TRIGGERED, + "success": True, 'mparams': {'reset_bios': True, "reset_type": "force_restart"}}, + {"json_data": {"PowerState": "Off"}, 
'message': "{0} {1}".format(RESET_TRIGGERRED, HOST_RESTART_FAILED), + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"PowerState": "On"}, 'message': HOST_RESTART_FAILED, + "get_power_state": "On", "power_act_host": False, + "success": True, 'mparams': {'reset_bios': True}}, + {"json_data": {"PowerState": "On"}, 'message': HOST_RESTART_FAILED, + "get_power_state": "Off", "power_act_host": False, + "success": True, 'mparams': {'reset_bios': True}}, + ]) + def test_idrac_bios_reset_bios(self, params, idrac_redfish_mock_for_bios, ome_response_mock, idrac_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get('json_data') + mocks = ["reset_host", "get_power_state", "track_power_state", "power_act_host"] + for m in mocks: + if m in params: + mocker.patch(MODULE_PATH + m, return_value=params.get(m, {})) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.module_utils.utils." + 'time.sleep', + return_value=None) + idrac_default_args.update(params['mparams']) + result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False)) + assert result['status_msg'] == params['message'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"Attributes": {"NumLock": "On"}}, 'message': NO_CHANGES_MSG, + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "On"}}}, + {"json_data": {"Attributes": {}, + "RegistryEntries": { + "Attributes": [ + { + "AttributeName": "SystemModelName", + "ReadOnly": True, + "Type": "String" + }, { + "AttributeName": "MemoryMode", + "ReadOnly": False, + "Type": "Enumeration", + "Value": [ + { + "ValueDisplayName": "Off", + "ValueName": "PersistentMemoryOff" + }, + { + "ValueDisplayName": "Non-Volatile DIMM", + "ValueName": "NVDIMM" + } + ], + }, { + "AttributeName": "ValidEnum", + "ReadOnly": False, + "Type": "Enumeration", + "Value": [ + { + 
"ValueDisplayName": "Enabled", + "ValueName": "On" + }, + { + "ValueDisplayName": "Disabled", + "ValueName": "Off" + } + ], + "WriteOnly": False + }, { + "AttributeName": "IntSetting", + "LowerBound": 0, + "ReadOnly": False, + "Type": "Integer", + "UpperBound": 32, + }, { + "AttributeName": "IntSetting3", + "LowerBound": 0, + "ReadOnly": False, + "Type": "Integer", + "UpperBound": 32, + }, { + "AttributeName": "IntSetting2", + "LowerBound": 0, + "ReadOnly": False, + "Type": "Integer", + "UpperBound": 32, + }, ]}}, 'message': INVALID_ATTRIBUTES_MSG, + "reset_host": True, "get_pending_attributes": {}, + "success": True, + 'mparams': {"attributes": {"NumLock": "On", "SystemModelName": "new name", "MemoryMode": "DRAM", + "IntSetting": 33, "IntSetting2": 'zero', "IntSetting3": 25, + "ValidEnum": "On"}}}, + {"json_data": {"Attributes": {"NumLock": "On"}}, 'message': CHANGES_MSG, + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "check_mode": True, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "InMaintenanceWindowOnReset"]}}, + 'message': UNSUPPORTED_APPLY_TIME.format('AtMaintenanceWindowStart'), + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, + "apply_time": 'AtMaintenanceWindowStart', + "maintenance_window": {"start_time": '"2022-09-30T05:15:40-05:00"', + "duration": 600}}}, + {"json_data": {"DateTime": "2022-09-14T05:59:35-05:00", + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", 'AtMaintenanceWindowStart', + "InMaintenanceWindowOnReset"]}}, + 'message': MAINTENANCE_OFFSET.format('-05:00'), + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": 
"Off"}, + "apply_time": 'AtMaintenanceWindowStart', + "maintenance_window": {"start_time": '"2022-09-30T05:15:40-00:00"', + "duration": 600}}}, + {"json_data": {"DateTime": '2022-09-30T05:15:41-05:00', + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", 'AtMaintenanceWindowStart', + "InMaintenanceWindowOnReset"]}}, + 'message': MAINTENANCE_TIME, + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, + "apply_time": 'AtMaintenanceWindowStart', + "maintenance_window": {"start_time": '2022-09-30T05:15:40-05:00', + "duration": 600}}}, + {"json_data": {"DateTime": '2022-09-30T05:15:39-05:00', + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", 'AtMaintenanceWindowStart', + "InMaintenanceWindowOnReset"]}}, + 'message': COMMITTED_SUCCESS.format('AtMaintenanceWindowStart'), + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, + "apply_time": 'AtMaintenanceWindowStart', + "maintenance_window": {"start_time": '2022-09-30T05:15:40-05:00', + "duration": 600}}}, + {"json_data": {"DateTime": '2022-09-30T05:15:39-05:00', + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": []}}, + 'message': SCHEDULED_SUCCESS, + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, 'job_wait': False}}, + {"json_data": {"DateTime": '2022-09-30T05:15:39-05:00', + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", 'AtMaintenanceWindowStart', + "InMaintenanceWindowOnReset"]}}, + 'message': SCHEDULED_SUCCESS, + "reset_host": True, 
"get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, 'job_wait': False}}, + {"json_data": {"DateTime": '2022-09-30T05:15:39-05:00', + "DateTimeLocalOffset": "-05:00", + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", 'AtMaintenanceWindowStart', + "InMaintenanceWindowOnReset"]}}, + 'message': COMMITTED_SUCCESS.format('OnReset'), + "reset_host": True, "get_pending_attributes": {}, "validate_vs_registry": {}, + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, 'apply_time': 'OnReset'}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': BIOS_JOB_RUNNING, + "reset_host": True, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Running"), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': "Attributes committed but reboot has failed {0}".format(HOST_RESTART_FAILED), + "reset_host": False, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "apply_attributes": ("job1", True), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": + {"SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': "Job Tracking Failed", + "reset_host": True, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "apply_attributes": ("job1", True), + "idrac_redfish_job_tracking": 
(True, "Job Tracking Failed", {}, 10), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': SUCCESS_COMPLETE, + "reset_host": True, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "apply_attributes": ("job1", True), + "idrac_redfish_job_tracking": (False, "Job Tracking Failed", {}, 10), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': SCHEDULED_SUCCESS, + "reset_host": True, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "apply_attributes": ("job1", True), + "idrac_redfish_job_tracking": (False, "Job Tracking Failed", {}, 10), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}, "job_wait": False}}, + {"json_data": { + "Attributes": {"NumLock": "On"}, + "@Redfish.Settings": { + "SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}}, + 'message': COMMITTED_SUCCESS.format("Immediate"), + "reset_host": False, "get_pending_attributes": {"AssetTag": 'test'}, "validate_vs_registry": {}, + "check_scheduled_bios_job": ("job1", "Scheduled"), "apply_attributes": (None, True), + "success": True, 'mparams': {"attributes": {"NumLock": "Off"}}}, + ]) + def test_idrac_bios_attributes(self, params, idrac_redfish_mock_for_bios, ome_response_mock, idrac_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get('json_data') + ome_response_mock.headers = {'Location': 'job1'} + mocks = 
["get_current_attributes", "get_attributes_registry", "get_pending_attributes", + "check_scheduled_bios_job", "apply_attributes", "idrac_redfish_job_tracking", + "reset_host", "get_power_state", "track_power_state", "power_act_host"] + for m in mocks: + if m in params: + mocker.patch(MODULE_PATH + m, return_value=params.get(m, {})) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.module_utils.utils." + 'time.sleep', + return_value=None) + idrac_default_args.update(params['mparams']) + result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False)) + assert result['status_msg'] == params['message'] + + @pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, HTTPError]) + def test_main_idrac_config_bios_exception_handling_case(self, exc_type, mocker, + idrac_connection_configure_bios_mock, + idrac_default_args): + idrac_default_args.update({"share_name": "sharename"}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'run_server_bios_config', + side_effect=exc_type('test')) + else: + mocker.patch( + MODULE_PATH + 'run_server_bios_config', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result + + def test_run_idrac_bios_config_success_case01(self, idrac_connection_configure_bios_mock, + idrac_default_args, mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {"changes_applicable": True, "message": "changes are applicable"} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "message of validate params")) + 
idrac_connection_configure_bios_mock.config_mgr.is_change_applicabl.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = True + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'changes_applicable': True, 'message': 'changes are applicable'} + + def test_run_idrac_bios_config_success_case02(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {"changes_applicable": True, "Status": "Success", "message": "changes found to commit!"} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "message of validate params")) + idrac_connection_configure_bios_mock.config_mgr.is_change_applicabl.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'Status': 'Success', + 'changes_applicable': True, + 'message': 'changes found to commit!'} + + def test_run_idrac_bios_config_success_case03(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {"changes_applicable": False, "Status": "Success", "Message": "No changes found to commit!"} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "message of validate params")) + idrac_connection_configure_bios_mock.config_mgr.is_change_applicabl.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg 
= self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'Message': 'No changes found to commit!', + 'Status': 'Success', + 'changes_applicable': False} + + def test_run_idrac_bios_config_success_case04(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {"changes_applicable": False, "Status": "Success", "Message": "No changes found to apply."} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "message of validate params")) + idrac_connection_configure_bios_mock.config_mgr.is_change_applicabl.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'Message': 'No changes found to apply.', + 'Status': 'Success', + 'changes_applicable': False} + + def test_run_idrac_bios_config_bootmode_failed_case0(self, idrac_connection_configure_bios_mock, + idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {"changes_applicable": False, "Status": "failed", "Message": "No changes found to apply."} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "message of validate params")) + idrac_connection_configure_bios_mock.config_mgr.is_change_applicabl.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'Message': 'No changes found to apply.', + 'Status': 'failed', + 'changes_applicable': False} + + def 
test_run_idrac_bios_config_errorhandle_failed_case0(self, idrac_connection_configure_bios_mock, + idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(True, "Error occurs")) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources() + + def test_run_idrac_bios_config_status_failed_case01(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {'Status': 'Failed', 'Message': 'message of validate params'} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(True, "Error occurs")) + idrac_connection_configure_bios_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources() + + def test_run_idrac_bios_config_status_success_case01(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({"boot_sources": "bootsources", + "attributes": {"boot_mode": "BootMode", "nvme_mode": "NvmeMode"}}) + message = {'Status': 'Successs', 'Message': 'message of validate params'} + mocker.patch(MODULE_PATH + + '_validate_params', return_value=(False, "Error did not occurs")) + idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message + idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = 
self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == {'Message': 'message of validate params', 'Status': 'Successs'} + + def test_run_bios_config_status_boot_sources_failed_case(self, idrac_connection_configure_bios_mock, mocker, + idrac_default_args): + idrac_default_args.update({"boot_sources": "bootsources"}) + message = {'Status': 'Failed', "Data": {'Message': 'message of validate params'}} + idrac_connection_configure_bios_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module) + assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources() + + def test__validate_params_error_keys_case(self, idrac_connection_configure_bios_mock, idrac_default_args, + mocker): + idrac_default_args.update({}) + attr = [{"name": "Name"}, {"index": "Index"}, {"enabled": "Enabled"}] + msg = self.module._validate_params(attr) + assert msg == "attribute keys must be one of the ['Name', 'Index', 'Enabled']." + + def test__validate_params_check_params_case(self, idrac_connection_configure_bios_mock, mocker, + idrac_default_args): + mocker.patch(MODULE_PATH + + 'check_params', return_value=(True, "Error occurs in check params")) + attr = [{"name": "name1"}, {"Index": "index1"}] + msg = self.module._validate_params(attr) + assert msg == "attribute keys must be one of the ['Name', 'Index', 'Enabled']." 
+ + def test__validate_params_empty_params_case(self, idrac_connection_configure_bios_mock, mocker, + idrac_default_args): + mocker.patch(MODULE_PATH + + '_validate_name_index_duplication', return_value=(True, "Error occurs in " + "validate name")) + msg = self.module._validate_params([]) + assert msg == (True, 'Error occurs in validate name') + + def test__validate_name_index_duplication_error_true_case(self, idrac_connection_configure_bios_mock, + idrac_default_args): + result = self.module._validate_name_index_duplication([{"Name": "Name1"}, {"Name": "Name1"}]) + assert result == 'duplicate name Name1' + + def test__validate_name_index_duplication_error_false_case(self, idrac_connection_configure_bios_mock, + idrac_default_args): + result = self.module._validate_name_index_duplication([{"Name": "Name1"}, {"Name": "Name2"}]) + assert result == '' + + def test_check_params_false_case(self, idrac_connection_configure_bios_mock, idrac_default_args): + result = self.module.check_params({"required": False}, [{"name": "Name1", "required": False}, + {"name": "Name2", "required": False}]) + assert result == '' + + @pytest.mark.parametrize("params", [ + {"each": {"Name": 1}, 'message': "Name must be of type: . 1 () provided."}, + {"each": {"Index": "one"}, 'message': "Index must be of type: . one () provided."}, + {"each": {"Index": -1}, 'message': "Index must be greater than or equal to: 0"}, + {"each": {"Name": 'test', "Index": 1}, 'message': ""}, + {"each": {"Enabled": "one"}, 'message': "Enabled must be of type: . 
one () provided."}, + ]) + def test_check_params_required_true_case(self, idrac_connection_configure_bios_mock, params, + idrac_default_args): + fields = [ + {"name": "Name", "type": str, "required": True}, + {"name": "Index", "type": int, "required": False, "min": 0}, + {"name": "Enabled", "type": bool, "required": False} + ] + result = self.module.check_params(params.get('each'), fields) + assert result == params.get('message') diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py new file mode 100644 index 00000000..2e754888 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_boot +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from mock import PropertyMock +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def boot_connection_mock(mocker, redfish_response_mock): + idrac_conn_mock = mocker.patch(MODULE_PATH + 'idrac_boot.iDRACRedfishAPI') + idrac_conn_mock_obj = idrac_conn_mock.return_value.__enter__.return_value + idrac_conn_mock_obj.invoke_request.return_value = redfish_response_mock + return idrac_conn_mock_obj + + +class TestConfigBios(FakeAnsibleModule): + + module = idrac_boot + + def test_get_response_attributes(self, boot_connection_mock, redfish_response_mock, idrac_default_args): + idrac_default_args.update({"boot_options": {"display_name": "Boot001", "enabled": True}}) + f_module = self.get_module_mock(params=idrac_default_args) + redfish_response_mock.success = True + redfish_response_mock.json_data = {"Boot": { + "BootOptions": "", "Certificates": "", "BootOrder": [], "BootOrder@odata.count": 1, + "BootSourceOverrideEnabled": "Disabled", "BootSourceOverrideMode": "Legacy", + "BootSourceOverrideTarget": "None", "UefiTargetBootSourceOverride": None, + "BootSourceOverrideTarget@Redfish.AllowableValues": []}, + "Actions": {"#ComputerSystem.Reset": {"ResetType@Redfish.AllowableValues": ["GracefulShutdown"]}}} + result = self.module.get_response_attributes(f_module, boot_connection_mock, "System.Embedded.1") + assert result["BootSourceOverrideEnabled"] == "Disabled" + redfish_response_mock.json_data["Boot"].pop("BootOptions", None) + with pytest.raises(Exception) as err: + self.module.get_response_attributes(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "The system does not support the BootOptions feature." 
+ + def test_get_existing_boot_options(self, boot_connection_mock, redfish_response_mock, idrac_default_args): + redfish_response_mock.success = True + redfish_response_mock.json_data = {"Members": [ + {"@odata.context": "/redfish/v1/$metadata#BootOption.BootOption", + "@odata.id": "/redfish/v1/Systems/System.Embedded.1/BootOptions/HardDisk.List.1-1", + "@odata.type": "#BootOption.v1_0_4.BootOption", "BootOptionEnabled": True, + "BootOptionReference": "HardDisk.List.1-1", + "Description": "Current settings of the Legacy Boot option", + "DisplayName": "Hard drive C:", "Id": "HardDisk.List.1-1", "Name": "Legacy Boot option"}]} + resp_data = {'Members': [{ + 'BootOptionEnabled': True, 'BootOptionReference': 'HardDisk.List.1-1', + 'Description': 'Current settings of the Legacy Boot option', + 'DisplayName': 'Hard drive C:', 'Id': 'HardDisk.List.1-1', + 'Name': 'Legacy Boot option'}]} + result = self.module.get_existing_boot_options(boot_connection_mock, "System.Embedded.1") + assert result == resp_data + + def test_system_reset(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + mocker.patch(MODULE_PATH + 'idrac_boot.idrac_system_reset', return_value=(True, False, "Completed", {})) + idrac_default_args.update({"boot_source_override_mode": "uefi", "reset_type": "graceful_restart"}) + f_module = self.get_module_mock(params=idrac_default_args) + reset, track_failed, reset_msg, resp_data = self.module.system_reset(f_module, boot_connection_mock, + "System.Embedded.1") + assert reset is True + + def test_get_scheduled_job(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + mocker.patch(MODULE_PATH + 'idrac_boot.time', return_value=None) + redfish_response_mock.success = True + redfish_response_mock.json_data = {"Members": [{ + "Description": "Job Instance", "EndTime": "TIME_NA", "Id": "JID_609237056489", "JobState": "Scheduled", + "JobType": "BIOSConfiguration", "Message": "Job scheduled successfully.", 
"MessageArgs": [], + "MessageId": "PR19", "Name": "Configure: BIOS.Setup.1-1", "PercentComplete": 10}]} + status, job = self.module.get_scheduled_job(boot_connection_mock) + assert status is True + + def test_configure_boot_options(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "none", + "job_wait_timeout": 900}) + f_module = self.get_module_mock(params=idrac_default_args) + mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(True, {})) + resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled", + "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget", + "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data) + with pytest.raises(Exception) as err: + self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False}) + assert err.value.args[0] == "Unable to complete the request because the BIOS configuration job already " \ + "exists. Wait for the pending job to complete." 
+ redfish_response_mock.status_code = 202 + redfish_response_mock.success = True + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"} + redfish_response_mock.json_data = {"Attributes": {"BootSeq": [{"Name": "Boot001", "Id": 0, "Enabled": True}, + {"Name": "Boot000", "Id": 1, "Enabled": True}]}} + mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(False, {})) + mocker.patch(MODULE_PATH + 'idrac_boot.idrac_system_reset', return_value=(False, False, "Completed", {})) + mocker.patch(MODULE_PATH + 'idrac_boot.wait_for_idrac_job_completion', + return_value=({}, "This job is not complete after 900 seconds.")) + with pytest.raises(Exception) as err: + self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False}) + assert err.value.args[0] == "This job is not complete after 900 seconds." + resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled", + "BootSourceOverrideMode": "UEFI", "BootSourceOverrideTarget": "UefiTarget", + "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data) + idrac_default_args.update({"boot_source_override_mode": "legacy"}) + f_module = self.get_module_mock(params=idrac_default_args) + redfish_response_mock.json_data = {"Attributes": {"UefiBootSeq": [ + {"Name": "Boot001", "Id": 0, "Enabled": True}, {"Name": "Boot000", "Id": 1, "Enabled": True}]}} + with pytest.raises(Exception) as err: + self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False}) + assert err.value.args[0] == "This job is not complete after 900 seconds." 
+ + def test_apply_boot_settings(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "none", + "job_wait_timeout": 900}) + f_module = self.get_module_mock(params=idrac_default_args) + payload = {"Boot": {"BootSourceOverrideMode": "UEFI"}} + redfish_response_mock.success = True + redfish_response_mock.status_code = 200 + mocker.patch(MODULE_PATH + 'idrac_boot.idrac_system_reset', return_value=(False, False, "Completed", {})) + mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(True, [{"Id": "JID_123456789"}])) + mocker.patch(MODULE_PATH + 'idrac_boot.wait_for_idrac_job_completion', + return_value=({}, "This job is not complete after 900 seconds.")) + with pytest.raises(Exception) as err: + self.module.apply_boot_settings(f_module, boot_connection_mock, payload, "System.Embedded.1") + assert err.value.args[0] == "This job is not complete after 900 seconds." 
+ + def test_configure_boot_settings(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"boot_order": ["Boot005", "Boot001"], "job_wait": True, "reset_type": "none", + "job_wait_timeout": 900, "boot_source_override_mode": "uefi", + "boot_source_override_enabled": "once", "boot_source_override_target": "cd", + "uefi_target_boot_source_override": "test_uefi_path"}) + f_module = self.get_module_mock(params=idrac_default_args) + resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled", + "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget", + "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data) + with pytest.raises(Exception) as err: + self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Invalid boot order reference provided." + idrac_default_args.update({"boot_order": ["Boot001", "Boot001"]}) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as err: + self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Duplicate boot order reference provided." + mocker.patch(MODULE_PATH + 'idrac_boot.apply_boot_settings', return_value={"JobStatus": "Completed"}) + idrac_default_args.update({"boot_order": ["Boot001", "Boot003", "Boot002"]}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1") + assert result["JobStatus"] == "Completed" + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Changes found to be applied." 
+ + def test_configure_idrac_boot(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"job_wait": True, "reset_type": "none", "job_wait_timeout": 900, + "boot_options": [{"boot_option_reference": "HardDisk.List.1-1", "enabled": True}]}) + f_module = self.get_module_mock(params=idrac_default_args) + boot_return_data = {"Members": [{"BootOptionEnabled": False, "BootOptionReference": "HardDisk.List.1-1", + "Description": "Current settings of the Legacy Boot option", + "DisplayName": "Hard drive C:", "Id": "HardDisk.List.1-1", + "Name": "Legacy Boot option", "UefiDevicePath": "VenHw(D6C0639F-823DE6)"}], + "Name": "Boot Options Collection", "Description": "Collection of BootOptions"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_existing_boot_options', return_value=boot_return_data) + mocker.patch(MODULE_PATH + 'idrac_boot.configure_boot_options', return_value={"JobType": "Completed"}) + mocker.patch(MODULE_PATH + 'idrac_boot.configure_boot_settings', return_value={"JobType": "Completed"}) + result = self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1") + assert result["JobType"] == "Completed" + idrac_default_args.update({"boot_options": [{"boot_option_reference": "HardDisk.List.1-2", "enabled": True}]}) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as err: + self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Invalid boot_options provided." + idrac_default_args.update({"boot_options": [{"boot_option_reference": "HardDisk.List.1-1", "enabled": True}, + {"boot_option_reference": "HardDisk.List.1-1", "enabled": True}]}) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as err: + self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Duplicate boot_options provided." 
+ idrac_default_args.update({"boot_options": [{"boot_option_reference": "HardDisk.List.1-1", "enabled": False}]}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "No changes found to be applied." + idrac_default_args.update({"boot_options": [{"boot_option_reference": "HardDisk.List.1-1", "enabled": True}]}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1") + assert err.value.args[0] == "Changes found to be applied." + + @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError]) + def test_main_exception(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker, exc_type): + idrac_default_args.update({"boot_source_override_mode": "legacy"}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'idrac_boot.get_system_res_id', side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + 'idrac_boot.get_system_res_id', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result + + def test_manin_success(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"boot_source_override_mode": "legacy"}) + redfish_response_mock.success = True + mocker.patch(MODULE_PATH + 
'idrac_boot.get_system_res_id', return_value=("System.Embedded.1", "")) + job_resp = {"Description": "Job Instance", "EndTime": "TIME_NA", "Id": "JID_609237056489", + "JobState": "Completed", "JobType": "BIOSConfiguration", "MessageId": "PR19", + "Message": "Job scheduled successfully.", "MessageArgs": [], + "Name": "Configure: BIOS.Setup.1-1", "PercentComplete": 100} + mocker.patch(MODULE_PATH + 'idrac_boot.configure_idrac_boot', return_value=job_resp) + boot_return_data = {"Members": [{"BootOptionEnabled": False, "BootOptionReference": "HardDisk.List.1-1", + "Description": "Current settings of the Legacy Boot option", + "DisplayName": "Hard drive C:", "Id": "HardDisk.List.1-1", + "Name": "Legacy Boot option", "UefiDevicePath": "VenHw(D6C0639F-823DE6)"}], + "Name": "Boot Options Collection", "Description": "Collection of BootOptions"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_existing_boot_options', return_value=boot_return_data) + resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled", + "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget", + "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"} + mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully updated the boot settings." 
+ + def test_main_res_id_error(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"boot_source_override_mode": "legacy"}) + mocker.patch(MODULE_PATH + 'idrac_boot.get_system_res_id', return_value=("System.Embedded.5", "Failed")) + with pytest.raises(Exception) as err: + self._run_module(idrac_default_args) + assert err.value.args[0]["msg"] == "Failed" diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py new file mode 100644 index 00000000..c5ee0dc8 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.5.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import os +import tempfile +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_certificates +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from mock import MagicMock + +NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}." +SUCCESS_MSG = "Successfully performed the '{command}' operation." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_MSG = "Changes found to be applied." +NO_RESET = " Reset iDRAC to apply new certificate. 
Until iDRAC is reset, the old certificate will be active." +RESET_UNTRACK = " iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply." +RESET_SUCCESS = " iDRAC has been reset successfully." +RESET_FAIL = " Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC." +SYSTEM_ID = "System.Embedded.1" +MANAGER_ID = "iDRAC.Embedded.1" +SYSTEMS_URI = "/redfish/v1/Systems" +MANAGERS_URI = "/redfish/v1/Managers" +IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService" +CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR" +IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate" +EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate" +RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg" +IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset" +idrac_service_actions = { + "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate", + "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate", + "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL, + "#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR": + "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR", + "#DelliDRACCardService.FactoryIdentityExportCertificate": + "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate", + "#DelliDRACCardService.FactoryIdentityImportCertificate": + 
"/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate", + "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR", + "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate", + "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL, + "#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg", + "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset" +} +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_certificates.' + + +@pytest.fixture +def idrac_redfish_mock_for_certs(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestIdracCertificates(FakeAnsibleModule): + module = idrac_certificates + + @pytest.fixture + def idrac_certificates_mock(self): + idrac_obj = MagicMock() + return idrac_obj + + @pytest.fixture + def idrac_connection_certificates_mock(self, mocker, idrac_certificates_mock): + idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI', + return_value=idrac_certificates_mock) + idrac_conn_mock.return_value.__enter__.return_value = idrac_certificates_mock + return idrac_conn_mock + + @pytest.mark.parametrize("params", [ + {"json_data": {"CertificateFile": b'Hello world!', "@Message.ExtendedInfo": [ + { + "Message": "Successfully exported SSL Certificate.", + "MessageId": "IDRAC.2.5.LC067", + "Resolution": "No response action is required.", + "Severity": 
"Informational" + }]}, 'message': SUCCESS_MSG.format(command="export"), "success": True, + "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'export', 'certificate_type': "HTTPS", 'certificate_path': tempfile.gettempdir(), + 'reset': False}}, + {"json_data": {"CertificateFile": b'Hello world!'}, 'message': CHANGES_MSG, "success": True, + "reset_idrac": (True, False, RESET_SUCCESS), 'check_mode': True, + 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'reset': False}}, + {"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), NO_RESET), "success": True, + "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'reset': False}}, + {"json_data": {}, 'message': SUCCESS_MSG.format(command="generate_csr"), + "success": True, + "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'generate_csr', 'certificate_type': "HTTPS", 'certificate_path': tempfile.gettempdir(), + 'cert_params': { + "common_name": "dell", + "country_code": "IN", + "email_address": "dell@dell.com", + "locality_name": "Bangalore", + "organization_name": "Dell", + "organization_unit": "ansible", + "state_name": "Karnataka", + "subject_alt_name": [ + "emc" + ]}}}, + {"json_data": {}, 'message': NOT_SUPPORTED_ACTION.format(op="generate_csr", certype="CA"), + "success": True, + "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'generate_csr', 'certificate_type': "CA", 'certificate_path': tempfile.gettempdir(), + 'cert_params': { + "common_name": "dell", + "country_code": "IN", + "email_address": "dell@dell.com", + "locality_name": "Bangalore", + "organization_name": "Dell", + "organization_unit": "ansible", + "state_name": "Karnataka", + "subject_alt_name": [ + "emc" + ]}}}, + {"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), 
RESET_SUCCESS), + "success": True, + "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'import', 'certificate_type': "CA", 'passphrase': 'myphrase', + 'certificate_path': '.p12'}}, + {"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), RESET_SUCCESS), + "success": True, + "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem'}}, + {"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), RESET_SUCCESS), + "success": True, + "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem'}}, + {"json_data": {}, 'message': SUCCESS_MSG.format(command="export"), "success": True, "get_cert_url": "url", + 'mparams': {'command': 'export', 'certificate_type': "HTTPS", 'certificate_path': tempfile.gettempdir()}}, + {"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="reset"), RESET_SUCCESS), + "success": True, "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS), + 'mparams': {'command': 'reset', 'certificate_type': "HTTPS"} + } + ]) + def test_idrac_certificates(self, params, idrac_connection_certificates_mock, idrac_default_args, mocker): + idrac_connection_certificates_mock.success = params.get("success", True) + idrac_connection_certificates_mock.json_data = params.get('json_data') + if params.get('mparams').get('certificate_path') and params.get('mparams').get('command') == 'import': + sfx = params.get('mparams').get('certificate_path') + temp = tempfile.NamedTemporaryFile(suffix=sfx, delete=False) + temp.write(b'Hello') + temp.close() + params.get('mparams')['certificate_path'] = temp.name + mocker.patch(MODULE_PATH + 'get_res_id', return_value=MANAGER_ID) + mocker.patch(MODULE_PATH + 'get_idrac_service', return_value=IDRAC_SERVICE.format(res_id=MANAGER_ID)) + 
mocker.patch(MODULE_PATH + 'get_actions_map', return_value=idrac_service_actions) + # mocker.patch(MODULE_PATH + 'get_cert_url', return_value=params.get('get_cert_url')) + # mocker.patch(MODULE_PATH + 'write_to_file', return_value=params.get('write_to_file')) + mocker.patch(MODULE_PATH + 'reset_idrac', return_value=params.get('reset_idrac')) + idrac_default_args.update(params.get('mparams')) + result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False)) + if params.get('mparams').get('command') == 'import' and params.get('mparams').get( + 'certificate_path') and os.path.exists(temp.name): + os.remove(temp.name) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [{"json_data": {"Members": [{"@odata.id": '/redfish/v1/Mangers/iDRAC.1'}]}, + "certype": 'Server', "res_id": "iDRAC.1"}, + {"json_data": {"Members": []}, + "certype": 'Server', "res_id": MANAGER_ID} + ]) + def test_res_id( + self, params, idrac_redfish_mock_for_certs, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + res_id = self.module.get_res_id(idrac_redfish_mock_for_certs, params.get('certype')) + assert res_id == params['res_id'] + + @pytest.mark.parametrize("params", [{"json_data": { + "Links": { + "Oem": { + "Dell": { + "DelliDRACCardService": { + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService" + }}}}, + "VirtualMedia": { + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia"} + }, + "idrac_srv": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService', "res_id": "iDRAC.1"}, + {"json_data": {"Members": []}, + "idrac_srv": '/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DelliDRACCardService', "res_id": MANAGER_ID} + ]) + def test_get_idrac_service( + self, params, idrac_redfish_mock_for_certs, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = 
params["json_data"] + idrac_srv = self.module.get_idrac_service(idrac_redfish_mock_for_certs, params.get('res_id')) + assert idrac_srv == params['idrac_srv'] + + @pytest.mark.parametrize("params", [{"json_data": { + "Actions": { + "#DelliDRACCardService.ExportSSLCertificate": { + "SSLCertType@Redfish.AllowableValues": ["CA", "CSC", "ClientTrustCertificate", "Server"], + "target": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate" + }, + "#DelliDRACCardService.ImportSSLCertificate": { + "CertificateType@Redfish.AllowableValues": ["CA", "CSC", "ClientTrustCertificate", "Server"], + "target": + "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate" + }, + "#DelliDRACCardService.SSLResetCfg": { + "target": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg" + }, + }, + }, + "idrac_service_uri": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService', + "actions": { + '#DelliDRACCardService.ExportSSLCertificate': + '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate', + '#DelliDRACCardService.ImportSSLCertificate': + '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate', + '#DelliDRACCardService.SSLResetCfg': + '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg'}}, + {"json_data": {"Members": []}, + "idrac_service_uri": '/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DelliDRACCardService', + "actions": idrac_service_actions} + ]) + def test_get_actions_map( + self, params, idrac_redfish_mock_for_certs, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + actions = 
self.module.get_actions_map(idrac_redfish_mock_for_certs, params.get('idrac_service_uri')) + assert actions == params['actions'] + + @pytest.mark.parametrize("params", [{"actions": {}, "op": "generate_csr", + "certype": 'Server', "res_id": "iDRAC.1", + "dynurl": "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"}, + {"actions": {}, "op": "import", + "certype": 'Server', "res_id": "iDRAC.1", + "dynurl": "/redfish/v1/Dell/Managers/iDRAC.1/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"} + ]) + def test_get_cert_url(self, params): + dynurl = self.module.get_cert_url(params.get('actions'), params.get('op'), params.get('certype'), + params.get('res_id')) + assert dynurl == params['dynurl'] + + @pytest.mark.parametrize("params", [ + {"cert_data": {"CertificateFile": 'Hello world!', + "@Message.ExtendedInfo": [{ + "Message": "Successfully exported SSL Certificate.", + "MessageId": "IDRAC.2.5.LC067", + "Resolution": "No response action is required.", + "Severity": "Informational"} + ]}, + "result": {'@Message.ExtendedInfo': [ + {'Message': 'Successfully exported SSL Certificate.', + 'MessageId': 'IDRAC.2.5.LC067', + 'Resolution': 'No response action is required.', + 'Severity': 'Informational'}]}, + "mparams": {'command': 'export', 'certificate_type': "HTTPS", + 'certificate_path': tempfile.gettempdir(), 'reset': False}}]) + def test_format_output(self, params, idrac_default_args): + idrac_default_args.update(params.get('mparams')) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.format_output(f_module, params.get('cert_data')) + if os.path.exists(result.get('certificate_path')): + os.remove(result.get('certificate_path')) + assert 'result' not in result + + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError, ImportError, RuntimeError]) + def test_main_exceptions(self, exc_type, idrac_connection_certificates_mock, 
idrac_default_args, mocker): + idrac_default_args.update({"command": "export", "certificate_path": "mypath"}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + "get_res_id", + side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + "get_res_id", + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py new file mode 100644 index 00000000..c30ce409 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py @@ -0,0 +1,625 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_firmware +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from mock import MagicMock, patch, Mock +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.parse import urlparse, ParseResult +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestidracFirmware(FakeAnsibleModule): + module = idrac_firmware + + @pytest.fixture + def idrac_firmware_update_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.update_mgr = idrac_obj + idrac_obj.update_from_repo = Mock(return_value={ + "update_status": { + "job_details": { + "Data": { + "StatusCode": 200, + "body": { + "PackageList": [{}] + } + } + } + } + }) + idrac_obj.update_from_repo_url = Mock(return_value={"job_details": {"Data": {"StatusCode": 200, + "body": {"PackageList": [ + {}] + } + } + } + }) + return idrac_obj + + @pytest.fixture + def idrac_firmware_job_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.job_mgr = idrac_obj + idrac_obj.get_job_status_redfish = Mock(return_value={ + "update_status": { + "job_details": { + "Data": { + "StatusCode": 200, + "body": { + "PackageList": [{}] + } + } + } + } + }) + idrac_obj.job_wait = Mock(return_value="21543") + return idrac_obj + + @pytest.fixture + def re_match_mock(self, mocker): + try: + re_mock = 
mocker.patch( + MODULE_PATH + 'idrac_firmware.re') + except AttributeError: + re_mock = MagicMock() + obj = MagicMock() + re_mock.match.group.return_value = obj + return "3.30" + + @pytest.fixture + def ET_convert_mock(self, mocker): + try: + ET_mock = mocker.patch( + MODULE_PATH + 'idrac_firmware.ET') + except AttributeError: + ET_mock = MagicMock() + obj = MagicMock() + ET_mock.fromstring.return_value = obj + return ET_mock + + @pytest.fixture + def fileonshare_idrac_firmware_mock(self, mocker): + share_mock = mocker.patch(MODULE_PATH + 'idrac_firmware.FileOnShare', + return_value=MagicMock()) + return share_mock + + @pytest.fixture + def idrac_connection_firmware_mock(self, mocker, idrac_firmware_update_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_firmware.iDRACConnection', + return_value=idrac_firmware_update_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_update_mock + return idrac_firmware_update_mock + + @pytest.fixture + def idrac_connection_firmware_redfish_mock(self, mocker, idrac_firmware_job_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_firmware.iDRACRedfishAPI', + return_value=idrac_firmware_job_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_job_mock + return idrac_firmware_job_mock + + def test_main_idrac_firmware_success_case(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, + idrac_default_args, mocker): + idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml", + "share_user": "sharename", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": True + }) + message = {"Status": "Success", "update_msg": "Successfully updated the firmware.", + "update_status": "Success", 'changed': False, 'failed': False} + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {} + mocker.patch(MODULE_PATH + 
'idrac_firmware.update_firmware_redfish', return_value=message) + result = self._run_module(idrac_default_args) + assert result == {'msg': 'Successfully updated the firmware.', 'update_status': 'Success', + 'changed': False, 'failed': False} + + @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError]) + def test_main_idrac_firmware_exception_handling_case(self, exc_type, mocker, idrac_default_args, + idrac_connection_firmware_redfish_mock, + idrac_connection_firmware_mock): + idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml", + "share_user": "sharename", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": True + }) + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + mocker.patch(MODULE_PATH + + 'idrac_firmware._validate_catalog_file', return_value="catalog_file_name") + mocker.patch(MODULE_PATH + + 'idrac_firmware.update_firmware_omsdk', side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True + + def test_main_HTTPError_case(self, idrac_connection_firmware_mock, idrac_default_args, + idrac_connection_firmware_redfish_mock, mocker): + idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml", + "share_user": "sharename", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": True + }) + json_str = to_text(json.dumps({"data": "out"})) + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + mocker.patch(MODULE_PATH + 'idrac_firmware.update_firmware_omsdk', + side_effect=HTTPError('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, + 
StringIO(json_str))) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True + + def test_update_firmware_omsdk_success_case01(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, idrac_default_args, mocker, + re_match_mock): + idrac_default_args.update({"share_name": "https://downloads.dell.com", "catalog_file_name": "Catalog.xml", + "share_user": "UserName", "share_password": "sharepswd", + "share_mnt": "shrmnt", + "reboot": True, "job_wait": True, "ignore_cert_warning": True, + "apply_update": True}) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk", + return_value=({"update_status": {"job_details": {"Data": {"StatusCode": 200, + "body": {"PackageList": [{}]}}}}}, + {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}})) + + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", + return_value=({"BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64" + "_19.10.12_A00.EXE", + "PackagePath": "FOLDER05902898M/1/Drivers-for-" + "OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }, True, False)) + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_mock.match.return_value = "2.70" + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + idrac_connection_firmware_mock.ServerGeneration.return_value = "13" + idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = { + "job_details": {"Data": {"StatusCode": 200, "GetRepoBasedUpdateList_OUTPUT": {}, + "body": {"PackageList1": [{}]}}} + } + result = 
self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result["update_status"]["job_details"]["Data"]["StatusCode"] == 200 + + def test_update_firmware_omsdk_success_case02(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, idrac_default_args, mocker, + re_match_mock, fileonshare_idrac_firmware_mock): + idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml", + "share_user": "UserName", "share_password": "sharepswd", + "share_mnt": "shrmnt", + "reboot": True, "job_wait": True, "ignore_cert_warning": True, + "apply_update": True + }) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk", + return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200, + "body": {"PackageList": [{}]}}}}}, + {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}})) + + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", + return_value=({"BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64" + "_19.10.12_A00.EXE", + "PackagePath": "FOLDER05902898M/1/Drivers-for-" + "OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }, True)) + + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_mock.match.return_value = "2.70" + idrac_connection_firmware_mock.ServerGeneration.return_value = "13" + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=("INSTANCENAME", False, False)) + idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = { 
+ "job_details": {"Data": {"StatusCode": 200, "GetRepoBasedUpdateList_OUTPUT": {}, + "body": {"PackageList": [{}]}}}} + upd_share = fileonshare_idrac_firmware_mock + upd_share.IsValid = True + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result["update_status"]["job_details"]["Data"]["StatusCode"] == 200 + + def test_update_firmware_redfish_success_case03(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, + idrac_default_args, mocker, re_match_mock): + idrac_default_args.update({"share_name": "https://downloads.dell.com", "catalog_file_name": "Catalog.xml", + "share_user": "UserName", "share_password": "sharepswd", + "share_mnt": "shrmnt", + "reboot": True, "job_wait": False, "ignore_cert_warning": True, + "apply_update": True + }) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_redfish", + return_value=( + {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}}, + {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}})) + + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", + return_value=({"BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_" + "19.10.12_A00.EXE", + "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-" + "Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }, True)) + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_mock.re_match_mock.group = Mock(return_value="3.30") + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "3.30"} + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", 
return_value=("INSTANCENAME", False, False)) + idrac_connection_firmware_mock.ServerGeneration = "14" + result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {}) + assert result["changed"] is False + assert result["update_msg"] == "Successfully triggered the job to update the firmware." + + def test_update_firmware_omsdk_status_success_case01(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, idrac_default_args, + mocker, re_match_mock, fileonshare_idrac_firmware_mock): + idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml", + "share_user": "UserName", "share_password": "sharepswd", + "share_mnt": "sharemnt", + "reboot": True, "job_wait": True, "ignore_cert_warning": True, + "apply_update": True + }) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk", + return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200, + "body": {"PackageList": [{}]}}}}}, + {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}})) + + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", + return_value={ + "BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-Deployment_" + "Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }) + f_module = self.get_module_mock(params=idrac_default_args, check_mode=False) + idrac_connection_firmware_mock.match.return_value = "2.70" + idrac_connection_firmware_mock.ServerGeneration.return_value = "13" + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = 
{"FirmwareVersion": "2.70"} + idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {"job_details": { + "Data": {"StatusCode": 200, "body": {}, "GetRepoBasedUpdateList_OUTPUT": {}}, "Status": "Success"}, + "Status": "Success"} + upd_share = fileonshare_idrac_firmware_mock + upd_share.IsValid = True + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result == {'changed': False, 'failed': False, + 'update_msg': 'Successfully triggered the job to update the firmware.', + 'update_status': {'Status': 'Success', + 'job_details': {'Data': {'StatusCode': 200, 'body': {}, + "GetRepoBasedUpdateList_OUTPUT": {}}, + 'Status': 'Success'}}} + + def test_update_firmware_omsdk_status_failed_case01(self, idrac_connection_firmware_mock, + idrac_connection_firmware_redfish_mock, + idrac_default_args, mocker, re_match_mock): + idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml", + "share_user": "UserName", "share_password": "sharepswd", + "share_mnt": "sharemnt", + "reboot": True, "job_wait": True, "ignore_cert_warning": True, + "apply_update": True}) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk", + return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200, + "body": {"PackageList": [{}]}}}}}, + {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}})) + + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", + return_value={ + "BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-Deployment_" + "Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": 
"DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }) + + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_mock.match.return_value = "2.70" + idrac_connection_firmware_mock.ServerGeneration.return_value = "13" + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {"job_details": {"Data": { + "StatusCode": 200, "body": {}, "GetRepoBasedUpdateList_OUTPUT": {}}, "Status": "Failed"}, + "Status": "Failed"} + with pytest.raises(Exception) as ex: + self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert ex.value.args[0] == "Firmware update failed." + + def test__validate_catalog_file_case01(self, idrac_connection_firmware_mock, idrac_default_args): + idrac_default_args.update({"catalog_file_name": ""}) + with pytest.raises(ValueError) as exc: + self.module._validate_catalog_file("") + assert exc.value.args[0] == 'catalog_file_name should be a non-empty string.' + + def test__validate_catalog_file_case02(self, idrac_connection_firmware_mock, idrac_default_args): + idrac_default_args.update({"catalog_file_name": "Catalog.json"}) + with pytest.raises(ValueError) as exc: + self.module._validate_catalog_file("Catalog.json") + assert exc.value.args[0] == 'catalog_file_name should be an XML file.' 
+ + def test_convert_xmltojson_case01(self, mocker, idrac_connection_firmware_mock, + idrac_default_args, ET_convert_mock): + idrac_default_args.update({"PackageList": [{ + "BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": None, + "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackagePath": + "FOLDER05902898M/1/Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12"}]}) + mocker.patch(MODULE_PATH + "idrac_firmware.get_job_status", return_value=("Component", False)) + mocker.patch(MODULE_PATH + 'idrac_firmware.ET') + result = self.module._convert_xmltojson({"PackageList": [{"INSTANCENAME": {"PROPERTY": {"NAME": "abc"}}}]}, + MagicMock(), None) + assert result == ([], True, False) + + def test_convert_xmltojson_case02(self, mocker, idrac_connection_firmware_mock, idrac_default_args): + idrac_default_args.update({"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}) + packagelist = {"PackageList": "INSTANCENAME"} + mocker.patch(MODULE_PATH + "idrac_firmware.get_job_status", return_value=("Component", False)) + mocker.patch(MODULE_PATH + 'idrac_firmware.ET') + result = self.module._convert_xmltojson(packagelist, MagicMock(), None) + assert result == ([], True, False) + + def test_get_jobid_success_case01(self, idrac_connection_firmware_mock, idrac_default_args, + idrac_firmware_job_mock, + idrac_connection_firmware_redfish_mock): + idrac_default_args.update({"Location": "https://jobmnager/jid123"}) + idrac_firmware_job_mock.status_code = 202 + idrac_firmware_job_mock.Success = True + idrac_connection_firmware_redfish_mock.update_mgr.headers.get().split().__getitem__().return_value = "jid123" + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.get_jobid(f_module, idrac_firmware_job_mock) + assert result == 
idrac_connection_firmware_redfish_mock.headers.get().split().__getitem__() + + def test_get_jobid_fail_case01(self, idrac_connection_firmware_mock, idrac_default_args, + idrac_firmware_job_mock): + idrac_firmware_job_mock.status_code = 202 + idrac_firmware_job_mock.headers = {"Location": None} + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as exc: + self.module.get_jobid(f_module, idrac_firmware_job_mock) + assert exc.value.args[0] == "Failed to update firmware." + + def test_get_jobid_fail_case02(self, idrac_connection_firmware_mock, idrac_default_args, + idrac_firmware_job_mock): + idrac_firmware_job_mock.status_code = 400 + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as exc: + self.module.get_jobid(f_module, idrac_firmware_job_mock) + assert exc.value.args[0] == "Failed to update firmware." + + def test_update_firmware_url_omsdk_success_case02(self, idrac_connection_firmware_mock, idrac_default_args, + mocker, idrac_connection_firmware_redfish_mock): + idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml", + "share_user": "shareuser", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": False, "ignore_cert_warning": True, + "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user", + "idrac_password": "idrac_password", "idrac_port": 443 + }) + mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid", + return_value="23451") + + mocker.patch(MODULE_PATH + "idrac_firmware.urlparse", + return_value=ParseResult(scheme='http', netloc='downloads.dell.com', + path='/%7Eguido/Python.html', + params='', query='', fragment='')) + mocker.patch("socket.gethostbyname", return_value="downloads.dell.com") + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_mock.use_redfish = False + idrac_connection_firmware_redfish_mock.get_job_status_redfish = "Status" 
+ idrac_connection_firmware_redfish_mock.update_mgr.job_mgr.job_wait.return_value = "12345" + idrac_connection_firmware_mock.update_mgr.update_from_repo_url.return_value = { + "update_status": {"job_details": {"data": { + "StatusCode": 200, + "body": { + "PackageList": [ + {}] + } + } + } + } + } + idrac_connection_firmware_mock.update_mgr.update_from_dell_repo_url.return_value = {"job_details": {"Data": { + "GetRepoBasedUpdateList_OUTPUT": { + "Message": [ + {}] + } + } + } + } + payload = {"ApplyUpdate": "True", + "CatalogFile": "Catalog.xml", + "IgnoreCertWarning": "On", + "RebootNeeded": True, + "UserName": "username", + "Password": "psw" + } + result = self.module.update_firmware_url_omsdk(f_module, idrac_connection_firmware_mock, + "http://downloads.dell.com", "catalog.xml", True, True, True, + False, payload) + assert result == ( + {'job_details': {'Data': {'GetRepoBasedUpdateList_OUTPUT': {'Message': [{}]}}}}, {}) + + def test_update_firmware_url_omsdk(self, idrac_connection_firmware_mock, idrac_default_args, mocker, + idrac_connection_firmware_redfish_mock): + idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml", + "share_user": "shareuser", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": False, "ignore_cert_warning": True, + "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user", + "idrac_password": "idrac_password", "idrac_port": 443 + }) + mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid", + return_value="23451") + mocker.patch(MODULE_PATH + "idrac_firmware.get_check_mode_status") + idrac_connection_firmware_mock.use_redfish = True + idrac_connection_firmware_mock.job_mgr.get_job_status_redfish.return_value = "23451" + idrac_connection_firmware_mock.update_mgr.update_from_dell_repo_url.return_value = { + "InstanceID": "JID_12345678"} + f_module = self.get_module_mock(params=idrac_default_args) + payload = {"ApplyUpdate": "True", 
"CatalogFile": "Catalog.xml", "IgnoreCertWarning": "On", + "RebootNeeded": True, "UserName": "username", "Password": "psw"} + result = self.module.update_firmware_url_omsdk(f_module, idrac_connection_firmware_mock, + "http://downloads.dell.com/repo", + "catalog.xml", True, True, True, True, payload) + assert result[0] == {"InstanceID": "JID_12345678"} + + def _test_update_firmware_redfish(self, idrac_connection_firmware_mock, idrac_default_args, re_match_mock, + mocker, idrac_connection_firmware_redfish_mock, + fileonshare_idrac_firmware_mock): + idrac_default_args.update({"share_name": "192.168.0.1:/share_name", "catalog_file_name": "catalog.xml", + "share_user": "shareuser", "share_password": "sharepswd", + "share_mnt": "sharmnt", + "reboot": True, "job_wait": False, "ignore_cert_warning": True, + "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user", + "idrac_password": "idrac_password", "idrac_port": 443, 'apply_update': True + }) + mocker.patch(MODULE_PATH + "idrac_firmware.SHARE_TYPE", + return_value={"NFS": "NFS"}) + mocker.patch(MODULE_PATH + "idrac_firmware.eval", + return_value={"PackageList": []}) + mocker.patch(MODULE_PATH + "idrac_firmware.wait_for_job_completion", return_value=({}, None)) + f_module = self.get_module_mock(params=idrac_default_args) + re_mock = mocker.patch(MODULE_PATH + "idrac_firmware.re", + return_value=MagicMock()) + re_mock.match(MagicMock(), MagicMock()).group.return_value = "3.60" + mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid", + return_value="23451") + idrac_connection_firmware_mock.idrac.update_mgr.job_mgr.get_job_status_redfish.return_value = "23451" + idrac_connection_firmware_mock.ServerGeneration = "14" + upd_share = fileonshare_idrac_firmware_mock + upd_share.remote_addr.return_value = "192.168.0.1" + upd_share.remote.share_name.return_value = "share_name" + upd_share.remote_share_type.name.lower.return_value = "NFS" + result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, 
f_module) + assert result['update_msg'] == "Successfully triggered the job to update the firmware." + + def _test_get_job_status(self, idrac_connection_firmware_mock, idrac_default_args, + mocker, idrac_connection_firmware_redfish_mock): + idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml", + "share_user": "shareuser", "share_password": "sharepswd", + "share_mnt": "sharmnt", "apply_update": False, + "reboot": True, "job_wait": False, "ignore_cert_warning": True, + "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user", + "idrac_password": "idrac_password", "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"JobStatus": "OK"} + each_comp = {"JobID": "JID_1234567", "Messages": [{"Message": "test_message"}], "JobStatus": "Completed"} + result = self.module.get_job_status(f_module, each_comp, None) + assert result[1] is False + + def test_message_verification(self, idrac_connection_firmware_mock, idrac_connection_firmware_redfish_mock, + idrac_default_args, mocker): + idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml", + "share_user": "shareuser", "share_password": "sharepswd", + "share_mnt": "sharmnt", "apply_update": False, + "reboot": False, "job_wait": True, "ignore_cert_warning": True, + "idrac_ip": "idrac_ip", "idrac_user": "idrac_user", + "idrac_password": "idrac_password", "idrac_port": 443}) + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=("INSTANCENAME", False, False)) + # mocker.patch(MODULE_PATH + "idrac_firmware.re") + idrac_connection_firmware_redfish_mock.success = True + idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"} + f_module = self.get_module_mock(params=idrac_default_args) + result = 
self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Successfully fetched the applicable firmware update package list." + + idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": False}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Successfully triggered the job to stage the firmware." + + idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Successfully staged the applicable firmware update packages." + + idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True}) + mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk", + return_value=({"Status": "Success"}, {"PackageList": []})) + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, True)) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Successfully staged the applicable firmware update packages with error(s)." + + idrac_default_args.update({"apply_update": True, "reboot": True, "job_wait": True}) + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, False)) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Successfully updated the firmware." 
+ + idrac_default_args.update({"apply_update": True, "reboot": True, "job_wait": True}) + mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, True)) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module) + assert result['update_msg'] == "Firmware update failed." diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py new file mode 100644 index 00000000..787dba2c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_firmware_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, PropertyMock +from pytest import importorskip +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from io import StringIO +from ansible.module_utils._text import to_text + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +class TestFirmware(FakeAnsibleModule): + module = idrac_firmware_info + + @pytest.fixture + def idrac_firmware_info_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.update_mgr = idrac_obj + type(idrac_obj).InstalledFirmware = PropertyMock(return_value="msg") + return idrac_obj + + @pytest.fixture + def idrac_firmware_info_connection_mock(self, mocker, idrac_firmware_info_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'idrac_firmware_info.iDRACConnection', + return_value=idrac_firmware_info_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_info_mock + return idrac_firmware_info_mock + + def test_main_idrac_get_firmware_info_success_case01(self, idrac_firmware_info_connection_mock, + idrac_default_args): + obj2 = MagicMock() + idrac_firmware_info_connection_mock.update_mgr = obj2 + type(obj2).InstalledFirmware = PropertyMock(return_value={"Status": "Success"}) + result = self._run_module(idrac_default_args) + assert result == {"firmware_info": {"Status": "Success"}, + "msg": "Successfully fetched the firmware inventory details.", + "changed": False} + + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError]) + def test_idrac_get_firmware_info_exception_handling_case(self, idrac_firmware_info_connection_mock, + exc_type, mocker, idrac_default_args): + json_str = to_text(json.dumps({"data": "out"})) + obj2 = MagicMock() + idrac_firmware_info_connection_mock.update_mgr = obj2 + if exc_type not in [HTTPError, SSLValidationError]: + type(obj2).InstalledFirmware = PropertyMock(side_effect=exc_type('test')) + else: + type(obj2).InstalledFirmware = PropertyMock(side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: 
+ result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py new file mode 100644 index 00000000..39df4e4c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_job_status_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, PropertyMock +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +class TestLcJobStatus(FakeAnsibleModule): + module = idrac_lifecycle_controller_job_status_info + + @pytest.fixture + def idrac_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.job_mgr = idrac_obj + type(idrac_obj).get_job_status = PropertyMock(return_value="job_id") + return idrac_obj + + @pytest.fixture + def idrac_get_lc_job_status_connection_mock(self, mocker, idrac_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_lifecycle_controller_job_status_info.iDRACConnection', + return_value=idrac_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_mock + return idrac_mock + + def test_main_idrac_get_lc_job_status_success_case01(self, idrac_get_lc_job_status_connection_mock, + idrac_default_args, mocker): + idrac_default_args.update({"job_id": "job_id"}) + idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.return_value = {"Status": "Success"} + result = self._run_module(idrac_default_args) + assert result["changed"] is False + + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError]) + def test_main_exception_handling_case(self, exc_type, mocker, idrac_get_lc_job_status_connection_mock, + idrac_default_args): + idrac_default_args.update({"job_id": "job_id"}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.side_effect = exc_type("url open error") + result = self._run_module(idrac_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.side_effect = exc_type("exception message") + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.side_effect = exc_type('http://testhost.com', 400, + 'http 
error message', + {"accept-type": "application/json"}, + StringIO(json_str)) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py new file mode 100644 index 00000000..49193267 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_jobs +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from mock import MagicMock, PropertyMock +from io import StringIO +from ansible.module_utils._text import to_text +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +class TestDeleteLcJob(FakeAnsibleModule): + module = idrac_lifecycle_controller_jobs + + @pytest.fixture + def idrac_lc_job_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.job_mgr = idrac_obj + type(idrac_obj).delete_job = PropertyMock(return_value="msg") + type(idrac_obj).delete_all_jobs = PropertyMock(return_value="msg") + return idrac_obj + + @pytest.fixture + def idrac_connection_delete_lc_job_queue_mock(self, mocker, idrac_lc_job_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_lifecycle_controller_jobs.iDRACConnection', return_value=idrac_lc_job_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_lc_job_mock + return idrac_lc_job_mock + + def test_main_idrac_lc_job_success_case01(self, idrac_connection_delete_lc_job_queue_mock, idrac_default_args): + idrac_default_args.update({"job_id": "job_id"}) + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.return_value = {"Status": "Success"} + result = self._run_module(idrac_default_args) + assert result == {'changed': True, 'msg': 'Successfully deleted the job.', 'status': {'Status': 'Success'}} + + def test_main_idrac_lc_job_success_case02(self, idrac_connection_delete_lc_job_queue_mock, idrac_default_args): + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_all_jobs.return_value = {"Status": "Success"} + result = self._run_module(idrac_default_args) + assert result == {'changed': True, 'msg': 'Successfully deleted the job queue.', 'status': {'Status': 'Success'}} + + def test_main_idrac_delete_lc_job_failure_case(self, idrac_connection_delete_lc_job_queue_mock, idrac_default_args): + idrac_default_args.update({"job_id": "job_id"}) + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.return_value = {"Status": "Error"} + result = self._run_module_with_fail_json(idrac_default_args) + assert result == {'failed': True, 'msg': "Failed to delete the Job: {0}.".format("job_id"), + 'status': {'Status': 
'Error'}} + + @pytest.mark.parametrize("exc_type", [URLError, HTTPError, ImportError, ValueError, RuntimeError, TypeError]) + def test_main_exception_handling_idrac_lc_job_case(self, exc_type, idrac_connection_delete_lc_job_queue_mock, + idrac_default_args): + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_all_jobs.side_effect = exc_type('test') + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.side_effect = exc_type('test') + else: + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_all_jobs.side_effect = \ + exc_type('http://testhost.com', 400, 'http error message', {"accept-type": "application/json"}, + StringIO(json_str)) + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.side_effect = \ + exc_type('http://testhost.com', 400, 'http error message', {"accept-type": "application/json"}, + StringIO(json_str)) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_all_jobs + idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py new file mode 100644 index 00000000..c1a0894e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_logs +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestExportLcLogs(FakeAnsibleModule): + module = idrac_lifecycle_controller_logs + + @pytest.fixture + def idrac_export_lc_logs_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.log_mgr = idrac_obj + return idrac_obj + + @pytest.fixture + def idrac_connection_export_lc_logs_mock(self, mocker, idrac_export_lc_logs_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.iDRACConnection', + return_value=idrac_export_lc_logs_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_export_lc_logs_mock + return idrac_export_lc_logs_mock + + @pytest.fixture + def idrac_file_manager_export_lc_logs_mock(self, mocker): + try: + lclog_file_name_format = "%ip_%Y%m%d_%H%M%S_LC_Log.log" + file_manager_obj = mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + 
file_manager_obj.myshare.new_file(lclog_file_name_format).return_value = obj + return file_manager_obj + + def test_main_export_lc_logs_success_case(self, idrac_connection_export_lc_logs_mock, idrac_default_args, mocker, + idrac_file_manager_export_lc_logs_mock): + idrac_default_args.update({"share_name": "sharename", "share_user": "shareuser", + "share_password": "sharepassword", "job_wait": True}) + message = {"Status": "Success", "JobStatus": "Success"} + mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs', return_value=message) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully exported the lifecycle controller logs." + + def test_run_export_lc_logs_success_case01(self, idrac_connection_export_lc_logs_mock, idrac_default_args, + idrac_file_manager_export_lc_logs_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "share_password": "sharepassword", "job_wait": True}) + idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {"Status": "Success"} + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_export_lc_logs(idrac_connection_export_lc_logs_mock, f_module) + assert msg == {'Status': 'Success'} + + def test_run_export_lc_logs_status_fail_case01(self, idrac_connection_export_lc_logs_mock, idrac_default_args, + idrac_file_manager_export_lc_logs_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "share_password": "sharepassword", "job_wait": True}) + idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {"Status": "failed"} + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_export_lc_logs(idrac_connection_export_lc_logs_mock, f_module) + assert msg == {'Status': 'failed'} + + @pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, 
KeyError, + ImportError, ValueError, TypeError, HTTPError, URLError]) + def test_main_export_lc_logs_exception_handling_case(self, exc_type, mocker, idrac_connection_export_lc_logs_mock, + idrac_default_args, idrac_file_manager_export_lc_logs_mock): + idrac_default_args.update({"share_name": "sharename", "share_user": "shareuser", + "share_password": "sharepassword", "job_wait": True}) + idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {"Status": "Failed"} + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs', + side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py new file mode 100644 index 00000000..d00e2bc0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_status_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from mock import PropertyMock +from io import StringIO +from ansible.module_utils._text import to_text +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestLcStatus(FakeAnsibleModule): + module = idrac_lifecycle_controller_status_info + + @pytest.fixture + def idrac_lc_status_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).LCStatus = Mock(return_value="lcstatus") + type(idrac_obj).LCReady = Mock(return_value="lcready") + return idrac_obj + + @pytest.fixture + def idrac_connection_lcstatus_mock(self, mocker, idrac_lc_status_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_status_info.iDRACConnection', + return_value=idrac_lc_status_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_lc_status_mock + return idrac_lc_status_mock + + def test_main_get_lcstatus_success_case01(self, idrac_connection_lcstatus_mock, idrac_default_args): + obj2 = MagicMock() + idrac_connection_lcstatus_mock.config_mgr = obj2 + type(obj2).LCStatus = PropertyMock(return_value="lcstatus") + type(obj2).LCReady = PropertyMock(return_value="lcready") + result = self._run_module(idrac_default_args) + 
assert result['lc_status_info']['LCReady'] == "lcready" + assert result['lc_status_info']['LCStatus'] == "lcstatus" + + @pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, HTTPError, URLError]) + def test_main_get_lcstatus_exception_handling_case(self, exc_type, idrac_connection_lcstatus_mock, + idrac_default_args): + obj2 = MagicMock() + idrac_connection_lcstatus_mock.config_mgr = obj2 + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + type(obj2).LCReady = PropertyMock(side_effect=exc_type("url open error")) + result = self._run_module(idrac_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + type(obj2).LCReady = PropertyMock(side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + assert 'msg' in result + else: + type(obj2).LCReady = PropertyMock(side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py new file mode 100644 index 00000000..10f7183f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 6.0.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_network +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestConfigNetwork(FakeAnsibleModule): + module = idrac_network + + @pytest.fixture + def idrac_configure_network_mock(self): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).create_share_obj = Mock(return_value="networkstatus") + type(idrac_obj).set_liason_share = Mock(return_value="networkstatus") + return idrac_obj + + @pytest.fixture + def idrac_file_manager_config_networking_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + MODULE_PATH + 'idrac_network.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def idrac_connection_configure_network_mock(self, mocker, idrac_configure_network_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_network.iDRACConnection', + return_value=idrac_configure_network_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_network_mock + return idrac_configure_network_mock 
+ + def test_main_idrac_configure_network_success_case(self, idrac_connection_configure_network_mock, mocker, + idrac_default_args, idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None}) + message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}} + mocker.patch(MODULE_PATH + 'idrac_network.run_idrac_network_config', return_value=message) + result = self._run_module(idrac_default_args) + assert result == {'msg': 'Successfully configured the idrac network settings.', + 'network_status': { + 'changed': False, + 'msg': {'Status': 'Success', 'message': 'No changes found to commit!'}}, + 'changed': False, 'failed': False} + status_msg = {"Status": "Success", "Message": "No changes found to commit!"} + mocker.patch(MODULE_PATH + 'idrac_network.run_idrac_network_config', return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully configured the idrac network settings." + status_msg = {"Status": "Success", "Message": "No changes were applied"} + mocker.patch(MODULE_PATH + 'idrac_network.run_idrac_network_config', return_value=status_msg) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully configured the idrac network settings." 
+ + def test_run_idrac_network_config_success_case01(self, idrac_connection_configure_network_mock, idrac_default_args, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {"changes_applicable": True, "message": "changes are applicable"} + idrac_connection_configure_network_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'changes_applicable': True, 'message': 'changes are applicable'} + + def test_run_idrac_network_config_success_case02(self, idrac_connection_configure_network_mock, idrac_default_args, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": 
"Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True, + "Status": "Success"} + idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'Status': 'Success', + 'changed': True, + 'changes_applicable': True, + 'message': 'changes found to commit!'} + + def test_run_idrac_network_config_success_case03(self, idrac_connection_configure_network_mock, idrac_default_args, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + 
idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'Message': 'No changes found to commit!', + 'Status': 'Success', + 'changed': False, + 'changes_applicable': False} + + def test_run_idrac_network_config_success_case04(self, idrac_connection_configure_network_mock, + idrac_default_args, idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "Success"} + idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'Message': 'No changes were applied', + 'Status': 'Success', + 'changed': False, + 'changes_applicable': False} + + def test_run_idrac_network_config_success_case05(self, idrac_connection_configure_network_mock, 
idrac_default_args, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": None, + "dns_idrac_name": None, "auto_config": None, "static_dns": None, + "setup_idrac_nic_vlan": None, "vlan_id": None, "vlan_priority": None, + "enable_nic": None, "nic_selection": None, + "failover_network": None, "auto_detect": None, "auto_negotiation": None, + "network_speed": None, "duplex_mode": None, "nic_mtu": None, + "enable_dhcp": None, "ip_address": None, "enable_ipv4": None, + "dns_from_dhcp": None, "static_dns_1": None, "static_dns_2": None, + "static_gateway": None, "static_net_mask": None}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "Success"} + idrac_connection_configure_network_mock.config_mgr.configure_dns.return_value = message + idrac_connection_configure_network_mock.config_mgr.configure_nic_vlan.return_value = message + idrac_connection_configure_network_mock.config_mgr.configure_network_settings.return_value = message + idrac_connection_configure_network_mock.config_mgr.configure_ipv4.return_value = message + idrac_connection_configure_network_mock.config_mgr.configure_static_ipv4.return_value = message + idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'Message': 'No changes were applied', + 'Status': 'Success', + 'changed': False, + 'changes_applicable': False} + + def test_run_idrac_network_config_failed_case01(self, idrac_connection_configure_network_mock, idrac_default_args, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": 
None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}} + idrac_connection_configure_network_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_configure_network_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + result = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert result == idrac_connection_configure_network_mock.config_mgr.is_change_applicable() + + def test_run_idrac_network_config_failed_case02(self, idrac_connection_configure_network_mock, + idrac_default_args, idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": 
"Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False, + "Status": "failed"} + idrac_connection_configure_network_mock.config_mgr.apply_changes.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == {'Message': 'No changes were applied', 'Status': 'failed', 'changed': False, + 'changes_applicable': False} + + def test_run_idrac_network_config_failed_case03(self, idrac_connection_configure_network_mock, + idrac_default_args, idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None, + "share_password": None, "register_idrac_on_dns": "Enabled", + "dns_idrac_name": "testname", "auto_config": "Disabled", "static_dns": "staticdns", + "setup_idrac_nic_vlan": "Enabled", "vlan_id": 4, "vlan_priority": "Enabled", + "enable_nic": "Enabled", "nic_selection": "Dedicated", + "failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled", + "network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu", + "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled", + "dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1", + "static_dns_2": "staticdns2", "static_gateway": "staticgateway", + "static_net_mask": "staticnetmask"}) + message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}} + idrac_connection_configure_network_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_configure_network_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) 
+ msg = self.module.run_idrac_network_config(idrac_connection_configure_network_mock, f_module) + assert msg == idrac_connection_configure_network_mock.config_mgr.is_change_applicable() + + @pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError, HTTPError, URLError]) + def test_main_idrac_configure_network_exception_handling_case(self, exc_type, mocker, idrac_default_args, + idrac_connection_configure_network_mock, + idrac_file_manager_config_networking_mock): + idrac_default_args.update({"share_name": None}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'idrac_network.run_idrac_network_config', + side_effect=exc_type('test')) + else: + mocker.patch( + MODULE_PATH + 'idrac_network.run_idrac_network_config', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py new file mode 100644 index 00000000..d8967356 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_os_deployment +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import set_module_args, exit_json, \ + fail_json, AnsibleFailJson, AnsibleExitJson +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.' + + +class TestOsDeployment(FakeAnsibleModule): + module = idrac_os_deployment + + @pytest.fixture + def idrac_connection_mock(self, mocker, idrac_mock): + idrac_connection_class_mock = mocker.patch( + MODULE_PATH + 'idrac_os_deployment.iDRACConnection') + # idrac_connection_class_mock.return_value = idrac_mock + idrac_connection_class_mock.return_value.__enter__.return_value = idrac_mock + return idrac_connection_class_mock + + @pytest.fixture + def idrac_mock(self, mocker): + sdkinfra_obj = mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.sdkinfra') + obj = MagicMock() + sdkinfra_obj.get_driver.return_value = obj + return sdkinfra_obj + + @pytest.fixture + def omsdk_mock(self, mocker): + mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.UserCredentials') + mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.WsManOptions') + + @pytest.fixture + def fileonshare_mock(self, mocker): + share_mock = mocker.patch(MODULE_PATH + 'idrac_os_deployment.FileOnShare', + return_value=MagicMock()) + return share_mock + + @pytest.fixture + def minutes_to_cim_format_mock(self, mocker): + validate_device_inputs_mock = mocker.patch( + MODULE_PATH + 
'idrac_os_deployment.minutes_to_cim_format') + validate_device_inputs_mock.return_value = "time" + + @pytest.mark.parametrize("expose_duration_val", ["abc", None, "", 1.5, {"abc": 1}, [110, 210, 300], [120]]) + def test_main_failure_case_01(self, expose_duration_val, idrac_default_args, module_mock): + """when invalid value for expose_durationis given """ + idrac_default_args.update({"iso_image": "iso_image"}) + idrac_default_args.update({"expose_duration": expose_duration_val}) + result = self._run_module_with_fail_json(idrac_default_args) + + def test_main_failure_case_02(self, module_mock, idrac_default_args): + """when required arg iso_image is not passed""" + idrac_default_args.update({"iso_image": "iso_image"}) + result = self._run_module_with_fail_json(idrac_default_args) + + def test_main_failure_case_03(self, module_mock, idrac_default_args): + """when invalid ansible option is given""" + idrac_default_args.update({"iso_image": "iso_image", "invalid_key": "val"}) + result = self._run_module_with_fail_json(idrac_default_args) + + def test_main_run_boot_to_network_iso_success_case01(self, idrac_connection_mock, idrac_mock, module_mock, + fileonshare_mock, omsdk_mock, minutes_to_cim_format_mock): + idrac_connection_mock.return_value.__enter__.return_value = idrac_mock + idrac_mock.config_mgr.boot_to_network_iso.return_value = {"Status": "Success"} + params = {"idrac_ip": "idrac_ip", "idrac_user": "idrac_user", "idrac_password": "idrac_password", + "ca_path": "/path/to/ca_cert.pem", + "share_name": "dummy_share_name", "share_password": "dummy_share_password", + "iso_image": "dummy_iso_image", "expose_duration": "100" + } + set_module_args(params) + result = self._run_module(params) + assert result == {'changed': True, 'boot_status': {'Status': 'Success'}} + + def test_main_run_boot_to_network_iso_success_case02(self, idrac_connection_mock, idrac_mock, module_mock, + fileonshare_mock, omsdk_mock, minutes_to_cim_format_mock): + """share_name None case""" + 
idrac_connection_mock.return_value.__enter__.return_value = idrac_mock + idrac_mock.config_mgr.boot_to_network_iso.return_value = {"Status": "Success"} + params = {"idrac_ip": "idrac_ip", "idrac_user": "idrac_user", "idrac_password": "idrac_password", + "ca_path": "/path/to/ca_cert.pem", + "share_name": None, "share_password": "dummy_share_password", + "iso_image": "dummy_iso_image", "expose_duration": "100" + } + set_module_args(params) + result = self._run_module(params) + assert result == {'changed': True, 'boot_status': {'Status': 'Success'}} + + def test_main_run_boot_to_network_iso_fleonshare_failure_case(self, idrac_connection_mock, idrac_mock, module_mock, + fileonshare_mock, omsdk_mock, + minutes_to_cim_format_mock): + idrac_connection_mock.return_value.__enter__.return_value = idrac_mock + fileonshare_mock.side_effect = RuntimeError("Error in Runtime") + params = {"idrac_ip": "idrac_ip", "idrac_user": "idrac_user", "idrac_password": "idrac_password", + "ca_path": "/path/to/ca_cert.pem", + "share_name": "invalid_share_name", "share_password": "dummy_share_password", + "iso_image": "dummy_iso_image", "expose_duration": "100" + } + set_module_args(params) + result = self._run_module_with_fail_json(params) + assert result == {'failed': True, 'msg': 'Error in Runtime'} + + def test_main_run_boot_to_network_iso_failure_case(self, idrac_connection_mock, idrac_mock, module_mock, + fileonshare_mock, omsdk_mock, minutes_to_cim_format_mock): + idrac_mock.config_mgr.boot_to_network_iso.return_value = {"Status": "Failure"} + params = {"idrac_ip": "idrac_ip", "idrac_user": "idrac_user", "idrac_password": "idrac_password", + "ca_path": "/path/to/ca_cert.pem", + "share_name": "dummy_share_name", "share_password": "dummy_share_password", + "iso_image": "dummy_iso_image", "expose_duration": "100" + } + set_module_args(params) + result = self._run_module_with_fail_json(params) + assert result['failed'] is True + + def test_minutes_to_cim_format_success_case_01(self, 
module_mock): + result = self.module.minutes_to_cim_format(module_mock, 180) + assert result == '00000000030000.000000:000' + + def test_minutes_to_cim_format_success_case_02(self, module_mock): + result = self.module.minutes_to_cim_format(module_mock, 0) + assert result == '00000000000000.000000:000' + + def test_minutes_to_cim_format_success_case_03(self, module_mock): + """when day>0 condition""" + result = self.module.minutes_to_cim_format(module_mock, 2880) + assert result == '00000002230000.000000:000' + + def test_minutes_to_cim_format_failure_case(self): + fmodule = self.get_module_mock() + with pytest.raises(Exception) as exc: + set_module_args({}) + self.module.minutes_to_cim_format(fmodule, -1) + assert exc.value.args[0] == "Invalid value for ExposeDuration." + + @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError]) + def test_main_idrac_os_deployment_exception_handling_case(self, exc_type, mocker, idrac_connection_mock, + idrac_default_args, idrac_mock, fileonshare_mock, + omsdk_mock): + idrac_default_args.update({"iso_image": "iso_image", "share_name": "share_name"}) + idrac_default_args.update({"expose_duration": 10}) + mocker.patch(MODULE_PATH + 'idrac_os_deployment.run_boot_to_network_iso', + side_effect=exc_type('test')) + result = self._run_module_with_fail_json(idrac_default_args) + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py new file mode 100644 index 00000000..99185a93 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.3.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_redfish_storage_controller +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import urllib_error + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def redfish_str_controller_conn(mocker, redfish_response_mock): + connection_class_mock = mocker.patch( + MODULE_PATH + 'idrac_redfish_storage_controller.Redfish') + idrac_redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + idrac_redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock + return idrac_redfish_connection_mock_obj + + +class TestIdracRedfishStorageController(FakeAnsibleModule): + module = idrac_redfish_storage_controller + + def test_check_id_exists(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password"} + uri = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{controller_id}" + f_module = self.get_module_mock(params=param) + redfish_response_mock.success = True + redfish_response_mock.status_code = 200 + result = self.module.check_id_exists(f_module, redfish_str_controller_conn, "controller_id", + "RAID.Integrated.1-1", uri) + assert result is None + redfish_response_mock.success = False + redfish_response_mock.status_code = 400 + with pytest.raises(Exception) as ex: + 
self.module.check_id_exists(f_module, redfish_str_controller_conn, "controller_id", + "RAID.Integrated.1-1", uri) + assert ex.value.args[0] == "controller_id with id 'RAID.Integrated.1-1' not found in system" + + def test_validate_inputs(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "ReKey", "mode": "LKM"} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as ex: + self.module.validate_inputs(f_module) + assert ex.value.args[0] == "All of the following: key, key_id and old_key are required for 'ReKey' operation." + param.update({"command": "AssignSpare", "target": ["Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1", + "Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"]}) + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as ex: + self.module.validate_inputs(f_module) + assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \ + "physical disk must be only one." + param.update({"volume_id": ["Disk.Virtual.0:RAID.Mezzanine.1C-0", + "Disk.Virtual.0:RAID.Mezzanine.1C-1"], "target": None}) + with pytest.raises(Exception) as ex: + self.module.validate_inputs(f_module) + assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \ + "virtual drive must be only one." + param.update({"command": "EnableControllerEncryption"}) + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as ex: + self.module.validate_inputs(f_module) + assert ex.value.args[0] == "All of the following: key, key_id are " \ + "required for 'EnableControllerEncryption' operation." 
+ param.update({"command": "ChangePDStateToOnline", + "target": ["Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1", + "Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1"]}) + with pytest.raises(Exception) as ex: + self.module.validate_inputs(f_module) + assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \ + "physical disk must be only one." + + def test_target_identify_pattern(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "BlinkTarget", "target": "Disk.Bay.1:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1", + "volume_id": "Disk.Virtual.0:RAID.Mezzanine.1C-1"} + f_module = self.get_module_mock(params=param) + redfish_response_mock.success = True + redfish_response_mock.status_code = 200 + result = self.module.target_identify_pattern(f_module, redfish_str_controller_conn) + assert result.status_code == 200 + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.target_identify_pattern(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." 
+ + def test_ctrl_reset_config(self, redfish_str_controller_conn, redfish_response_mock, mocker): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "controller_id": "RAID.Mezzanine.1C-1", "command": "ResetConfig"} + f_module = self.get_module_mock(params=param) + mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None) + redfish_str_controller_conn.json_data = {"Members": ["virtual_drive"]} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.ctrl_reset_config(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.ctrl_reset_config(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = {"Members": []} + with pytest.raises(Exception) as ex: + self.module.ctrl_reset_config(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." + + def test_hot_spare_config(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "AssignSpare", "target": "Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"} + f_module = self.get_module_mock(params=param) + redfish_response_mock.json_data = {"HotspareType": "None"} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.hot_spare_config(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.hot_spare_config(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." 
+ redfish_response_mock.json_data = {"HotspareType": "Global"} + with pytest.raises(Exception) as ex: + self.module.hot_spare_config(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." + + def test_ctrl_key(self, redfish_str_controller_conn, redfish_response_mock, mocker): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "SetControllerKey", "controller_id": "RAID.Integrated.1-1", "mode": "LKM"} + mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None) + f_module = self.get_module_mock(params=param) + redfish_response_mock.json_data = {"SecurityStatus": "EncryptionNotCapable", "KeyID": None} + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "The storage controller 'RAID.Integrated.1-1' does not support encryption." + f_module.check_mode = True + redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None} + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": "Key@123"} + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + param.update({"command": "ReKey"}) + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." 
+ param.update({"command": "RemoveControllerKey"}) + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None} + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." + param.update({"command": "EnableControllerEncryption"}) + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = {"SecurityStatus": "SecurityKeyAssigned", "KeyID": None} + with pytest.raises(Exception) as ex: + self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." 
+ f_module.check_mode = False + redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.ctrl_key(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + + def test_convert_raid_status(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "ConvertToRAID", "target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1", + "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"]} + f_module = self.get_module_mock(params=param) + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "NonRAID"}}}} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.convert_raid_status(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.convert_raid_status(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + f_module.check_mode = False + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "Ready"}}}} + with pytest.raises(Exception) as ex: + self.module.convert_raid_status(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." 
+ + def test_change_pd_status(self, redfish_str_controller_conn, redfish_response_mock): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "ChangePDStateToOnline", + "target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1", + "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"]} + f_module = self.get_module_mock(params=param) + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "NonRAID"}}}} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.change_pd_status(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.change_pd_status(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + f_module.check_mode = False + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "Online"}}}} + with pytest.raises(Exception) as ex: + self.module.change_pd_status(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." 
+ + def test_lock_virtual_disk(self, redfish_str_controller_conn, redfish_response_mock, mocker): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "LockVirtualDisk", + "volume_id": "Disk.Virtual.0:RAID.SL.3-1"} + f_module = self.get_module_mock(params=param) + mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None) + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellVolume": {"LockStatus": "Unlocked"}}}} + redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"} + result = self.module.lock_virtual_disk(f_module, redfish_str_controller_conn) + assert result[2] == "JID_XXXXXXXXXXXXX" + f_module.check_mode = True + with pytest.raises(Exception) as ex: + self.module.lock_virtual_disk(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "Changes found to be applied." + f_module.check_mode = False + redfish_response_mock.json_data = {"Oem": {"Dell": {"DellVolume": {"LockStatus": "Locked"}}}} + with pytest.raises(Exception) as ex: + self.module.lock_virtual_disk(f_module, redfish_str_controller_conn) + assert ex.value.args[0] == "No changes found to be applied." 
+ + @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError, + ImportError, ValueError, TypeError]) + def test_main_error(self, redfish_str_controller_conn, redfish_response_mock, mocker, + exc_type, redfish_default_args): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "ResetConfig", "controller_id": "RAID.Integrated.1-1"} + redfish_default_args.update(param) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.validate_inputs', return_value=None) + redfish_response_mock.success = False + redfish_response_mock.status_code = 400 + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.ctrl_reset_config', + side_effect=exc_type("url open error")) + result = self._run_module(redfish_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.ctrl_reset_config', + side_effect=exc_type('exception message')) + result = self._run_module_with_fail_json(redfish_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.ctrl_reset_config', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(redfish_default_args) + assert result['failed'] is True + assert 'msg' in result + + def test_main_success(self, redfish_str_controller_conn, redfish_response_mock, redfish_default_args, mocker): + param = {"baseuri": "192.168.0.1", "username": "username", "password": "password", + "command": "SetControllerKey", "key": "Key@123", "key_id": "keyid@123", + "controller_id": "RAID.Integrated.1-1", + "target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"]} + redfish_default_args.update(param) + 
mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.validate_inputs', return_value=None) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.ctrl_key', + return_value=("", "", "JID_XXXXXXXXXXXXX")) + result = self._run_module(redfish_default_args) + assert result["task"]["id"] == "JID_XXXXXXXXXXXXX" + param.update({"command": "AssignSpare"}) + redfish_default_args.update(param) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.hot_spare_config', + return_value=("", "", "JID_XXXXXXXXXXXXX")) + result = self._run_module(redfish_default_args) + assert result["task"]["id"] == "JID_XXXXXXXXXXXXX" + param.update({"command": "BlinkTarget"}) + redfish_default_args.update(param) + redfish_response_mock.status_code = 200 + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.target_identify_pattern', + return_value=redfish_response_mock) + result = self._run_module(redfish_default_args) + assert result["msg"] == "Successfully performed the 'BlinkTarget' operation." 
+ param.update({"command": "ConvertToRAID"}) + redfish_default_args.update(param) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.convert_raid_status', + return_value=("", "", "JID_XXXXXXXXXXXXX")) + result = self._run_module(redfish_default_args) + assert result["task"]["id"] == "JID_XXXXXXXXXXXXX" + param.update({"command": "ChangePDStateToOnline", "job_wait": True}) + redfish_default_args.update(param) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.change_pd_status', + return_value=("", "", "JID_XXXXXXXXXXXXX")) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.wait_for_job_completion', + return_value=(redfish_response_mock, "")) + mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.strip_substr_dict', + return_value={"JobState": "Failed"}) + result = self._run_module(redfish_default_args) + assert result["task"]["id"] == "JID_XXXXXXXXXXXXX" diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py new file mode 100644 index 00000000..3f4ca497 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_reset +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from mock import MagicMock, patch, Mock +from io import StringIO +from ansible.module_utils._text import to_text + +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def idrac_reset_connection_mock(mocker, idrac_mock): + idrac_connection_class_mock = mocker.patch(MODULE_PATH + 'idrac_reset.iDRACConnection') + idrac_connection_class_mock.return_value.__enter__.return_value = idrac_mock + return idrac_mock + + +class TestReset(FakeAnsibleModule): + module = idrac_reset + + @pytest.fixture + def idrac_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.config_mgr = idrac_obj + type(idrac_obj).reset_idrac = Mock(return_value="idracreset") + return idrac_obj + + @pytest.fixture + def idrac_config_mngr_reset_mock(self, mocker): + try: + config_manager_obj = mocker.patch(MODULE_PATH + 'idrac_reset.config_mgr') + except AttributeError: + config_manager_obj = MagicMock() + obj = MagicMock() + config_manager_obj.config_mgr.return_value = obj + config_manager_obj.config_mgr.reset_idrac().return_value = obj + return config_manager_obj + + def test_main_idrac_reset_success_case01(self, idrac_reset_connection_mock, idrac_default_args, mocker): + mocker.patch(MODULE_PATH + "idrac_reset.run_idrac_reset", + return_value=({"Status": "Success"}, 
False)) + idrac_reset_connection_mock.config_mgr.reset_idrac.return_value = {"Status": "Success"} + idrac_reset_connection_mock.config_mgr.reset_idrac.return_value = "Success" + result = self._run_module(idrac_default_args) + assert result == {'msg': 'Successfully performed iDRAC reset.', + 'reset_status': ({'Status': 'Success'}, False), 'changed': False} + + def test_run_idrac_reset_success_case01(self, idrac_reset_connection_mock, idrac_default_args): + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.run_idrac_reset(idrac_reset_connection_mock, f_module) + assert result == idrac_reset_connection_mock.config_mgr.reset_idrac() + + def test_run_idrac_reset_status_success_case02(self, idrac_reset_connection_mock, idrac_default_args): + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + result = self.module.run_idrac_reset(idrac_reset_connection_mock, f_module) + assert result == {'Message': 'Changes found to commit!', 'Status': 'Success', 'changes_applicable': True} + + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError]) + def test_main_exception_handling_case(self, exc_type, mocker, idrac_reset_connection_mock, idrac_default_args): + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'idrac_reset.run_idrac_reset', side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + 'idrac_reset.run_idrac_reset', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py new file mode 100644 index 00000000..16d5b030 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.4.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_server_config_profile +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants,\ + AnsibleExitJson +from mock import MagicMock, patch, Mock, mock_open +from pytest import importorskip +from ansible.module_utils.six.moves.urllib.parse import urlparse, ParseResult +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + + +class TestServerConfigProfile(FakeAnsibleModule): + module = idrac_server_config_profile + + @pytest.fixture + def idrac_server_configure_profile_mock(self, mocker): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + return idrac_obj + + @pytest.fixture + def idrac_file_manager_server_config_profile_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + MODULE_PATH + 'idrac_server_config_profile.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def idrac_scp_redfish_mock(self, mocker, idrac_server_configure_profile_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'idrac_server_config_profile.iDRACRedfishAPI', + return_value=idrac_server_configure_profile_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_server_configure_profile_mock + return idrac_server_configure_profile_mock + + def test_run_export_import_http(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "export", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", "export_use": "Default"}) + f_module = self.get_module_mock(params=idrac_default_args) + export_response = {"msg": "Successfully exported the Server Configuration Profile.", + "scp_status": {"Name": "Export: Server Configuration Profile", "PercentComplete": 100, + "TaskState": "Completed", "TaskStatus": "OK", "Id": "JID_236654661194"}} + mocker.patch(MODULE_PATH + "idrac_server_config_profile.urlparse", + return_value=ParseResult(scheme='http', 
netloc='192.168.0.1', + path='/share/', + params='', query='', fragment='')) + mocker.patch(MODULE_PATH + "idrac_server_config_profile.response_format_change", + return_value=export_response) + result = self.module.run_export_import_scp_http(idrac_scp_redfish_mock, f_module) + assert result["msg"] == "Successfully exported the Server Configuration Profile." + idrac_default_args.update({"command": "import"}) + f_module = self.get_module_mock(params=idrac_default_args) + import_response = {"msg": "Successfully imported the Server Configuration Profile.", + "scp_status": {"Name": "Import: Server Configuration Profile", "PercentComplete": 100, + "TaskState": "Completed", "TaskStatus": "OK", "Id": "JID_236654661194"}} + mocker.patch(MODULE_PATH + "idrac_server_config_profile.response_format_change", + return_value=import_response) + result = self.module.run_export_import_scp_http(idrac_scp_redfish_mock, f_module) + assert result["msg"] == "Successfully imported the Server Configuration Profile." + + def test_http_share_msg_main(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "http://192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": False, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False}) + share_return = {"Oem": {"Dell": {"MessageId": "SYS069"}}} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value=share_return) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully triggered the job to import the Server Configuration Profile." 
+ share_return = {"Oem": {"Dell": {"MessageId": "SYS053"}}} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value=share_return) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully triggered the job to import the Server Configuration Profile." + idrac_default_args.update({"command": "export"}) + share_return = {"Oem": {"Dell": {"MessageId": "SYS043"}}} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value=share_return) + result = self._run_module(idrac_default_args) + assert result["msg"] == "Successfully triggered the job to export the Server Configuration Profile." + + def test_export_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": False, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False}) + f_module = self.get_module_mock(params=idrac_default_args) + share_return = {"Oem": {"Dell": {"MessageId": "SYS069"}}} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value=share_return) + f_module.check_mode = False + result = self.module.export_scp_redfish(f_module, idrac_scp_redfish_mock) + assert result["file"] == "192.168.0.1:/share/scp_file.xml" + idrac_default_args.update({"share_name": "\\\\100.96.16.123\\cifsshare"}) + result = self.module.export_scp_redfish(f_module, idrac_scp_redfish_mock) + assert result["file"] == "\\\\100.96.16.123\\cifsshare\\scp_file.xml" + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change', + return_value={"TaskStatus": "Critical"}) + with pytest.raises(Exception) as ex: + self.module.export_scp_redfish(f_module, 
idrac_scp_redfish_mock) + assert ex.value.args[0] == "Failed to import scp." + + def test_response_format_change(self, idrac_scp_redfish_mock, idrac_default_args): + idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False}) + f_module = self.get_module_mock(params=idrac_default_args) + idrac_scp_redfish_mock.json_data = {"Oem": {"Dell": {"key": "value"}}} + result = self.module.response_format_change(idrac_scp_redfish_mock, f_module, "export_scp.yml") + assert result["key"] == "value" + idrac_default_args.update({"command": "export"}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.response_format_change(idrac_scp_redfish_mock, f_module, "export_scp.yml") + assert result["key"] == "value" + + def test_preview_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare", "share_user": "sharename", + "share_password": "sharepswd", "command": "preview", "job_wait": True, + "scp_components": "IDRAC", "scp_file": "scp_file.xml", + "end_host_power_state": "On", "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password", + "job_wait": True} + f_module.check_mode = False + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details', + return_value=(share, "scp_file.xml")) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change', + return_value={"Status": "Success"}) + result = 
self.module.preview_scp_redfish(f_module, idrac_scp_redfish_mock, True, import_job_wait=False) + assert result["Status"] == "Success" + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change', + return_value={"TaskStatus": "Critical"}) + with pytest.raises(Exception) as ex: + self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True) + assert ex.value.args[0] == "Failed to preview scp." + idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare", "share_user": "sharename", + "share_password": "sharepswd", "command": "preview", "job_wait": True, + "scp_components": "IDRAC", "scp_file": "scp_file.xml", + "end_host_power_state": "On", "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password", + "job_wait": True, "share_type": "LOCAL", "share_name": "share_name"} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details', + return_value=(share, "scp_file.xml")) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.exists', + return_value=False) + with pytest.raises(Exception) as ex: + self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, False) + assert ex.value.args[0] == "Invalid file path provided." 
+ + def test_import_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = True + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.preview_scp_redfish', + return_value={"MessageId": "SYS081"}) + with pytest.raises(Exception) as ex: + self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True) + assert ex.value.args[0] == "Changes found to be applied." + idrac_default_args.update({"share_name": "http://192.168.0.1/http-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change', + return_value={"Status": "Success"}) + result = self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True) + assert result["Status"] == "Success" + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change', + return_value={"TaskStatus": "Critical"}) + with pytest.raises(Exception) as ex: + self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True) + assert ex.value.args[0] == "Failed to import scp." 
+ idrac_default_args.update({"share_name": "local-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password", + "job_wait": True, "share_type": "LOCAL", "share_name": "share_name"} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details', + return_value=(share, "scp_file.xml")) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.exists', + return_value=False) + with pytest.raises(Exception) as ex: + self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, False) + assert ex.value.args[0] == "Invalid file path provided." 
+ + def test_get_scp_file_format(self, idrac_scp_redfish_mock, idrac_default_args): + idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.get_scp_file_format(f_module) + assert result == "scp_file.xml" + idrac_default_args.update({"scp_file": None}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module.get_scp_file_format(f_module) + assert result.startswith("idrac_ip_") is True + + def test_main_success_case(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "http://192.168.0.1/http-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "import", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value={"MessageId": "SYS069"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {'MessageId': 'SYS069'} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http', + return_value={"MessageId": "SYS053"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {'MessageId': 'SYS053'} + idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare"}) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish', + return_value={"Message": "No changes were applied since the current component 
configuration " + "matched the requested configuration"}) + result = self._run_module(idrac_default_args) + assert result["changed"] is False + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish', + return_value={"MessageId": "SYS043"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {'MessageId': 'SYS043'} + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish', + return_value={"MessageId": "SYS069"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {'MessageId': 'SYS069'} + idrac_default_args.update({"command": "export"}) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.export_scp_redfish', + return_value={"Status": "Success"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {'Status': 'Success'} + idrac_default_args.update({"command": "preview"}) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.preview_scp_redfish', + return_value={"MessageId": "SYS081"}) + result = self._run_module(idrac_default_args) + assert result["scp_status"] == {"MessageId": "SYS081"} + + def test_get_scp_share_details(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "export", + "job_wait": True, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_file_format', + return_value="export_scp.xml") + result = self.module.get_scp_share_details(f_module) + assert result[1] == "export_scp.xml" + + def test_wait_for_response(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + 
idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "export", + "job_wait": False, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "XML", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + idrac_scp_redfish_mock.headers = {"Location": "/redfish/v1/TaskService/Tasks/JID_123456789"} + resp_return_value = {"return_data": b"" + b"" + b"0" + b" ", + "return_job": {"JobState": "Completed", "JobType": "ExportConfiguration", + "PercentComplete": 100, "Status": "Success"}} + idrac_scp_redfish_mock.wait_for_job_complete.return_value = resp_return_value["return_data"] + idrac_scp_redfish_mock.job_resp = resp_return_value["return_job"] + share = {"share_name": "/local_share", "file_name": "export_file.xml"} + if sys.version_info.major == 3: + builtin_module_name = 'builtins' + else: + builtin_module_name = '__builtin__' + with patch("{0}.open".format(builtin_module_name), mock_open(read_data=resp_return_value["return_data"])) as mock_file: + result = self.module.wait_for_response(idrac_scp_redfish_mock, f_module, share, idrac_scp_redfish_mock) + assert result.job_resp == resp_return_value["return_job"] + + def test_wait_for_response_json(self, idrac_scp_redfish_mock, idrac_default_args, mocker): + idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename", + "share_password": "sharepswd", "command": "export", + "job_wait": False, "scp_components": "IDRAC", + "scp_file": "scp_file.xml", "end_host_power_state": "On", + "shutdown_type": "Graceful", "export_format": "JSON", + "export_use": "Default", "validate_certs": False, "idrac_port": 443}) + f_module = self.get_module_mock(params=idrac_default_args) + resp_return_value = {"return_data": { + "SystemConfiguration": {"Components": [ + {"FQDD": 
"SupportAssist.Embedded.1", + "Attributes": [{"Name": "SupportAssist.1#SupportAssistEULAAccepted"}] + }]} + }, + "return_job": {"JobState": "Completed", "JobType": "ExportConfiguration", + "PercentComplete": 100, "Status": "Success"}} + mock_scp_json_data = idrac_scp_redfish_mock + mock_scp_json_data.json_data = resp_return_value["return_data"] + idrac_scp_redfish_mock.wait_for_job_complete.return_value = mock_scp_json_data + idrac_scp_redfish_mock.job_resp = resp_return_value["return_job"] + share = {"share_name": "/local_share", "file_name": "export_file.xml"} + if sys.version_info.major == 3: + builtin_module_name = 'builtins' + else: + builtin_module_name = '__builtin__' + with patch("{0}.open".format(builtin_module_name), mock_open(read_data=str(resp_return_value["return_data"]))) as mock_file: + result = self.module.wait_for_response(idrac_scp_redfish_mock, f_module, share, idrac_scp_redfish_mock) + assert result.job_resp == resp_return_value["return_job"] diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py new file mode 100644 index 00000000..ae89c280 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_syslog +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from io import StringIO +from ansible.module_utils._text import to_text +from pytest import importorskip + +importorskip("omsdk.sdkfile") +importorskip("omsdk.sdkcreds") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestSetupSyslog(FakeAnsibleModule): + module = idrac_syslog + + @pytest.fixture + def idrac_setup_syslog_mock(self): + omsdk_mock = MagicMock() + idrac_obj = MagicMock() + omsdk_mock.file_share_manager = idrac_obj + omsdk_mock.config_mgr = idrac_obj + return idrac_obj + + @pytest.fixture + def idrac_file_manager_mock(self, mocker): + try: + file_manager_obj = mocker.patch( + MODULE_PATH + 'idrac_syslog.file_share_manager') + except AttributeError: + file_manager_obj = MagicMock() + obj = MagicMock() + file_manager_obj.create_share_obj.return_value = obj + return file_manager_obj + + @pytest.fixture + def idrac_connection_setup_syslog_mock(self, mocker, idrac_setup_syslog_mock): + idrac_conn_class_mock = mocker.patch(MODULE_PATH + + 'idrac_syslog.iDRACConnection', return_value=idrac_setup_syslog_mock) + idrac_conn_class_mock.return_value.__enter__.return_value = idrac_setup_syslog_mock + return idrac_setup_syslog_mock + + def test_main_setup_syslog_success_case01(self, idrac_connection_setup_syslog_mock, idrac_default_args, mocker, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", 
'share_password': None, "syslog": "Enabled", + 'share_mnt': None, 'share_user': None}) + message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}} + mocker.patch(MODULE_PATH + + 'idrac_syslog.run_setup_idrac_syslog', + return_value=message) + result = self._run_module(idrac_default_args) + assert result == {'msg': 'Successfully fetch the syslogs.', + 'syslog_status': { + 'changed': False, + 'msg': {'Status': 'Success', 'message': 'No changes found to commit!'}}, + 'changed': False} + + def test_run_setup_idrac_syslog_success_case01(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Enabled", "share_password": "sharepassword"}) + message = {"changes_applicable": True, "message": "changes are applicable"} + idrac_connection_setup_syslog_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == {'changes_applicable': True, 'message': 'changes are applicable'} + + def test_run_setup_idrac_syslog_success_case02(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Enabled", "share_password": "sharepassword"}) + message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True, + "Status": "Success"} + idrac_connection_setup_syslog_mock.config_mgr.enable_syslog.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == {'Status': 'Success', + 
'changed': True, + 'changes_applicable': True, + 'message': 'changes found to commit!'} + + def test_run_setup_idrac_syslog_success_case03(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Enabled", "share_password": "sharepassword"}) + message = {"changes_applicable": True, "Message": "No changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_setup_syslog_mock.config_mgr.enable_syslog.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == {'Message': 'No changes found to commit!', + 'Status': 'Success', + 'changed': False, + 'changes_applicable': True} + + def test_run_setup_idrac_syslog_success_case04(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Disabled", "share_password": "sharepassword"}) + message = {"changes_applicable": True, "Message": "No Changes found to commit!", "changed": False, + "Status": "Success"} + idrac_connection_setup_syslog_mock.config_mgr.disable_syslog.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == {'Message': 'No Changes found to commit!', 'Status': 'Success', + 'changed': False, 'changes_applicable': True} + + def test_run_setup_syslog_disable_case(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + 
"share_password": "sharepassword", "syslog": 'Disabled'}) + message = "Disabled" + idrac_connection_setup_syslog_mock.config_mgr.disable_syslog.return_value = message + idrac_connection_setup_syslog_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == 'Disabled' + + def test_run_setup_syslog_enable_case(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "share_password": "sharepassword", "syslog": 'Enabled'}) + message = "Enabled" + idrac_connection_setup_syslog_mock.config_mgr.enable_syslog.return_value = message + idrac_connection_setup_syslog_mock.config_mgr.is_change_applicable.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == "Enabled" + + def test_run_setup_idrac_syslog_failed_case01(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Enable", "share_password": "sharepassword"}) + message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}} + idrac_connection_setup_syslog_mock.file_share_manager.create_share_obj.return_value = "mnt/iso" + idrac_connection_setup_syslog_mock.config_mgr.set_liason_share.return_value = message + f_module = self.get_module_mock(params=idrac_default_args, check_mode=True) + result = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert result == idrac_connection_setup_syslog_mock.config_mgr.is_change_applicable() + + def 
test_run_setup_idrac_syslog_failed_case03(self, idrac_connection_setup_syslog_mock, idrac_default_args, + idrac_file_manager_mock): + idrac_default_args.update( + {"share_name": "dummy_share_name", "share_mnt": "mountname", "share_user": "shareuser", + "syslog": "Disabled", "share_password": "sharepassword"}) + message = {"message": "No changes were applied", "changed": False, + "Status": "failed"} + idrac_connection_setup_syslog_mock.config_mgr.enable_syslog.return_value = message + idrac_connection_setup_syslog_mock.config_mgr.disable_syslog.return_value = message + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = False + msg = self.module.run_setup_idrac_syslog(idrac_connection_setup_syslog_mock, f_module) + assert msg == {'Status': 'failed', 'changed': False, 'message': 'No changes were applied'} + + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError]) + def test_main_setup_syslog_exception_handling_case(self, exc_type, mocker, idrac_connection_setup_syslog_mock, + idrac_default_args, idrac_file_manager_mock): + idrac_default_args.update({"share_name": "sharename", 'share_password': None, + "syslog": "Enabled", 'share_mnt': None, 'share_user': None}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + + 'idrac_syslog.run_setup_idrac_syslog', + side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + + 'idrac_syslog.run_setup_idrac_syslog', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git 
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 5.2.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_system_info
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from mock import MagicMock, Mock
from pytest import importorskip
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from io import StringIO
from ansible.module_utils._text import to_text

# Skip the whole module when the Dell OMSDK is not installed; the module
# under test imports it at runtime.
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")

# Dotted prefix used to build mocker.patch() targets for the module under test.
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'


class TestSystemInventory(FakeAnsibleModule):
    """Unit tests for the idrac_system_info module (system inventory fetch)."""

    # FakeAnsibleModule uses this attribute to know which module to drive
    # via _run_module / _run_module_with_fail_json.
    module = idrac_system_info

    @pytest.fixture
    def idrac_system_info_mock(self, mocker):
        """Return a MagicMock standing in for the iDRAC SDK connection object.

        get_json_device is stubbed on the mock's *type* so attribute access on
        the instance resolves to the stub.
        """
        omsdk_mock = MagicMock()
        idrac_obj = MagicMock()
        omsdk_mock.get_entityjson = idrac_obj
        type(idrac_obj).get_json_device = Mock(return_value="msg")
        return idrac_obj

    @pytest.fixture
    def idrac_system_info_connection_mock(self, mocker, idrac_system_info_mock):
        """Patch iDRACConnection in the module under test.

        Both the constructor return value and the context-manager __enter__
        result are routed to the same mock, since the module uses
        ``with iDRACConnection(...) as idrac:``.
        """
        idrac_conn_class_mock = mocker.patch(MODULE_PATH +
                                             'idrac_system_info.iDRACConnection',
                                             return_value=idrac_system_info_mock)
        idrac_conn_class_mock.return_value.__enter__.return_value = idrac_system_info_mock
        return idrac_system_info_mock

    def test_idrac_system_info_main_success_case01(self, idrac_system_info_mock, idrac_system_info_connection_mock,
                                                   idrac_default_args):
        """main() should report the device JSON as system_info on success."""
        idrac_system_info_mock.get_entityjson.return_value = None
        idrac_system_info_connection_mock.get_json_device.return_value = {"status": "Success"}
        result = self._run_module(idrac_default_args)
        assert result == {"system_info": {"status": "Success"},
                          "msg": "Successfully fetched the system inventory details.",
                          "changed": False}

    @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError,
                                          ConnectionError, HTTPError])
    def test_idrac_system_info_main_exception_handling_case(self, exc_type, idrac_system_info_connection_mock,
                                                            idrac_default_args):
        """Each exception raised by get_json_device must be surfaced by main().

        HTTPError/SSLValidationError take the full urllib constructor
        signature (url, code, msg, hdrs, fp); the others take a single
        message argument. URLError is expected to be handled without
        fail_json (module exits normally with a msg), everything else fails.
        """
        json_str = to_text(json.dumps({"data": "out"}))
        if exc_type not in [HTTPError, SSLValidationError]:
            idrac_system_info_connection_mock.get_json_device.side_effect = exc_type('test')
        else:
            idrac_system_info_connection_mock.get_json_device.side_effect = exc_type('http://testhost.com', 400,
                                                                                     'http error message',
                                                                                     {
                                                                                         "accept-type": "application/json"},
                                                                                     StringIO(json_str))
        if not exc_type == URLError:
            result = self._run_module_with_fail_json(idrac_default_args)
            assert result['failed'] is True
        else:
            result = self._run_module(idrac_default_args)
            assert 'msg' in result
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 6.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_timezone_ntp
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from mock import MagicMock, patch, Mock, PropertyMock
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from pytest import importorskip

# Skip the module entirely when the Dell OMSDK is not available.
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")

# Dotted prefix used to build mocker.patch() targets for the module under test.
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'


class TestConfigTimezone(FakeAnsibleModule):
    """Unit tests for the idrac_timezone_ntp module (time/NTP configuration)."""

    module = idrac_timezone_ntp

    @pytest.fixture
    def idrac_configure_timezone_mock(self, mocker):
        """Return one MagicMock shared as both file_share_manager and config_mgr.

        create_share_obj / set_liason_share are stubbed on the mock's *type*
        so instance attribute access resolves to the stubs.
        NOTE(review): 'set_liason_share' matches the (misspelled) OMSDK API
        name used by the module; do not "fix" the spelling here.
        """
        omsdk_mock = MagicMock()
        idrac_obj = MagicMock()
        omsdk_mock.file_share_manager = idrac_obj
        omsdk_mock.config_mgr = idrac_obj
        type(idrac_obj).create_share_obj = Mock(return_value="servicesstatus")
        type(idrac_obj).set_liason_share = Mock(return_value="servicestatus")
        return idrac_obj

    @pytest.fixture
    def idrac_file_manager_config_timesone_mock(self, mocker):
        # Patch the module-level file_share_manager if it exists; fall back to
        # a plain MagicMock otherwise. (Fixture name keeps the original
        # "timesone" spelling because tests reference it verbatim.)
        try:
            file_manager_obj = mocker.patch(
                MODULE_PATH + 'idrac_timezone_ntp.file_share_manager')
        except AttributeError:
            file_manager_obj = MagicMock()
        obj = MagicMock()
        file_manager_obj.create_share_obj.return_value = obj
        return file_manager_obj

    @pytest.fixture
    def idrac_connection_configure_timezone_mock(self, mocker, idrac_configure_timezone_mock):
        """Patch iDRACConnection so the module's context manager yields the mock."""
        idrac_conn_class_mock = mocker.patch(MODULE_PATH +
                                             'idrac_timezone_ntp.iDRACConnection',
                                             return_value=idrac_configure_timezone_mock)
        idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_timezone_mock
        return idrac_configure_timezone_mock

    def test_main_idrac_timezone_config_success_Case(self, idrac_connection_configure_timezone_mock, idrac_default_args,
                                                     mocker, idrac_file_manager_config_timesone_mock):
        """main() wraps run_idrac_timezone_config output under timezone_ntp_status."""
        idrac_default_args.update({"share_name": None})
        message = {'changed': False, 'msg': {'Status': "Success", "Message": "No changes found to commit!"}}
        # First pass: the helper returns a (message, changed) tuple.
        mocker.patch(MODULE_PATH +
                     'idrac_timezone_ntp.run_idrac_timezone_config', return_value=(message, False))
        result = self._run_module(idrac_default_args)
        assert result == {'msg': 'Successfully configured the iDRAC time settings.',
                          'timezone_ntp_status': ({'changed': False,
                                                   'msg': {'Status': 'Success',
                                                           'Message': 'No changes found to commit!'}}, False),
                          'changed': False}
        # Second pass: the helper returns a bare status dict instead of a tuple.
        status_msg = {"Status": "Success", "Message": "No changes found to commit!",
                      "msg": {"Status": "Success",
                              "Message": "No changes found to commit!"}}
        mocker.patch(MODULE_PATH +
                     'idrac_timezone_ntp.run_idrac_timezone_config', return_value=status_msg)
        result = self._run_module(idrac_default_args)
        assert result["msg"] == "Successfully configured the iDRAC time settings."

    def test_run_idrac_timezone_config_success_case01(self, idrac_connection_configure_timezone_mock,
                                                      idrac_default_args, idrac_file_manager_config_timesone_mock):
        """In check mode the helper returns config_mgr.is_change_applicable()."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {"changes_applicable": True, "message": "changes are applicable"}
        idrac_connection_configure_timezone_mock.config_mgr.is_change_applicable.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'changes_applicable': True, 'message': 'changes are applicable'}

    def test_run_idrac_timezone_config_success_case02(self, idrac_connection_configure_timezone_mock,
                                                      idrac_default_args, idrac_file_manager_config_timesone_mock):
        """Outside check mode the helper returns config_mgr.apply_changes()."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
                   "Status": "Success"}
        idrac_connection_configure_timezone_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'Status': 'Success',
                       'changed': True,
                       'changes_applicable': True,
                       'message': 'changes found to commit!'}

    def test_run_idrac_timezone_config_success_case03(self, idrac_connection_configure_timezone_mock,
                                                      idrac_default_args, idrac_file_manager_config_timesone_mock):
        """Success status with no applicable changes is passed through unchanged."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
                   "Status": "Success"}
        idrac_connection_configure_timezone_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'Message': 'No changes found to commit!',
                       'Status': 'Success',
                       'changed': False,
                       'changes_applicable': False}

    def test_run_idrac_timezone_config_success_case04(self, idrac_connection_configure_timezone_mock,
                                                      idrac_default_args, idrac_file_manager_config_timesone_mock):
        # NOTE(review): this test duplicates success_case03 exactly — presumably
        # intended to cover a distinct branch; confirm and differentiate or drop.
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
                   "Status": "Success"}
        idrac_connection_configure_timezone_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'Message': 'No changes found to commit!',
                       'Status': 'Success',
                       'changed': False,
                       'changes_applicable': False}

    def test_run_idrac_timezone_config_success_case05(self, idrac_connection_configure_timezone_mock,
                                                      idrac_default_args, idrac_file_manager_config_timesone_mock):
        """With every option None, configure_timezone/configure_ntp are stubbed too."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": None,
                                   "enable_ntp": None, "ntp_server_1": None, "ntp_server_2": None,
                                   "ntp_server_3": None})
        message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
                   "Status": "Success"}
        idrac_connection_configure_timezone_mock.config_mgr.configure_timezone.return_value = message
        idrac_connection_configure_timezone_mock.config_mgr.configure_ntp.return_value = message
        idrac_connection_configure_timezone_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'Message': 'No changes found to commit!',
                       'Status': 'Success',
                       'changed': False,
                       'changes_applicable': False}

    def test_run_idrac_timezone_config_failed_case01(self, idrac_connection_configure_timezone_mock,
                                                     idrac_default_args, idrac_file_manager_config_timesone_mock):
        """A failed liaison-share setup still yields is_change_applicable() in check mode."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}}
        idrac_connection_configure_timezone_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
        idrac_connection_configure_timezone_mock.config_mgr.set_liason_share.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        result = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        # Compare against a fresh call on the same MagicMock: both calls return
        # the same child-mock object, so identity holds.
        assert result == idrac_connection_configure_timezone_mock.config_mgr.is_change_applicable()

    def test_run_idrac_timezone_config_failed_case02(self, idrac_connection_configure_timezone_mock,
                                                     idrac_default_args, idrac_file_manager_config_timesone_mock):
        """A failed apply_changes() status dict is passed through unchanged."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
                   "Status": "failed"}
        idrac_connection_configure_timezone_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == {'Message': 'No changes were applied',
                       'Status': 'failed',
                       'changed': False,
                       'changes_applicable': False}

    def test_run_idrac_timezone_config_failed_case03(self, idrac_connection_configure_timezone_mock,
                                                     idrac_default_args, idrac_file_manager_config_timesone_mock):
        """Variant of failed_case01 with a different failure Data message."""
        idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
                                   "share_password": None, "setup_idrac_timezone": "setuptimezone",
                                   "enable_ntp": "Enabled", "ntp_server_1": "ntp server1",
                                   "ntp_server_2": "ntp server2", "ntp_server_3": "ntp server3"})
        message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}}
        idrac_connection_configure_timezone_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
        idrac_connection_configure_timezone_mock.config_mgr.set_liason_share.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        msg = self.module.run_idrac_timezone_config(idrac_connection_configure_timezone_mock, f_module)
        assert msg == idrac_connection_configure_timezone_mock.config_mgr.is_change_applicable()

    @pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError,
                                          ImportError, ValueError, TypeError, HTTPError, URLError])
    def test_main_idrac_configure_timezone_exception_handling_case(self, exc_type, mocker, idrac_default_args,
                                                                   idrac_connection_configure_timezone_mock,
                                                                   idrac_file_manager_config_timesone_mock):
        """Exceptions from run_idrac_timezone_config must surface via main().

        URLError is handled without fail_json; all other types must fail.
        HTTPError/SSLValidationError need the full urllib constructor signature.
        """
        idrac_default_args.update({"share_name": None})
        json_str = to_text(json.dumps({"data": "out"}))
        if exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(
                MODULE_PATH + 'idrac_timezone_ntp.run_idrac_timezone_config',
                side_effect=exc_type('test'))
        else:
            mocker.patch(
                MODULE_PATH + 'idrac_timezone_ntp.run_idrac_timezone_config',
                side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                     {"accept-type": "application/json"}, StringIO(json_str)))
        if not exc_type == URLError:
            result = self._run_module_with_fail_json(idrac_default_args)
            assert result['failed'] is True
        else:
            result = self._run_module(idrac_default_args)
            assert 'msg' in result

# (Original patch continues here with the header of test_idrac_user.py:)
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 5.2.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_user
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from mock import MagicMock, patch, Mock
from ansible.module_utils._text import to_text
from io import StringIO

# Dotted prefix used to build mocker.patch() targets for the module under test.
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'


class TestIDRACUser(FakeAnsibleModule):
    """Unit tests for the idrac_user module (iDRAC local user management)."""

    module = idrac_user

    @pytest.fixture
    def idrac_user_mock(self):
        # Plain MagicMock standing in for the iDRACRedfishAPI session object.
        idrac_obj = MagicMock()
        return idrac_obj

    @pytest.fixture
    def idrac_connection_user_mock(self, mocker, idrac_user_mock):
        """Patch iDRACRedfishAPI so the module's context manager yields the mock."""
        idrac_conn_mock = mocker.patch(MODULE_PATH + 'idrac_user.iDRACRedfishAPI',
                                       return_value=idrac_user_mock)
        idrac_conn_mock.return_value.__enter__.return_value = idrac_user_mock
        return idrac_conn_mock

    def test_get_payload(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """get_payload(action='update') must use new_user_name for the slot's UserName."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args)
        resp = self.module.get_payload(f_module, 1, action="update")
        assert resp["Users.1.UserName"] == idrac_default_args["new_user_name"]

    def test_convert_payload_xml(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """convert_payload_xml returns (xml, json) with dotted keys rewritten to '#' form."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        payload = {"Users.1.UserName": idrac_default_args["user_name"],
                   "Users.1.Password": idrac_default_args["user_password"],
                   "Users.1.Enable": idrac_default_args["enable"],
                   "Users.1.Privilege": idrac_default_args["privilege"],
                   "Users.1.IpmiLanPrivilege": idrac_default_args["ipmi_lan_privilege"],
                   "Users.1.IpmiSerialPrivilege": idrac_default_args["ipmi_serial_privilege"],
                   "Users.1.SolEnable": idrac_default_args["sol_enable"],
                   "Users.1.ProtocolEnable": idrac_default_args["protocol_enable"],
                   "Users.1.AuthenticationProtocol": idrac_default_args["authentication_protocol"],
                   "Users.1.PrivacyProtocol": idrac_default_args["privacy_protocol"]}
        xml_payload, json_payload = self.module.convert_payload_xml(payload)
        assert json_payload["Users.1#SolEnable"] is True

    def test_remove_user_account_check_mode_1(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Check mode with a matching slot reports pending changes via exit_json."""
        idrac_default_args.update({"state": "absent", "user_name": "user_name", "new_user_name": None,
                                   "user_password": None, "privilege": None, "ipmi_lan_privilege": None,
                                   "ipmi_serial_privilege": None, "enable": False, "sol_enable": False,
                                   "protocol_enable": False, "authentication_protocol": "SHA",
                                   "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        slot_id = 1
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        with pytest.raises(Exception) as exc:
            self.module.remove_user_account(f_module, idrac_connection_user_mock, slot_uri, slot_id)
        assert exc.value.args[0] == "Changes found to commit!"

    def test_remove_user_account_check_mode_2(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Check mode with no slot (user absent) reports no changes."""
        idrac_default_args.update({"state": "absent", "user_name": "user_name", "new_user_name": None,
                                   "user_password": None, "privilege": None, "ipmi_lan_privilege": None,
                                   "ipmi_serial_privilege": None, "enable": False, "sol_enable": False,
                                   "protocol_enable": False, "authentication_protocol": "SHA",
                                   "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        with pytest.raises(Exception) as exc:
            self.module.remove_user_account(f_module, idrac_connection_user_mock, None, None)
        assert exc.value.args[0] == "No changes found to commit!"

    def test_remove_user_account_check_mode_3(self, idrac_connection_user_mock, idrac_default_args, mocker):
        # NOTE(review): despite the name, this runs with check_mode=False and
        # simply verifies the non-check-mode removal path completes without error.
        idrac_default_args.update({"state": "absent", "user_name": "user_name", "new_user_name": None,
                                   "user_password": None, "privilege": None, "ipmi_lan_privilege": None,
                                   "ipmi_serial_privilege": None, "enable": False, "sol_enable": False,
                                   "protocol_enable": False, "authentication_protocol": "SHA",
                                   "privacy_protocol": "AES"})
        idrac_connection_user_mock.remove_user_account.return_value = {"success": True}
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        slot_id = 1
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        # Stub out the post-removal settle delay so the test stays fast.
        mocker.patch(MODULE_PATH + 'idrac_user.time.sleep', return_value=None)
        self.module.remove_user_account(f_module, idrac_connection_user_mock, slot_uri, slot_id)

    def test_remove_user_account_check_mode_4(self, idrac_connection_user_mock, idrac_default_args, mocker):
        # NOTE(review): also runs with check_mode=False; asserts the absent-user
        # error path when no slot is supplied.
        idrac_default_args.update({"state": "absent", "user_name": "user_name", "new_user_name": None,
                                   "user_password": None, "privilege": None, "ipmi_lan_privilege": None,
                                   "ipmi_serial_privilege": None, "enable": False, "sol_enable": False,
                                   "protocol_enable": False, "authentication_protocol": "SHA",
                                   "privacy_protocol": "AES"})
        idrac_connection_user_mock.remove_user_account.return_value = {"success": True}
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        with pytest.raises(Exception) as exc:
            self.module.remove_user_account(f_module, idrac_connection_user_mock, None, None)
        assert exc.value.args[0] == 'The user account is absent.'

    def test_get_user_account_1(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """get_user_account finds the first empty slot (Users.3 here) for a new user."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.export_scp",
                     return_value=MagicMock())
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.get_idrac_local_account_attr",
                     return_value={"Users.2#UserName": "test_user", "Users.3#UserName": ""})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        response = self.module.get_user_account(f_module, idrac_connection_user_mock)
        assert response[0]["Users.2#UserName"] == "test_user"
        assert response[3] == 3

    def test_get_user_account_2(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """The empty-slot URI returned must point at the Accounts/3 resource."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.export_scp",
                     return_value=MagicMock())
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.get_idrac_local_account_attr",
                     return_value={"Users.2#UserName": "test_user", "Users.3#UserName": ""})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        response = self.module.get_user_account(f_module, idrac_connection_user_mock)
        assert response[3] == 3
        assert response[4] == "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/3"

    def test_create_or_modify_account_1(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Generation 13 (SCP import path): creating into an empty slot succeeds."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        # 13G firmware uses the import_scp code path.
        idrac_connection_user_mock.get_server_generation = (13, "2.70.70.70")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.import_scp",
                     return_value={"Message": "Successfully created a request."})
        empty_slot_id = 2
        empty_slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(empty_slot_id)
        user_attr = {"User.2#UserName": "test_user"}
        mocker.patch(MODULE_PATH + 'idrac_user.time.sleep', return_value=None)
        response = self.module.create_or_modify_account(f_module, idrac_connection_user_mock, None, None,
                                                        empty_slot_id, empty_slot_uri, user_attr)
        assert response[1] == "Successfully created user account."

    def test_create_or_modify_account_2(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Generation 13: updating an occupied slot succeeds."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        idrac_connection_user_mock.get_server_generation = (13, "2.70.70.70")
        mocker.patch(MODULE_PATH + 'idrac_user.time.sleep', return_value=None)
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.import_scp",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        user_attr = {"User.2#UserName": "test_user"}
        response = self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_uri, slot_id,
                                                        None, None, user_attr)
        assert response[1] == "Successfully updated user account."

    def test_create_or_modify_account_3(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """When requested attrs equal current attrs, the module reports no-op."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        idrac_connection_user_mock.get_server_generation = (13, "2.70.70.70")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.import_scp",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        # user_attr matches the converted payload -> nothing to change.
        user_attr = {"Users.1#UserName": "test_user"}
        with pytest.raises(Exception) as exc:
            self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_uri, slot_id,
                                                 None, None, user_attr)
        assert exc.value.args[0] == "Requested changes are already present in the user slot."

    def test_create_or_modify_account_4(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Check mode + identical attrs -> 'No changes found to commit!'."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        idrac_connection_user_mock.get_server_generation = (13, "2.70.70.70")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.import_scp",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        user_attr = {"Users.1#UserName": "test_user"}
        with pytest.raises(Exception) as exc:
            self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_uri, slot_id,
                                                 None, None, user_attr)
        assert exc.value.args[0] == "No changes found to commit!"

    def test_create_or_modify_account_5(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Check mode + differing attrs -> 'Changes found to commit!'."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        idrac_connection_user_mock.get_server_generation = (13, "2.70.70.70")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.2#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.import_scp",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        # Keys differ (Users.1 vs Users.2) -> change is applicable.
        user_attr = {"Users.1#UserName": "test_user"}
        with pytest.raises(Exception) as exc:
            self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_uri, slot_id,
                                                 None, None, user_attr)
        assert exc.value.args[0] == "Changes found to commit!"

    def test_create_or_modify_account_6(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Generation 14 (direct Redfish invoke_request path): create succeeds."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        # 14G firmware uses invoke_request instead of SCP import.
        idrac_connection_user_mock.get_server_generation = (14, "3.60.60.60")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.invoke_request",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        user_attr = {"User.2#UserName": "test_user"}
        response = self.module.create_or_modify_account(f_module, idrac_connection_user_mock, None, None,
                                                        slot_id, slot_uri, user_attr)
        assert response[1] == "Successfully created user account."

    def test_create_or_modify_account_7(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Generation 14 + check mode on an empty slot -> 'Changes found to commit!'."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        idrac_connection_user_mock.get_server_generation = (14, "3.60.60.60")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.invoke_request",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        user_attr = {"User.2#UserName": "test_user"}
        with pytest.raises(Exception) as exc:
            self.module.create_or_modify_account(f_module, idrac_connection_user_mock, None, None,
                                                 slot_id, slot_uri, user_attr)
        assert exc.value.args[0] == "Changes found to commit!"

    def test_create_or_modify_account_8(self, idrac_connection_user_mock, idrac_default_args, mocker):
        """Generation 14: updating an occupied slot succeeds."""
        idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
                                   "user_name": "test", "user_password": "password",
                                   "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
                                   "ipmi_serial_privilege": "Administrator", "enable": True,
                                   "sol_enable": True, "protocol_enable": True,
                                   "authentication_protocol": "SHA", "privacy_protocol": "AES"})
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
        idrac_connection_user_mock.get_server_generation = (14, "3.60.60.60")
        mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
        mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
                     return_value=("", {"Users.1#UserName": "test_user"}))
        mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.invoke_request",
                     return_value={"Message": "Successfully created a request."})
        slot_id = 2
        slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
        user_attr = {"User.2#UserName": "test_user"}
        response = self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_uri, slot_id,
                                                        None, None, user_attr)
        assert response[1] == "Successfully updated user account."
+ + @pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError, + ConnectionError, HTTPError, ImportError, RuntimeError]) + def test_main(self, exc_type, idrac_connection_user_mock, idrac_default_args, mocker): + idrac_default_args.update({"state": "present", "new_user_name": "new_user_name", + "user_name": "test", "user_password": "password", + "privilege": "Administrator", "ipmi_lan_privilege": "Administrator", + "ipmi_serial_privilege": "Administrator", "enable": True, + "sol_enable": True, "protocol_enable": True, + "authentication_protocol": "SHA", "privacy_protocol": "AES"}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + "idrac_user.create_or_modify_account", + side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + "idrac_user.create_or_modify_account", + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py new file mode 100644 index 00000000..94e620f3 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.3.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import idrac_virtual_media +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock +from mock import PropertyMock +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def virtual_media_conn_mock(mocker, redfish_response_mock): + idrac_conn_mock = mocker.patch(MODULE_PATH + 'idrac_virtual_media.iDRACRedfishAPI') + idrac_conn_mock_obj = idrac_conn_mock.return_value.__enter__.return_value + idrac_conn_mock_obj.invoke_request.return_value = redfish_response_mock + return idrac_conn_mock_obj + + +class TestVirtualMedia(FakeAnsibleModule): + + module = idrac_virtual_media + + def test_validate_params(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args): + idrac_default_args.update( + {"virtual_media": [{"index": 1, "insert": True, "image": "//192.168.0.1/path/image.iso"}]}) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as err: + self.module._validate_params(f_module, {"index": 1, "insert": True, + "image": "//192.168.0.1/path/image.iso"}, "140") + assert err.value.args[0] == "CIFS share required username and password." 
+ idrac_default_args.update({"virtual_media": [{"index": 1, "insert": True, "username": "user", "password": "pwd", + "image": "\\\\192.168.0.1\\path\\image.iso"}]}) + f_module = self.get_module_mock(params=idrac_default_args) + result = self.module._validate_params(f_module, {"password": "pwd", "insert": True, "username": "usr", + "image": "\\\\192.168.0.1\\path\\image.iso", "index": 1}, + "141") + assert result is None + + def test_get_virtual_media_info(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args): + redfish_response_mock.json_data = { + "RedfishVersion": "1.13.1", + "VirtualMedia": {"@odata.id": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia"}, + "Members": [{"Inserted": False, "Image": None}, + {"Inserted": True, "Image": "//192.168.0.1/file_path/file.iso"}] + } + resp, vr_id, rd_version = self.module.get_virtual_media_info(virtual_media_conn_mock) + assert vr_id == "system" + redfish_response_mock.json_data.update({"RedfishVersion": "1.11.1"}) + resp, vr_id, rd_version = self.module.get_virtual_media_info(virtual_media_conn_mock) + assert vr_id == "manager" + + def test_get_payload_data(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args): + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}]}) + each = {"insert": True, "image": "//192.168.0.1/path/file.iso", "index": 1, "media_type": "CD"} + vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso", + "UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}] + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert is_change is True + assert input_vr_mem == {'Inserted': True, 'Image': '//192.168.0.1/path/file.iso'} + assert vr_mem == {'Inserted': True, 'Image': '//192.168.0.1/path/image_file.iso', 'UserName': 'username', + 'Password': 'password', 'Id': 'CD', 'MediaTypes': ['CD', 'DVD']} + 
each.update({"username": "user_name", "password": "password", "domain": "domain", + "image": "192.168.0.3:/file_path/image.iso"}) + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert is_change is True + each.update({"media_type": "USBStick"}) + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert unsup_media == 1 + each = {"insert": False, "index": 1} + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert is_change is True + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "system") + assert is_change is True + each.update({"username": "user_name", "password": "password", "domain": "domain", "media_type": "CD", + "image": "192.168.0.3:/file_path/image.img", "insert": True}) + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert unsup_media == 1 + each.update({"username": "user_name", "password": "password", "domain": "domain", "media_type": "DVD", + "image": "192.168.0.3:/file_path/image.img", "insert": True}) + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager") + assert unsup_media == 1 + + def test_domain_name(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args): + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}]}) + each = {"insert": True, "image": "//192.168.0.1/path/file.iso", "index": 1, "media_type": "CD", + "domain": "domain", "username": "user", "password": "pwd"} + vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso", "domain": "domain", + "UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}] + is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, 
"manager") + assert is_change is True + + def test_virtual_media_operation(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}], + "force": True}) + f_module = self.get_module_mock(params=idrac_default_args) + mocker.patch(MODULE_PATH + 'idrac_virtual_media.time.sleep', return_value=None) + payload = [{ + "vr_mem": {"Inserted": True, "Actions": { + "#VirtualMedia.EjectMedia": { + "target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1/Actions/VirtualMedia.EjectMedia"}, + "#VirtualMedia.InsertMedia": { + "target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1/Actions/VirtualMedia.InsertMedia"} + }}, + "payload": {"Inserted": True, "Image": "http://192.168.0.1/file_path/file.iso"}, + "input": {"index": 1, "insert": True, "image": "//192.168.0.1/path/file.iso", "force": True} + }] + result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "manager") + assert result == [] + idrac_default_args.update({"force": False}) + result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "manager") + assert result == [] + payload[0]["vr_mem"].update({"Inserted": False}) + result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "manager") + assert result == [] + payload[0]["vr_mem"].update({"Inserted": True}) + payload[0]["payload"].update({"Inserted": False}) + result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "manager") + assert result == [] + + @pytest.mark.parametrize("exc_type", [HTTPError]) + def test_virtual_media_operation_http(self, virtual_media_conn_mock, redfish_response_mock, + idrac_default_args, mocker, exc_type): + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}], + "force": True}) + f_module = 
self.get_module_mock(params=idrac_default_args) + mocker.patch(MODULE_PATH + 'idrac_virtual_media.time.sleep', return_value=None) + payload = [{ + "vr_mem": {"Inserted": True, "Actions": { + "#VirtualMedia.EjectMedia": { + "target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.EjectMedia"}, + "#VirtualMedia.InsertMedia": { + "target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia"} + }}, + "payload": {"Inserted": True, "Image": "http://192.168.0.1/file_path/file.iso"}, + "input": {"index": 1, "insert": True, "image": "//192.168.0.1/path/file.iso", "force": True} + }] + if exc_type == HTTPError: + mocker.patch(MODULE_PATH + 'idrac_virtual_media.json.load', return_value={ + "error": {"@Message.ExtendedInfo": [{"MessageId": "VRM0012"}]} + }) + json_str = to_text(json.dumps({"data": "out"})) + mocker.patch( + MODULE_PATH + 'idrac_virtual_media.time.sleep', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "system") + assert result == [{'@Message.ExtendedInfo': [{'MessageId': 'VRM0012'}]}] + + def test_virtual_media(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker): + vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso", + "UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}] + mocker.patch(MODULE_PATH + 'idrac_virtual_media.virtual_media_operation', return_value=[]) + mocker.patch(MODULE_PATH + 'idrac_virtual_media._validate_params', return_value=None) + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(True, {}, {}, 1)) + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}], + "force": True}) + f_module = self.get_module_mock(params=idrac_default_args) + 
with pytest.raises(Exception) as ex: + self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141") + assert ex.value.args[0] == "Unable to complete the virtual media operation because unsupported " \ + "media type provided for index 1" + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.img"}], + "force": True}) + f_module = self.get_module_mock(params=idrac_default_args) + with pytest.raises(Exception) as ex: + self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141") + assert ex.value.args[0] == "Unable to complete the virtual media operation because " \ + "unsupported media type provided for index 1" + with pytest.raises(Exception) as ex: + self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "system", "141") + assert ex.value.args[0] == "Unable to complete the virtual media operation because " \ + "unsupported media type provided for index 1" + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso", + "index": 1, "media_type": "CD"}], "force": True}) + f_module = self.get_module_mock(params=idrac_default_args) + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(True, {}, {}, None)) + result = self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141") + assert result == [] + result = self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "system", "141") + assert result == [] + f_module.check_mode = True + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(True, {"Insert": True}, + {}, None)) + with pytest.raises(Exception) as ex: + self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141") + assert ex.value.args[0] == "Changes found to be applied." 
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso", + "index": 1, "media_type": "CD"}], "force": False}) + f_module = self.get_module_mock(params=idrac_default_args) + f_module.check_mode = True + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(False, {}, {}, None)) + with pytest.raises(Exception) as ex: + self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141") + assert ex.value.args[0] == "No changes found to be applied." + + def test_main_success(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker): + idrac_default_args.update({"virtual_media": [ + {"insert": True, "image": "http://192.168.0.1/path/file.iso"}, + {"insert": True, "image": "192.168.0.2:/file/file.iso"}], "force": True}) + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_virtual_media_info', + return_value=([{"Insert": True}, {"Insert": True}], "manager", "141")) + with pytest.raises(Exception) as ex: + self._run_module(idrac_default_args) + assert ex.value.args[0]["msg"] == "Unable to complete the operation because the virtual media settings " \ + "provided exceeded the maximum limit." + mocker.patch(MODULE_PATH + 'idrac_virtual_media.virtual_media', return_value=[]) + idrac_default_args.update({"virtual_media": [{"insert": True, "image": "http://192.168.0.1/path/file.iso"}], + "force": True}) + result = self._run_module(idrac_default_args) + assert result == {'changed': True, 'msg': 'Successfully performed the virtual media operation.'} + mocker.patch(MODULE_PATH + 'idrac_virtual_media.virtual_media', return_value=["error"]) + with pytest.raises(Exception) as ex: + self._run_module(idrac_default_args) + assert ex.value.args[0]["msg"] == "Unable to complete the virtual media operation." 
+ + @pytest.mark.parametrize("exc_type", [HTTPError, URLError, ValueError, RuntimeError, SSLValidationError, + ConnectionError, KeyError, ImportError, ValueError, TypeError]) + def test_main_exception(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker, exc_type): + idrac_default_args.update({"virtual_media": [{"index": 1, "insert": False}]}) + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError]: + mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_virtual_media_info', side_effect=exc_type('test')) + else: + mocker.patch( + MODULE_PATH + 'idrac_virtual_media.get_virtual_media_info', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not exc_type == URLError: + result = self._run_module_with_fail_json(idrac_default_args) + assert result['failed'] is True + else: + result = self._run_module(idrac_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py new file mode 100644 index 00000000..1722a3da --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.0.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_active_directory +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +AD_URI = "AccountService/ExternalAccountProvider/ADAccountProvider" +TEST_CONNECTION = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.TestADConnection" +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +MAX_AD_MSG = "Unable to add the account provider because the maximum number of configurations allowed for an" \ + " Active Directory service is {0}." +CREATE_SUCCESS = "Successfully added the Active Directory service." +MODIFY_SUCCESS = "Successfully modified the Active Directory service." +DELETE_SUCCESS = "Successfully deleted the Active Directory service." +DOM_SERVER_MSG = "Specify the domain server. Domain server is required to create an Active Directory service." +GRP_DOM_MSG = "Specify the group domain. Group domain is required to create an Active Directory service." +CERT_INVALID = "The provided certificate file path is invalid or not readable." +DOMAIN_ALLOWED_COUNT = "Maximum entries allowed for {0} lookup type is {1}." +TEST_CONNECTION_SUCCESS = "Test Connection is successful. " +TEST_CONNECTION_FAIL = "Test Connection has failed. " +ERR_READ_FAIL = "Unable to retrieve the error details." +INVALID_ID = "The provided Active Directory ID is invalid." +TIMEOUT_RANGE = "The {0} value is not in the range of {1} to {2}." 
+MAX_AD = 2 +MIN_TIMEOUT = 15 +MAX_TIMEOUT = 300 + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_active_directory.' +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.' + + +@pytest.fixture +def ome_connection_mock_for_ad(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAD(FakeAnsibleModule): + module = ome_active_directory + + @pytest.mark.parametrize("params", [ + {"module_args": {"name": "domdev"}, "json_data": {"value": [{'Name': 'domdev', 'Id': 12}]}, + "ad": {'Name': 'domdev', 'Id': 12}, "ad_cnt": 1}, + {"module_args": {"id": 12}, "json_data": {"value": [{'Name': 'domdev', 'Id': 12}]}, + "ad": {'Name': 'domdev', 'Id': 12}, "ad_cnt": 1}, + {"module_args": {"id": 11}, "json_data": {"value": [ + {'Name': 'domdev', 'Id': 12}, {'Name': 'domdev', 'Id': 13}]}, "ad": {}, "ad_cnt": 2}]) + def test_get_ad(self, params, ome_connection_mock_for_ad, ome_response_mock): + ome_response_mock.success = params.get("success", True) + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["json_data"] + ad, ad_cnt = self.module.get_ad(f_module, ome_connection_mock_for_ad) + assert ad == params['ad'] + assert ad_cnt == params['ad_cnt'] + + @pytest.mark.parametrize("params", [{ + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev"}, "msg": CREATE_SUCCESS}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev"}, "msg": CHANGES_FOUND, "check_mode": True}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + 
"group_domain": "domain.com", "name": "domdev", "test_connection": True, + "domain_username": "user", "domain_password": "passwd"}, + "msg": "{0}{1}".format(TEST_CONNECTION_SUCCESS, CREATE_SUCCESS)} + ]) + def test_ome_active_directory_create_success(self, params, ome_connection_mock_for_ad, ome_response_mock, + ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"Name": "AD1"} + mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1))) + ome_default_args.update(params['module_args']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("params", [{ + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev"}, + "get_ad": ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], + "DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, + "ServerPort": 3269, "CertificateValidation": False}, 1), + "msg": MODIFY_SUCCESS}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev", "test_connection": True, + "domain_username": "user", "domain_password": "passwd"}, "get_ad": + ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], "DnsServer": [], + "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, "ServerPort": 3269, + "CertificateValidation": False}, 1), + "msg": "{0}{1}".format(TEST_CONNECTION_SUCCESS, MODIFY_SUCCESS)}, + {"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "dellemcdomain.com", "name": "domdev"}, + "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.96.20.181"], + 
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, + "ServerPort": 3269, "CertificateValidation": False}, 1), + "msg": NO_CHANGES_MSG}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "dellemcdomain.com", "name": "domdev"}, + "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], + "DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, + "SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1), + "msg": CHANGES_FOUND, "check_mode": True} + ]) + def test_ome_active_directory_modify_success(self, params, ome_connection_mock_for_ad, ome_response_mock, + ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"Name": "AD1"} + ome_connection_mock_for_ad.strip_substr_dict.return_value = params.get("get_ad", (None, 1))[0] + mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1))) + ome_default_args.update(params['module_args']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("params", [{ + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev", "state": "absent"}, + "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], + "DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, + "ServerPort": 3269, "CertificateValidation": False}, 1), + "msg": DELETE_SUCCESS}, + {"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "dellemcdomain.com", "name": "domdev1", "state": "absent"}, + "msg": NO_CHANGES_MSG}, { + "module_args": {"domain_controller_lookup": "MANUAL", 
"domain_server": ["192.96.20.181"], + "group_domain": "dellemcdomain.com", "name": "domdev", "state": "absent"}, + "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], + "DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, + "SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1), + "msg": CHANGES_FOUND, "check_mode": True} + ]) + def test_ome_active_directory_delete_success(self, params, ome_connection_mock_for_ad, ome_response_mock, + ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"Name": "AD1"} + ome_connection_mock_for_ad.strip_substr_dict.return_value = params.get("get_ad", (None, 1))[0] + mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1))) + ome_default_args.update(params['module_args']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("params", [ + {"module_args": {"domain_controller_lookup": "MANUAL", "group_domain": "domain.com", "name": "domdev"}, + "msg": DOM_SERVER_MSG}, {"module_args": {"domain_controller_lookup": "MANUAL", + "domain_server": ["192.96.20.181", "192.96.20.182", "192.96.20.183", + "192.96.20.184"], "group_domain": "domain.com", + "name": "domdev"}, "msg": DOMAIN_ALLOWED_COUNT.format("MANUAL", 3)}, + {"module_args": {"domain_server": ["dom1.com1", "dom2.com"], "group_domain": "domain.com", "name": "domdev"}, + "msg": DOMAIN_ALLOWED_COUNT.format("DNS", 1)}, + {"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], "name": "domdev"}, + "msg": GRP_DOM_MSG}, {"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev", "network_timeout": 1}, + "msg": TIMEOUT_RANGE.format("NetworkTimeOut", MIN_TIMEOUT, MAX_TIMEOUT)}, { + 
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev", "search_timeout": 301}, + "msg": TIMEOUT_RANGE.format("SearchTimeOut", MIN_TIMEOUT, MAX_TIMEOUT)}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev"}, "ad_cnt": 2, + "msg": MAX_AD_MSG.format(MAX_AD)}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "name": "domdev", "validate_certificate": True, + "certificate_file": "nonexistingcert.crt"}, "msg": CERT_INVALID}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "domain.com", "id": 1234, "validate_certificate": True, + "certificate_file": "nonexistingcert.crt"}, "msg": INVALID_ID} + ]) + def test_ome_active_directory_create_fails(self, params, ome_connection_mock_for_ad, ome_response_mock, + ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"Name": "AD1"} + mocker.patch(MODULE_PATH + 'get_ad', return_value=(None, params.get("ad_cnt", 1))) + ome_default_args.update(params['module_args']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("params", [{ + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True, + "domain_username": "user", "domain_password": "passwd"}, + "msg": "{0}{1}".format(TEST_CONNECTION_FAIL, "Unable to connect to the LDAP or AD server."), "is_http": True, + "error_info": { + "error": {"@Message.ExtendedInfo": [{"Message": "Unable to connect to the LDAP or AD server."}], }}}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + 
"group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True, + "domain_username": "user", "domain_password": "passwd"}, + "msg": "{0}{1}".format(TEST_CONNECTION_FAIL, ERR_READ_FAIL), "is_http": True, "error_info": { + "error1": {"@Message.ExtendedInfo": [{"Message": "Unable to connect to the LDAP or AD server."}], }}}, { + "module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"], + "group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True, + "domain_username": "user", "domain_password": "passwd"}, + "msg": "{0}{1}".format(TEST_CONNECTION_FAIL, "Exception occurrence success."), + "error_info": "Exception occurrence success."}, ]) + def test_ome_active_directory_create_test_conenction_fail(self, params, ome_default_args, mocker): + mocker.patch(MODULE_PATH + 'get_ad', return_value=(None, params.get("ad_cnt", 1))) + rest_obj_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = rest_obj_class_mock.return_value.__enter__.return_value + if params.get("is_http"): + json_str = to_text(json.dumps(params['error_info'])) + ome_connection_mock_obj.invoke_request.side_effect = HTTPError('http://testdellemcomead.com', 404, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str)) + else: + ome_connection_mock_obj.invoke_request.side_effect = Exception(params['error_info']) + ome_default_args.update(params['module_args']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_active_directory_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_ad, ome_response_mock): + ome_default_args.update({"state": "absent", "name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = 
to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py new file mode 100644 index 00000000..b5bc1d94 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py @@ -0,0 +1,457 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.3.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_alerts_smtp +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException + +SUCCESS_MSG = "Successfully updated the SMTP settings." +SMTP_URL = "AlertService/AlertDestinations/SMTPConfiguration" +NO_CHANGES = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_application_alerts_smtp.' +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.' 
+ + +@pytest.fixture +def ome_connection_mock_for_smtp(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestAppAlertsSMTP(FakeAnsibleModule): + module = ome_application_alerts_smtp + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": { + "@odata.context": "/api/$metadata#Collection(AlertDestinations.SMTPConfiguration)", + "@odata.count": 1, + "value": [ + { + "@odata.type": "#AlertDestinations.SMTPConfiguration", + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": "" + } + } + ] + } + } + ]) + def test_fetch_smtp_settings(self, params, ome_connection_mock_for_smtp, ome_response_mock): + ome_response_mock.success = True + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["json_data"] + ret_data = self.module.fetch_smtp_settings(ome_connection_mock_for_smtp) + assert ret_data.get("DestinationAddress") == "localhost" + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + + "json_data": { + "DestinationAddress": "localhost", + "PortNumber": 25, + "UseCredentials": True, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + }, + "payload": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + 
"User": "username", + "Password": "password" + } + } + } + ]) + def test_update_smtp_settings(self, params, ome_connection_mock_for_smtp, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["json_data"] + payload = params["payload"] + ret_data = self.module.update_smtp_settings(ome_connection_mock_for_smtp, payload) + assert ret_data.json_data.get("DestinationAddress") == "localhost" + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "payload": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + } + } + ]) + def test_update_payload_auth(self, params, ome_connection_mock_for_smtp, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data = self.module.update_payload(f_module, payload) + assert ret_data.get("DestinationAddress") == "localhost" + assert ret_data.get("UseCredentials") is True + assert ret_data.get("Credential") is not None + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": False, + "credentials": {"username": "username", "password": "password"} + }, + "payload": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + } + } + ]) + def test_update_payload_without_auth(self, params, ome_connection_mock_for_smtp, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] 
+ ret_data = self.module.update_payload(f_module, payload) + assert ret_data.get("DestinationAddress") == "localhost" + assert ret_data.get("UseCredentials") is False + assert ret_data.get("Credential") is None + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": False, + "credentials": {"username": "username", "password": "password"} + }, + "payload": { + "DestinationAddress": "", + "UseCredentials": True, + "PortNumber": 26, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + } + }, + { + "module_args": { + "destination_address": "localhost", "use_ssl": True, + "enable_authentication": False, + "credentials": {"username": "username", "password": "password"} + }, + "payload": { + "DestinationAddress": "", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + } + }, + ]) + def test_get_value(self, params, ome_connection_mock_for_smtp, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data = self.module.get_value(f_module, payload, "port_number", "PortNumber") + assert ret_data == 25 + + @pytest.mark.parametrize("params", [ + { + "payload1": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": "password" + } + }, + "payload2": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": "password" + } + } + }, + ]) + def test_diff_payload_same(self, params, ome_connection_mock_for_smtp, ome_response_mock): + payload1 = params["payload1"] + payload2 = params["payload2"] + diff = self.module._diff_payload(payload1, payload2) + assert diff == 0 + + @pytest.mark.parametrize("params", [ + { + 
"payload1": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + }, + "payload2": { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": "password" + } + } + }, + ]) + def test_diff_payload_diff(self, params, ome_connection_mock_for_smtp, ome_response_mock): + payload1 = params["payload1"] + payload2 = params["payload2"] + diff = self.module._diff_payload(payload1, payload2) + assert diff is True + + def test_diff_payload_none(self, ome_connection_mock_for_smtp, ome_response_mock): + diff = self.module._diff_payload(None, None) + assert diff is False + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": { + "DestinationAddress": "localhost1", + "PortNumber": 25, + "UseCredentials": True, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + }, + } + ]) + def test_module_success(self, mocker, params, ome_connection_mock_for_smtp, ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + + get_json_data = { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": "" + } + } + + update_json_data = params["json_data"] + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', return_value=get_json_data) + mocker.patch(MODULE_PATH + 'update_payload', return_value=update_json_data) + mocker.patch(MODULE_PATH + '_diff_payload', return_value=1) + result = self._run_module(ome_default_args) + assert result["msg"] == SUCCESS_MSG + + 
@pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": { + "DestinationAddress": "localhost1", + "PortNumber": 25, + "UseCredentials": True, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + }, + } + ]) + def test_module_success_no_auth(self, mocker, params, ome_connection_mock_for_smtp, ome_response_mock, + ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + + get_json_data = { + "DestinationAddress": "localhost", + "UseCredentials": True, + "PortNumber": 25, + "UseSSL": False + } + + update_json_data = params["json_data"] + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', return_value=get_json_data) + mocker.patch(MODULE_PATH + 'update_payload', return_value=update_json_data) + mocker.patch(MODULE_PATH + '_diff_payload', return_value=1) + result = self._run_module(ome_default_args) + assert result["msg"] == SUCCESS_MSG + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": { + "DestinationAddress": "localhost1", + "PortNumber": 25, + "UseCredentials": True, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + }, + } + ]) + def test_module_idempotent(self, mocker, params, ome_connection_mock_for_smtp, ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + get_json_data = params["json_data"] + update_json_data = 
params["json_data"] + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', return_value=get_json_data) + mocker.patch(MODULE_PATH + 'update_payload', return_value=update_json_data) + mocker.patch(MODULE_PATH + '_diff_payload', return_value=0) + result = self._run_module(ome_default_args) + assert result["msg"] == NO_CHANGES + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": { + "DestinationAddress": "localhost1", + "PortNumber": 25, + "UseCredentials": True, + "UseSSL": True, + "Credential": { + "User": "username", + "Password": None + } + }, + } + ]) + def test_module_check_mode(self, mocker, params, ome_connection_mock_for_smtp, ome_response_mock, + ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + f_module = self.get_module_mock(params=ome_default_args) + get_json_data = params["json_data"] + update_json_data = params["json_data"] + + f_module.check_mode = True + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 1) + assert err.value.args[0] == CHANGES_FOUND + + f_module.check_mode = False + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + @pytest.mark.parametrize("exc_type", + [HTTPError, URLError]) + def test_smtp_main_exception_case(self, mocker, exc_type, ome_connection_mock_for_smtp, ome_response_mock, + ome_default_args): + ome_default_args.update({"destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": 
{"username": "username", "password": "password"} + }) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'fetch_smtp_settings', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py new file mode 100644 index 00000000..ea4551d9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.3.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_alerts_syslog +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_application_alerts_syslog.' + +SUCCESS_MSG = "Successfully updated the syslog forwarding settings." +DUP_ID_MSG = "Duplicate server IDs are provided." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." + + +@pytest.fixture +def ome_connection_mock_for_syslog(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAlertSyslog(FakeAnsibleModule): + module = ome_application_alerts_syslog + + @pytest.mark.parametrize("params", [ + {"module_args": { + "syslog_servers": [ + { + "destination_address": "192.168.10.41", + "enabled": True, + "id": 1, + "port_number": 514 + }, + { + "destination_address": "192.168.10.46", + "enabled": False, + "id": 2, + "port_number": 514 + }, + { + "destination_address": "192.168.10.43", + "enabled": False, + "id": 3, + "port_number": 514 + }, + { + "destination_address": "192.168.10.44", + "enabled": True, + "id": 4, + "port_number": 514 + } + ] + }, "json_data": { + "@odata.context": 
"/api/$metadata#Collection(AlertDestinations.SyslogConfiguration)", + "@odata.count": 4, + "value": [ + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 1, + "Enabled": True, + "DestinationAddress": "192.168.10.41", + "PortNumber": 514 + }, + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 2, + "Enabled": False, + "DestinationAddress": "192.168.10.46", + "PortNumber": 0 + }, + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 3, + "Enabled": False, + "DestinationAddress": "192.168.10.43", + "PortNumber": 514 + }, + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 4, + "Enabled": True, + "DestinationAddress": "192.168.10.44", + "PortNumber": 514 + } + ] + }, "msg": NO_CHANGES_MSG}, + {"module_args": { + "syslog_servers": [ + { + "destination_address": "192.168.10.41", + "enabled": True, + "id": 1, + "port_number": 514 + }, + { + "destination_address": "192.168.10.46", + "enabled": False, + "id": 2, + "port_number": 514 + } + ] + }, "json_data": { + "@odata.context": "/api/$metadata#Collection(AlertDestinations.SyslogConfiguration)", + "@odata.count": 4, + "value": [ + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 1, + "Enabled": True, + "DestinationAddress": "192.168.10.41", + "PortNumber": 511 + }, + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 2, + "Enabled": True, + "DestinationAddress": "192.168.10.46", + "PortNumber": 514 + } + ] + }, "msg": SUCCESS_MSG}, + {"check_mode": True, "module_args": { + "syslog_servers": [ + { + "destination_address": "192.168.10.41", + "enabled": True, + "id": 1, + "port_number": 514 + }, + { + "destination_address": "192.168.10.46", + "enabled": False, + "id": 2, + "port_number": 514 + } + ] + }, "json_data": { + "@odata.context": "/api/$metadata#Collection(AlertDestinations.SyslogConfiguration)", + "@odata.count": 4, + "value": [ + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 1, + 
"Enabled": True, + "DestinationAddress": "192.168.10.41", + "PortNumber": 511 + }, + { + "@odata.type": "#AlertDestinations.SyslogConfiguration", + "Id": 2, + "Enabled": True, + "DestinationAddress": "192.168.10.46", + "PortNumber": 514 + } + ] + }, "msg": CHANGES_FOUND}, + {"module_args": { + "syslog_servers": [] + }, "json_data": {}, "msg": NO_CHANGES_MSG}, + {"module_args": { + "syslog_servers": [ + { + "destination_address": "192.168.10.41", + "enabled": True, + "id": 1, + "port_number": 514 + }, + { + "destination_address": "192.168.10.46", + "enabled": False, + "id": 2, + "port_number": 514 + }, + { + "destination_address": "192.168.10.43", + "enabled": False, + "id": 3, + "port_number": 514 + }, + { + "destination_address": "192.168.10.44", + "enabled": True, + "id": 4, + "port_number": 514 + }, + { + "destination_address": "192.168.10.44", + "enabled": True, + "id": 4, + "port_number": 514 + } + ] + }, "json_data": { + "@odata.context": "/api/$metadata#Collection(AlertDestinations.SyslogConfiguration)", + "@odata.count": 4, + "value": [] + }, "msg": DUP_ID_MSG}, + ]) + def test_ome_alert_syslog_success(self, params, ome_connection_mock_for_syslog, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get("json_data") + ome_connection_mock_for_syslog.strip_substr_dict.return_value = params.get("json_data") + ome_default_args.update(params['module_args']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLValidationError, TypeError, ConnectionError, HTTPError, URLError]) + def test_alert_syslog_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_syslog, ome_response_mock): + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": 
"error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'validate_input', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'validate_input', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'validate_input', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py new file mode 100644 index 00000000..c31983bc --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_certificate +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def ome_connection_mock_for_application_certificate(mocker, ome_response_mock): + connection_class_mock = mocker.patch( + MODULE_PATH + 'ome_application_certificate.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAppCSR(FakeAnsibleModule): + module = ome_application_certificate + + @pytest.mark.parametrize("exc_type", + [ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_certificate_main_error_cases(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_application_certificate, + ome_response_mock): + json_str = to_text(json.dumps({"info": "error_details"})) + args = {"command": "generate_csr", "distinguished_name": "hostname.com", + "department_name": "Remote Access Group", "business_name": "Dell Inc.", + "locality": "Round Rock", "country_state": "Texas", "country": "US", + "email": "support@dell.com"} + ome_default_args.update(args) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters', + 
side_effect=exc_type("TEST")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'csr_status' not in result + assert 'msg' in result + + def test_get_resource_parameters_generate(self, mocker, ome_default_args, + ome_connection_mock_for_application_certificate, + ome_response_mock): + args = {"command": "generate_csr", "distinguished_name": "hostname.com", + "department_name": "Remote Access Group", "business_name": "Dell Inc.", + "locality": "Round Rock", "country_state": "Texas", "country": "US", + "email": "support@dell.com"} + f_module = self.get_module_mock(params=args) + result = self.module.get_resource_parameters(f_module) + assert result[0] == "POST" + assert result[1] == "ApplicationService/Actions/ApplicationService.GenerateCSR" + assert result[2] == {'DistinguishedName': 'hostname.com', 'Locality': 'Round Rock', + 'DepartmentName': 'Remote Access Group', 'BusinessName': 'Dell Inc.', + 'State': 'Texas', 'Country': 'US', 'Email': 'support@dell.com'} + + def test_upload_csr_fail01(self, mocker, ome_default_args, ome_connection_mock_for_application_certificate, + ome_response_mock): + args = {"command": "upload", "upload_file": "/path/certificate.cer"} + f_module = self.get_module_mock(params=args) + with pytest.raises(Exception) as exc: + self.module.get_resource_parameters(f_module) + assert exc.value.args[0] == "No such 
file or directory." + + def test_upload_csr_success(self, mocker, ome_default_args, ome_connection_mock_for_application_certificate, + ome_response_mock): + payload = "--BEGIN-REQUEST--" + mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters', + return_value=("POST", "ApplicationService/Actions/ApplicationService.UploadCertificate", payload)) + ome_default_args.update({"command": "upload", "upload_file": "/path/certificate.cer"}) + ome_response_mock.success = True + result = self.execute_module(ome_default_args) + assert result['msg'] == "Successfully uploaded application certificate." + + def test_generate_csr(self, mocker, ome_default_args, ome_connection_mock_for_application_certificate, + ome_response_mock): + csr_json = {"CertificateData": "--BEGIN-REQUEST--"} + payload = {"DistinguishedName": "hostname.com", "DepartmentName": "Remote Access Group", + "BusinessName": "Dell Inc.", "Locality": "Round Rock", "State": "Texas", + "Country": "US", "Email": "support@dell.com"} + mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters', + return_value=("POST", "ApplicationService/Actions/ApplicationService.GenerateCSR", payload)) + ome_default_args.update({"command": "generate_csr", "distinguished_name": "hostname.com", + "department_name": "Remote Access Group", "business_name": "Dell Inc.", + "locality": "Round Rock", "country_state": "Texas", "country": "US", + "email": "support@dell.com"}) + ome_response_mock.success = True + ome_response_mock.json_data = csr_json + result = self.execute_module(ome_default_args) + assert result['msg'] == "Successfully generated certificate signing request." 
+ assert result['csr_status'] == {'CertificateData': '--BEGIN-REQUEST--'} diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py new file mode 100644 index 00000000..3a86a3f0 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py @@ -0,0 +1,2240 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ssl import SSLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_console_preferences +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException + +SUCCESS_MSG = "Successfully updated the Console Preferences settings." +SETTINGS_URL = "ApplicationService/Settings" +NO_CHANGES = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +HEALTH_CHECK_UNIT_REQUIRED = "The health check unit is required when health check interval is specified." +HEALTH_CHECK_INTERVAL_REQUIRED = "The health check interval is required when health check unit is specified." 
+HEALTH_CHECK_INTERVAL_INVALID = "The health check interval specified is invalid for the {0}" +JOB_URL = "JobService/Jobs" +CIFS_URL = "ApplicationService/Actions/ApplicationService.UpdateShareTypeSettings" +CONSOLE_SETTINGS_VALUES = ["DATA_PURGE_INTERVAL", "EMAIL_SENDER", "TRAP_FORWARDING_SETTING", + "MX7000_ONBOARDING_PREF", "REPORTS_MAX_RESULTS_LIMIT", + "DISCOVERY_APPROVAL_POLICY", "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DEVICE_PREFERRED_NAME", "INVALID_DEVICE_HOSTNAME", "COMMON_MAC_ADDRESSES", + "CONSOLE_CONNECTION_SETTING", "MIN_PROTOCOL_VERSION", "SHARE_TYPE"] +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_application_console_preferences.' +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.' + + +@pytest.fixture +def ome_connection_mock_for_application_console_preferences(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAppConsolePreferences(FakeAnsibleModule): + module = ome_application_console_preferences + + @pytest.mark.parametrize("params", [{"module_args": {"report_row_limit": 123, + "mx7000_onboarding_preferences": "all", + "email_sender_settings": "admin@dell.com", + "trap_forwarding_format": "Normalized", + "metrics_collection_settings": 361}, + "json_data": {"value": [ + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": 
"MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": "" + }, + ]}, + }]) + def test_fetch_cp_settings(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock): + ome_response_mock.success = True + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["json_data"] + ret_data = self.module.fetch_cp_settings(ome_connection_mock_for_application_console_preferences) + assert ret_data == params["json_data"]["value"] + + @pytest.mark.parametrize("params", [{"module_args": {"device_health": {"health_check_interval": 55, + "health_check_interval_unit": "Minutes"}}, + "json_data": {"@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0/1 * 1/1 * ? 
*", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"}}, + }]) + def test_job_details(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock): + ome_response_mock.success = True + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = {"value": [params["json_data"]]} + ret_data = self.module.job_details(ome_connection_mock_for_application_console_preferences) + assert ret_data == params["json_data"] + + @pytest.mark.parametrize("params", + [ + {"module_args": + { + "report_row_limit": 123, + "mx7000_onboarding_preferences": "all", + "email_sender_settings": "admin@dell.com", + "trap_forwarding_format": "Normalized", + "metrics_collection_settings": 361 + }, + "payload": + {"ConsoleSetting": + [ + { + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "AsIs", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" 
+ } + ]}, + "curr_payload": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": 
"INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "json_data": {"value": [ + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": 
"omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, } + ]) + def test_create_payload_success(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + 
ome_response_mock.success = True + ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + curr_payload = params["json_data"]["value"] + ret_payload, payload_dict = self.module.create_payload(ome_connection_mock_for_application_console_preferences, + curr_payload) + assert payload_dict == params["curr_payload"] + + @pytest.mark.parametrize("params", + [ + {"module_args": + { + "metrics_collection_settings": "361" + }, + "payload": + {"ConsoleSetting": + [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + } + ]}, + "curr_payload": + {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + 
"Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "json_data": {"value": [ + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" 
+ }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + 
"DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, }]) + def test_create_payload_success_case02(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + # ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + curr_payload = params["json_data"]["value"] + ret_payload, payload_dict = self.module.create_payload(f_module, curr_payload) + assert ret_payload == params["payload"] + + @pytest.mark.parametrize("params", [{"module_args": {"builtin_appliance_share": {"share_options": "CIFS", + "cifs_options": "V1"}}, + "payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V1", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}]}, + "curr_payload": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": 
"MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + 
"GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "json_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + { + "@odata.type": 
"#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, }]) + def test_create_payload_success_case03(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + # ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + curr_payload = params["json_data"]["value"] + ret_payload, payload_dict = self.module.create_payload(f_module, curr_payload) + assert ret_payload == params["payload"] + + @pytest.mark.parametrize("params", [ + { + "payload": { + "ConsoleSetting": [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + 
"Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "AsIs", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + } + ] + }, + "cifs_payload": { + "ConsoleSetting": [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS" + } + ] + }, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": None, + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}, + "job_data": + { + "@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0/1 * 1/1 * ? 
*", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", + "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"}}, + "payload_dict": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": "" + }, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": "" + }, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": "" + }, + "DISCOVERY_APPROVAL_POLICY": { + "@odata.type": 
"#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"} + + }, + "schedule": None, + "module_args": { + "report_row_limit": 123, + } + } + 
]) + def test_update_console_preferences(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_default_args.update(params["module_args"]) + # ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + final_resp, cifs_resp, job_resp = self.module.update_console_preferences(f_module, ome_connection_mock_for_application_console_preferences, + params["payload"], params["cifs_payload"], + params["job_payload"], params["job_data"], + params["payload_dict"], params["schedule"]) + assert final_resp.status_code == 200 + + @pytest.mark.parametrize("params", [ + { + "payload": { + "ConsoleSetting": [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "AsIs", + "DataType": "java.lang.String", + "GroupName": "" + }, + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING" + } + ] + }, + "cifs_payload": { + "ConsoleSetting": [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS" + } + ] + }, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": None, + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, 
"Name": "GROUP"}}]}, + "job_data": + { + "@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0/1 * 1/1 * ? *", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", + "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"}}, + "payload_dict": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", 
+ "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": 
"CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"} + + }, + "schedule": "0 0 0/5 1/1 * ? *", + "module_args": { + "builtin_appliance_share": {"share_options": "HTTPS", "cifs_options": "V2"} + } + } + ]) + def test_update_console_preferences_case02(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_default_args.update(params["module_args"]) + # ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + final_resp, cifs_resp, job_resp = self.module.update_console_preferences(f_module, + ome_connection_mock_for_application_console_preferences, + params["payload"], + params["cifs_payload"], + params["job_payload"], + params["job_data"], + params["payload_dict"], + params["schedule"]) + assert cifs_resp.success is True + + @pytest.mark.parametrize("params", [{"payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "AsIs", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}]}, + "cifs_payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + 
"GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": "0 0 0/5 1/1 * ? *", + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}, + "job_data": {"@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0/1 * 1/1 * ? *", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"}}, + "payload_dict": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + 
"MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_" + "DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + 
"DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "schedule": "0 0 0/5 1/1 * ? *", + "module_args": {"device_health": {"health_check_interval": 50, + "health_check_interval_unit": "Minutes"}}}]) + def test_update_console_preferences_case03(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_default_args.update(params["module_args"]) + # ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + final_resp, cifs_resp, job_resp = self.module.update_console_preferences(f_module, + ome_connection_mock_for_application_console_preferences, + params["payload"], + params["cifs_payload"], + params["job_payload"], + params["job_data"], + params["payload_dict"], + params["schedule"]) + assert job_resp.success is True + + @pytest.mark.parametrize("params", [{"module_args": {"report_row_limit": 123}, + "payload": {"ConsoleSetting": [{"Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "AsIs", + "DataType": "java.lang.String", + "GroupName": ""}, + {"Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "SLOT_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": 
"DISCOVERY_SETTING"}]}, + "curr_payload": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_" + "DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": 
"INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "json_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", 
+ "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, }]) + def test_create_payload_dict(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + 
ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + curr_payload = params["json_data"]["value"] + ret_payload = self.module.create_payload_dict(curr_payload) + assert ret_payload == params["curr_payload"] + + @pytest.mark.parametrize("params", [{"module_args": {"builtin_appliance_share": {"share_options": "CIFS", + "cifs_options": "V2"}}, + "payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, + "curr_payload": {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": 
""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_" + "DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "json_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": 
"TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": "java.lang.Boolean", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + 
{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, }]) + def test_create_cifs_payload(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [params["payload"]]} + f_module = self.get_module_mock(params=params['module_args']) + curr_payload = params["json_data"]["value"] + ret_payload = self.module.create_cifs_payload(ome_connection_mock_for_application_console_preferences, + curr_payload) + assert ret_payload.get("ConsoleSetting")[0]["Name"] == params["payload"]["ConsoleSetting"][0]["Name"] + + @pytest.mark.parametrize("params", [{"module_args": {"device_health": {"health_check_interval": 50, + "health_check_interval_unit": "Minutes"}}, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": None, + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}}]) + def test_create_job(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + ome_response_mock.json_data = params["job_payload"] + ome_default_args.update(params['module_args']) + job_payload, schedule = self.module.create_job(ome_connection_mock_for_application_console_preferences) + assert job_payload == params["job_payload"] + + @pytest.mark.parametrize("params", 
[{"module_args": {"device_health": {"health_check_interval": 5, + "health_check_interval_unit": "Hourly"}}, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": "0 0 0/5 1/1 * ? *", + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}, + "schedule": "0 0 0/5 1/1 * ? *"}]) + def test_create_job_case02(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["job_payload"] + ome_default_args.update(params['module_args']) + job_payload, schedule = self.module.create_job(f_module) + assert schedule == params["schedule"] + + @pytest.mark.parametrize("params", [{"module_args": {"device_health": {"health_check_interval": 5, + "health_check_interval_unit": "Minutes"}}, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": "0 0/5 * 1/1 * ? *", + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}, + "schedule": "0 0/5 * 1/1 * ? 
*"}]) + def test_create_job_case03(self, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = True + f_module = self.get_module_mock(params=params['module_args']) + ome_response_mock.json_data = params["job_payload"] + ome_default_args.update(params['module_args']) + job_payload, schedule = self.module.create_job(f_module) + assert schedule == params["schedule"] + + @pytest.mark.parametrize("params", [ + { + "module_args": {"metrics_collection_settings": 361}, + "cifs_payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, + "cp_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, ]}, + "payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + 
"GroupName": ""}]}, }]) + def test_module_idempotent(self, mocker, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + curr_resp = params["cp_data"]["value"] + payload = params["payload"] + cifs_payload = params["cifs_payload"] + schedule = None + job = None + diff = self.module._diff_payload(curr_resp, payload, cifs_payload, schedule, job) + assert diff == 0 + + @pytest.mark.parametrize("params", [ + { + "module_args": {"metrics_collection_settings": 361}, + "cifs_payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}, + "cp_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, ]}, + "payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "365", + "DataType": "java.lang.Integer", + "GroupName": ""}]}, }]) + def 
test_module_idempotent_case02(self, mocker, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + curr_resp = params["cp_data"]["value"] + payload = params["payload"] + cifs_payload = params["cifs_payload"] + schedule = None + job = None + diff = self.module._diff_payload(curr_resp, payload, cifs_payload, schedule, job) + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "module_args": {"device_health": {"health_check_interval": 5, + "health_check_interval_unit": "Hourly"}}, + "json_data": {"@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0 0/5 1/1 * ? *", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"}}, + "cp_data": + {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + 
"DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, ]}, + "schedule": "0 0 0/5 1/1 * ? *", + "payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "365", + "DataType": "java.lang.Integer", + "GroupName": ""}]}, + "cifs_payload": {"ConsoleSetting": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}]}}]) + def test_module_idempotent_case03(self, mocker, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + curr_resp = params["cp_data"]["value"] + payload = params["payload"] + cifs_payload = params["cifs_payload"] + schedule = params["schedule"] + job = params["json_data"] + diff = self.module._diff_payload(curr_resp, payload, cifs_payload, schedule, job) + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "module_args": {"device_health": {"health_check_interval": 100, + "health_check_interval_unit": "Minutes"} + }}]) + def test__validate_params_fail_case01(self, params, ome_connection_mock_for_application_console_preferences): + health = params['module_args'].get("device_health").get("health_check_interval_unit") + f_module 
= self.get_module_mock(params=params['module_args']) + with pytest.raises(Exception) as exc: + self.module._validate_params(f_module) + assert exc.value.args[0] == HEALTH_CHECK_INTERVAL_INVALID.format(health) + + @pytest.mark.parametrize("params", [ + { + "module_args": {"device_health": {"health_check_interval_unit": "Minutes"} + }}]) + def test__validate_params_fail_case02(self, params, ome_connection_mock_for_application_console_preferences): + f_module = self.get_module_mock(params=params['module_args']) + with pytest.raises(Exception) as exc: + self.module._validate_params(f_module) + assert exc.value.args[0] == HEALTH_CHECK_INTERVAL_REQUIRED + + @pytest.mark.parametrize("params", [ + { + "module_args": {"device_health": {"health_check_interval": 50} + }}]) + def test__validate_params_fail_case03(self, params, ome_connection_mock_for_application_console_preferences): + f_module = self.get_module_mock(params=params['module_args']) + with pytest.raises(Exception) as exc: + self.module._validate_params(f_module) + assert exc.value.args[0] == HEALTH_CHECK_UNIT_REQUIRED + + @pytest.mark.parametrize("params", [ + { + "module_args": {"device_health": {"health_check_interval": 100, + "health_check_interval_unit": "Hourly"} + }}]) + def test__validate_params_fail_case04(self, params, ome_connection_mock_for_application_console_preferences): + health = params['module_args'].get("device_health").get("health_check_interval_unit") + f_module = self.get_module_mock(params=params['module_args']) + with pytest.raises(Exception) as exc: + self.module._validate_params(f_module) + assert exc.value.args[0] == HEALTH_CHECK_INTERVAL_INVALID.format(health) + + @pytest.mark.parametrize("params", [ + { + "module_args": {"report_row_limit": 123, + "mx7000_onboarding_preferences": "all", + "email_sender_settings": "admin@dell.com", + "trap_forwarding_format": "Normalized", + "metrics_collection_settings": 361 + }, + "json_data": {"value": [{"@odata.type": 
"#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, ]}, }]) + def test_module_check_mode(self, mocker, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = {"value": [params["json_data"]]} + ome_default_args.update(params['module_args']) + f_module = self.get_module_mock(params=ome_default_args) + get_json_data = params["json_data"] + update_json_data = params["json_data"] + + f_module.check_mode = True + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 1) + assert err.value.args[0] == CHANGES_FOUND + + f_module.check_mode = False + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + @pytest.mark.parametrize("params", [ + { + "job_details": { + "@odata.type": "#JobService.Job", + "@odata.id": 
"/api/JobService/Jobs(10093)", + "Id": 10093, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "NextRun": "2022-03-15 05:25:00.0", + "LastRun": "2022-03-15 05:24:00.043", + "StartTime": None, + "EndTime": None, + "Schedule": "0 0/1 * 1/1 * ? *", + "State": "Enabled", + "CreatedBy": "admin", + "UpdatedBy": None, + "Visible": None, + "Editable": None, + "Builtin": False, + "UserGenerated": True, + "Targets": [{"JobId": 10093, "Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}], + "Params": [{"JobId": 10093, "Key": "metricType", "Value": "40, 50"}], + "LastRunStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2060, "Name": "Completed"}, + "JobType": {"@odata.type": "#JobService.JobType", "Id": 6, "Name": "Health_Task", "Internal": False}, + "JobStatus": {"@odata.type": "#JobService.JobStatus", "Id": 2020, "Name": "Scheduled"}, + "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(10093)/ExecutionHistories", + "LastExecutionDetail": {"@odata.id": "/api/JobService/Jobs(10093)/LastExecutionDetail"} + }, + "job_payload": {"Id": 0, + "JobName": "Global Health Task", + "JobDescription": "Global Health Task", + "Schedule": None, + "State": "Enabled", + "JobType": {"Id": 6, "Name": "Health_Task"}, + "Params": [{"Key": "metricType", "Value": "40, 50"}], + "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}]}, + "cp_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + 
"GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, ]}, + "payload_dict": + {"DATA_PURGE_INTERVAL": {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "TRAP_FORWARDING_SETTING": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": ""}, + "MX7000_ONBOARDING_PREF": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": ""}, + "REPORTS_MAX_RESULTS_LIMIT": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": ""}, + "EMAIL_SENDER": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": ""}, + "DISCOVERY_APPROVAL_POLICY": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DISCOVERY_APPROVAL_POLICY", + "DefaultValue": "Automatic", + "Value": "Automatic", + "DataType": "java.lang.String", + "GroupName": ""}, + "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", + "DefaultValue": "false", + "Value": "true", + "DataType": 
"java.lang.Boolean", + "GroupName": ""}, + "DEVICE_PREFERRED_NAME": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DEVICE_PREFERRED_NAME", + "DefaultValue": "HOST_NAME", + "Value": "PREFER_DNS,PREFER_IDRAC_HOSTNAME", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "INVALID_DEVICE_HOSTNAME": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "INVALID_DEVICE_HOSTNAME", + "DefaultValue": "", + "Value": "localhost", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "COMMON_MAC_ADDRESSES": + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "COMMON_MAC_ADDRESSES", + "DefaultValue": "", + "Value": "::", + "DataType": "java.lang.String", + "GroupName": "DISCOVERY_SETTING"}, + "MIN_PROTOCOL_VERSION": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MIN_PROTOCOL_VERSION", + "DefaultValue": "V2", + "Value": "V2", + "DataType": "java.lang.String", + "GroupName": "CIFS_PROTOCOL_SETTINGS"}, + "CONSOLE_CONNECTION_SETTING": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "CONSOLE_CONNECTION_SETTING", + "DefaultValue": "last_known", + "Value": "last_known", + "DataType": "java.lang.String", + "GroupName": "CONSOLE_CONNECTION_SETTING"}, + "SHARE_TYPE": { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "SHARE_TYPE", + "DefaultValue": "CIFS", + "Value": "CIFS", + "DataType": "java.lang.String", + "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"}}, + "payload": + {"ConsoleSetting": + [ + { + "@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + "DataType": "java.lang.Integer", + "GroupName": "" + }]}, + "cifs_payload": + {"ConsoleSetting": []}, + "module_args": {"metrics_collection_settings": 300}, + "json_data": {"value": [{"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "DATA_PURGE_INTERVAL", + "DefaultValue": "365", + "Value": "361", + 
"DataType": "java.lang.Integer", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "TRAP_FORWARDING_SETTING", + "DefaultValue": "AsIs", + "Value": "Normalized", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "MX7000_ONBOARDING_PREF", + "DefaultValue": "all", + "Value": "all", + "DataType": "java.lang.String", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "REPORTS_MAX_RESULTS_LIMIT", + "DefaultValue": "0", + "Value": "123", + "DataType": "java.lang.Integer", + "GroupName": "" + }, + {"@odata.type": "#ApplicationService.ConsoleSetting", + "Name": "EMAIL_SENDER", + "DefaultValue": "omcadmin@dell.com", + "Value": "admin@dell.com", + "DataType": "java.lang.String", + "GroupName": "" + }, ]}, }]) + def test_module_success(self, mocker, params, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + # ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + mocker.patch(MODULE_PATH + 'job_details', return_value=params["job_details"]) + mocker.patch(MODULE_PATH + 'create_job', return_value=(None, None)) + mocker.patch(MODULE_PATH + 'fetch_cp_settings', return_value=params["cp_data"]["value"]) + mocker.patch(MODULE_PATH + 'create_payload', return_value=(params["payload"], params["payload_dict"])) + mocker.patch(MODULE_PATH + 'create_cifs_payload', return_value=params["cifs_payload"]) + mocker.patch(MODULE_PATH + '_diff_payload', return_value=1) + # mocker.patch(MODULE_PATH + 'update_payload', return_value=update_json_data) + # mocker.patch(MODULE_PATH + '_diff_payload', return_value=1) + result = self._run_module(ome_default_args) + assert result["msg"] == SUCCESS_MSG + + @pytest.mark.parametrize("exc_type", [HTTPError, URLError]) + def 
test_cp_main_exception_case(self, mocker, exc_type, ome_connection_mock_for_application_console_preferences, + ome_response_mock, ome_default_args): + ome_default_args.update({"device_health": {"health_check_interval": 65, + "health_check_interval_unit": "Minutes"}}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + '_validate_params', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + '_validate_params', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + '_validate_params', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py new file mode 100644 index 00000000..3938184e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py @@ -0,0 +1,425 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest +from io import StringIO +from ssl import SSLError +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_address +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def ome_connection_mock_for_application_network_address(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_application_network_address.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAppNetwork(FakeAnsibleModule): + module = ome_application_network_address + + inp_param = { + "hostname": "192.1.2.3", + "password": "password", + "port": 443, + "username": "root", + "enable_nic": True, + "interface_name": "eth0", + "dns_configuration": {"dns_domain_name": "localdomain", "dns_name": "openmanage-enterprise", + "register_with_dns": False, + "use_dhcp_for_dns_domain_name": False}, + "ipv4_configuration": {"enable": True, "enable_dhcp": True, "use_dhcp_for_dns_server_names": True, + "static_ip_address": "192.168.11.20", "static_subnet_mask": "255.255.255.0", + "static_gateway": "192.168.11.1", "static_preferred_dns_server": "192.168.11.2", + "static_alternate_dns_server": "192.168.11.3"}, + "ipv6_configuration": {"enable": True, "enable_auto_configuration": True, + "static_alternate_dns_server": 
"2607:f2b1:f081:9:1c8c:f1c7:47e:f121", + "static_gateway": "0000::ffff", + "static_ip_address": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "static_preferred_dns_server": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "static_prefix_length": 0, "use_dhcp_for_dns_server_names": True}, + "management_vlan": {"enable_vlan": False, "vlan_id": 0}, + "reboot_delay": 1} + inp_param1 = { + "hostname": "192.1.2.3", + "password": "password", + "port": 443, + "username": "root", + "enable_nic": False + } + out_param = {"EnableNIC": False, + "InterfaceName": "eth0", + "PrimaryInterface": True, + "Ipv4Configuration": {"Enable": True, "EnableDHCP": True, "StaticIPAddress": "192.168.11.20", + "StaticSubnetMask": "255.255.255.0", "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.2", + "StaticAlternateDNSServer": "192.168.11.3"}, + "Ipv6Configuration": {"Enable": True, "EnableAutoConfiguration": True, + "StaticIPAddress": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "StaticPrefixLength": 0, "StaticGateway": "0000::ffff", + "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "StaticAlternateDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121"}, + "ManagementVLAN": {"EnableVLAN": False, "Id": 0}, + "DnsConfiguration": {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"}, + "Delay": 0 + } + + @pytest.mark.parametrize("addr_param", [{"in": inp_param, "out": out_param}, + {"in": inp_param1, "out": out_param}]) + def test_ome_application_network_address_main_success_case_01(self, mocker, ome_default_args, addr_param, + ome_connection_mock_for_application_network_address, + ome_response_mock): + IP_CONFIG = "ApplicationService/Network/AddressConfiguration" + JOB_IP_CONFIG = "ApplicationService/Network/AdapterConfigurations" + POST_IP_CONFIG = "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + 
ome_default_args.update(addr_param["in"]) + ipv4 = {"Enable": True, "EnableDHCP": True, "StaticIPAddress": "192.168.11.20", + "StaticSubnetMask": "255.255.255.0", "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.1", + "StaticAlternateDNSServer": ""} + ipv6 = {"Enable": False, "EnableAutoConfiguration": True, "StaticIPAddress": "", + "StaticPrefixLength": 0, "StaticGateway": "", "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "", "StaticAlternateDNSServer": ""} + dns = {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"} + vlan = {"EnableVLAN": False, "Id": 1} + mocker.patch(MODULE_PATH + "ome_application_network_address.validate_input") + mocker.patch(MODULE_PATH + "ome_application_network_address.get_payload", + return_value=(ipv4, ipv6, dns, vlan)) + mocker.patch(MODULE_PATH + "ome_application_network_address.get_updated_payload", + return_value=(addr_param["out"], "PUT", IP_CONFIG)) + ome_response_mock.json_data = addr_param["out"] + ome_response_mock.success = True + mresult = self.execute_module(ome_default_args) + assert mresult['changed'] is True + assert "msg" in mresult + assert "network_configuration" in mresult and mresult["network_configuration"] == addr_param["out"] + assert mresult["msg"] == "Successfully triggered task to update network address configuration." 
+ + @pytest.mark.parametrize("addr_param", [{"in": inp_param, "out": out_param}]) + def test_ome_application_network_address_main_success_case_02(self, mocker, ome_default_args, addr_param, + ome_connection_mock_for_application_network_address, + ome_response_mock): + POST_IP_CONFIG = "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + ome_default_args.update(addr_param["in"]) + ipv4 = {"Enable": True, "EnableDHCP": True, "StaticIPAddress": "192.168.11.20", + "StaticSubnetMask": "255.255.255.0", "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.1", + "StaticAlternateDNSServer": ""} + ipv6 = {"Enable": False, "EnableAutoConfiguration": True, "StaticIPAddress": "", + "StaticPrefixLength": 0, "StaticGateway": "", "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "", "StaticAlternateDNSServer": ""} + dns = {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"} + vlan = {"EnableVLAN": False, "Id": 1} + mocker.patch(MODULE_PATH + "ome_application_network_address.validate_input") + mocker.patch(MODULE_PATH + "ome_application_network_address.get_payload", + return_value=(ipv4, ipv6, dns, vlan)) + mocker.patch(MODULE_PATH + "ome_application_network_address.get_updated_payload", + return_value=(addr_param["out"], "POST", POST_IP_CONFIG)) + ome_response_mock.json_data = addr_param["out"] + ome_response_mock.success = True + mresult = self.execute_module(ome_default_args) + assert mresult['changed'] is True + assert "msg" in mresult + assert "network_configuration" in mresult and mresult["network_configuration"] == addr_param["out"] + assert mresult["msg"] == "Successfully triggered job to update network address configuration." 
+ + @pytest.mark.parametrize("addr_param", [{"in": inp_param, "out": out_param}]) + def test_get_payload(self, addr_param, ome_default_args): + ome_default_args.update(addr_param["in"]) + f_module = self.get_module_mock(params=addr_param["in"]) + ipv4_payload, ipv6_payload, dns_payload, vlan_payload = self.module.get_payload(f_module) + assert ipv4_payload == addr_param["out"]["Ipv4Configuration"] + assert ipv6_payload == addr_param["out"]["Ipv6Configuration"] + assert dns_payload == addr_param["out"]["DnsConfiguration"] + assert vlan_payload == addr_param["out"]["ManagementVLAN"] + + @pytest.mark.parametrize("addr_param", [{"in": inp_param, "out": out_param}]) + def test_get_updated_payload(self, mocker, ome_default_args, addr_param, + ome_connection_mock_for_application_network_address, + ome_response_mock): + ome_default_args.update(addr_param["in"]) + f_module = self.get_module_mock(params=addr_param["in"]) + ome_response_mock.json_data = {"value": [addr_param["out"]]} + ipv4 = {"Enable": True, "EnableDHCP": True, "StaticIPAddress": "192.168.11.20", + "StaticSubnetMask": "255.255.255.0", "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.2", + "StaticAlternateDNSServer": "192.168.11.3"} + ipv6 = {"Enable": True, "EnableAutoConfiguration": False, + "StaticIPAddress": "2607:f2b1:f081:9:1c8c:f1c7:47e:f12", + "StaticPrefixLength": 0, "StaticGateway": "0000::ffff", "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "StaticAlternateDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f12"} + dns = {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"} + vlan = {"EnableVLAN": False, "Id": 1} + current_setting, method, uri = self.module.get_updated_payload( + ome_connection_mock_for_application_network_address, f_module, ipv4, ipv6, dns, vlan) + assert current_setting == addr_param["out"] + + 
def test_get_updated_payload_when_same_setting_failure_case(self, ome_default_args, + ome_connection_mock_for_application_network_address, + ome_response_mock): + ipv4 = {"Enable": True, "EnableDHCP": True, "StaticIPAddress": "192.168.11.20", + "StaticSubnetMask": "255.255.255.0", "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.2", + "StaticAlternateDNSServer": "192.168.11.3"} + ipv6 = {"Enable": True, "EnableAutoConfiguration": True, + "StaticIPAddress": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "StaticPrefixLength": 0, "StaticGateway": "0000::ffff", "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "StaticAlternateDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121"} + dns = {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"} + vlan = {"EnableVLAN": False, "Id": 1} + current_setting = {"value": [{ + "@odata.context": "/api/$metadata#Network.AddressConfiguration/$entity", + "@odata.type": "#Network.AddressConfiguration", + "@odata.id": "/api/ApplicationService/Network/AddressConfiguration", + "EnableNIC": True, + "InterfaceName": "eth0", + "PrimaryInterface": True, + "Ipv4Configuration": ipv4, + "Ipv6Configuration": ipv6, + "DnsConfiguration": dns, + "ManagementVLAN": vlan, + "Delay": 0 + }]} + ome_default_args.update({"enable_nic": True, "interface_name": "eth0"}) + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + error_message = "No changes found to be applied." 
+ with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_address, f_module, ipv4, ipv6, + dns, vlan) + + @pytest.mark.parametrize("addr_param", + [{"in": inp_param["ipv4_configuration"], "out": out_param["Ipv4Configuration"]}, + {"in": {"enable": True, "enable_auto_configuration": True, + "static_alternate_dns_server": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121", + "static_gateway": "0000::ffff", + "static_ip_address": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "static_preferred_dns_server": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "static_prefix_length": 0, "use_dhcp_for_dns_server_names": True}, + "out": {"Enable": True, "EnableAutoConfiguration": True, + "StaticIPAddress": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "StaticPrefixLength": 0, "StaticGateway": "0000::ffff", + "UseDHCPForDNSServerNames": True, + "StaticPreferredDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "StaticAlternateDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121"}}, + {"in": inp_param["dns_configuration"], "out": out_param["DnsConfiguration"]}, + {"in": None, "out": None}]) + def test_format_payload(self, addr_param): + result = self.module.format_payload(addr_param["in"]) + assert result == addr_param["out"] + + @pytest.mark.parametrize("addr_param", [{"in": inp_param}, + {"in": {"dns_configuration": {"register_with_dns": True}}}, + {"in": {"management_vlan": {"enable_vlan": True}}} + ]) + def test_validate_input_success(self, addr_param): + f_module = self.get_module_mock(params=addr_param["in"]) + self.module.validate_input(f_module) + + def _test_validate_input_fail1(self, ome_default_args): + ome_default_args.update( + {"management_vlan": {"enable_vlan": True}, "dns_configuration": {"register_with_dns": True}}) + f_module = self.get_module_mock(params=ome_default_args) + error_message = "The vLAN settings cannot be updated if the 'register_with_dns' is true. 
" \ + "The 'register_with_dns' cannot be updated if vLAN settings change." + with pytest.raises(Exception, match=error_message) as err: + self.module.validate_input(f_module) + + def test_validate_input_fail2(self, ome_default_args): + ome_default_args.update({"reboot_delay": -1}) + f_module = self.get_module_mock(params=ome_default_args) + error_message = "Invalid value provided for 'reboot_delay'" + with pytest.raises(Exception, match=error_message) as err: + self.module.validate_input(f_module) + + @pytest.mark.parametrize("addr_param", [{"in": "192.168.0.5", "out": True}, + {"in": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121", "out": False}]) + def test_validate_ip_address(self, addr_param): + ret_val = self.module.validate_ip_address(addr_param["in"]) + assert ret_val == addr_param["out"] + + @pytest.mark.parametrize("addr_param", [{"in": "192.168.0.5", "out": False}, + {"in": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121", "out": True}]) + def test_validate_ip_v6_address(self, addr_param): + ret_val = self.module.validate_ip_v6_address(addr_param["in"]) + assert ret_val == addr_param["out"] + + src_dict1 = {"Enable": False, "EnableDHCP": True, "UseDHCPForDNSServerNames": False, + "StaticGateway": "192.168.11.2", + "StaticIPAddress": "192.168.11.20", "StaticSubnetMask": "255.255.255.0", + "StaticPreferredDNSServer": "192.168.11.3", "EnableAutoConfiguration": True} + new_dict1 = {"Enable": True, "EnableDHCP": False, "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "192.168.11.2", + "StaticAlternateDNSServer": "192.168.11.3"} + src_dict2 = {"StaticIPAddress": "192.168.11.20", "StaticSubnetMask": "255.255.255.0", + "EnableAutoConfiguration": False} + new_dict2 = {"StaticIPAddress": "192.168.11.20", "StaticSubnetMask": "255.255.255.0"} + + @pytest.mark.parametrize("addr_param", [{"src_dict": src_dict1, "new_dict": new_dict1, 'diff': 4}, + {"src_dict": src_dict2, "new_dict": new_dict2, 'diff': False}, + {"src_dict": src_dict2, 
"new_dict": {}, 'diff': 0}, + {"src_dict": src_dict2, "new_dict": {"EnableDHCP": None}, 'diff': 0} + ]) + def test_update_ipv4_payload(self, addr_param): + ret_val = self.module.update_ipv4_payload(addr_param["src_dict"], addr_param["new_dict"]) + assert ret_val == addr_param['diff'] + + v6src_dict1 = {"Enable": False, "UseDHCPForDNSServerNames": False, + "StaticGateway": "192.168.11.2", + "StaticIPAddress": "192.168.11.20", "StaticSubnetMask": "255.255.255.0", + "StaticPreferredDNSServer": "192.168.11.3", "EnableAutoConfiguration": False} + v6new_dict1 = {"Enable": True, "EnableAutoConfiguration": True, "StaticGateway": "192.168.11.1", + "UseDHCPForDNSServerNames": True, "StaticPreferredDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "StaticAlternateDNSServer": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121"} + + @pytest.mark.parametrize("addr_param", [{"src_dict": v6src_dict1, "new_dict": v6new_dict1, 'diff': 3}, + {"src_dict": v6src_dict1, "new_dict": {}, 'diff': 0}]) + def test_update_ipv6_payload(self, addr_param): + ret_val = self.module.update_ipv6_payload(addr_param["src_dict"], addr_param["new_dict"]) + assert ret_val == addr_param['diff'] + + dns_src = {"RegisterWithDNS": False, "DnsName": "openmanage-enterprise", + "UseDHCPForDNSDomainName": False, "DnsDomainName": "localdomain"} + dns_new = {"RegisterWithDNS": True, "DnsName": "openmanage-enterprise1", + "UseDHCPForDNSDomainName": True, "DnsDomainName": "localdomain1"} + + @pytest.mark.parametrize("addr_param", [{"src_dict": dns_src, "new_dict": dns_new, 'diff': 3}, + {"src_dict": dns_src, "new_dict": {}, 'diff': 0}, + {"src_dict": dns_src, "new_dict": {"RegisterWithDNS": None, + "UseDHCPForDNSDomainName": None}, + 'diff': 0}]) + def test_update_dns_payload(self, addr_param): + ret_val = self.module.update_dns_payload(addr_param["src_dict"], addr_param["new_dict"]) + assert ret_val == addr_param['diff'] + + vlan_src = {"EnableVLAN": False, "Id": 0} + vlan_new = {"EnableVLAN": True, "Id": 1} + + 
@pytest.mark.parametrize("addr_param", [{"src_dict": vlan_src, "new_dict": vlan_new, 'diff': 2}, + {"src_dict": vlan_src, "new_dict": {}, 'diff': 0}, + {"src_dict": vlan_src, "new_dict": {"EnableVLAN": None}, 'diff': 0}]) + def test_update_vlan_payload(self, addr_param): + ret_val = self.module.update_vlan_payload(addr_param["src_dict"], addr_param["new_dict"]) + assert ret_val == addr_param['diff'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_network_address_main_success_failure_case1(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_application_network_address, + ome_response_mock): + ome_default_args.update({"dns_configuration": {"dns_domain_name": "localdomain"}, + "ipv4_configuration": {"enable": True, "enable_dhcp": True}, + "ipv6_configuration": {"enable": False, "enable_auto_configuration": True}}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_application_network_address.validate_input', + side_effect=exc_type("url open error")) + ome_default_args.update({"dns_configuration": {"dns_domain_name": "localdomain"}, + "ipv4_configuration": {"enable": True, "enable_dhcp": True}, + "ipv6_configuration": {"enable": False, "enable_auto_configuration": True}}) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'ome_application_network_address.validate_input', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'ome_application_network_address.validate_input', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = 
self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'network_configuration' not in result + assert 'msg' in result + + def test_get_network_config_data_case_01(self, ome_connection_mock_for_application_network_address, + ome_response_mock): + param = {} + ome_response_mock.json_data = {"value": [{"PrimaryInterface": "val1"}]} + f_module = self.get_module_mock(params=param) + nt_adp, method, POST_IP_CONFIG = self.module.get_network_config_data( + ome_connection_mock_for_application_network_address, f_module) + assert nt_adp == {'PrimaryInterface': 'val1'} + assert method == "POST" + assert POST_IP_CONFIG == "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + + def test_get_network_config_data_case_02(self, ome_connection_mock_for_application_network_address, + ome_response_mock): + param = {"interface_name": "val1"} + ome_response_mock.json_data = {"value": [{"InterfaceName": "val1"}]} + f_module = self.get_module_mock(params=param) + nt_adp, method, POST_IP_CONFIG = self.module.get_network_config_data( + ome_connection_mock_for_application_network_address, f_module) + assert nt_adp == {'InterfaceName': 'val1'} + assert method == "POST" + assert POST_IP_CONFIG == "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + + def test_get_network_config_data_case_03(self, ome_connection_mock_for_application_network_address, + ome_response_mock): + + param = {"interface_name": "interface_name"} + ome_response_mock.json_data = {"value": [{"InterfaceName": "val2", "PrimaryInterface": "val3"}]} + f_module = self.get_module_mock(params=param) + nt_adp, method, POST_IP_CONFIG = self.module.get_network_config_data( + ome_connection_mock_for_application_network_address, f_module) + assert nt_adp == "val3" + assert method == "POST" + assert POST_IP_CONFIG == "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + + def test_get_network_config_data_case_03(self, ome_connection_mock_for_application_network_address, + 
ome_response_mock): + param = {} + ome_response_mock.json_data = {"value": []} + f_module = self.get_module_mock(params=param) + nt_adp, method, POST_IP_CONFIG = self.module.get_network_config_data( + ome_connection_mock_for_application_network_address, f_module) + assert nt_adp is None + assert method == "POST" + assert POST_IP_CONFIG == "ApplicationService/Actions/Network.ConfigureNetworkAdapter" + + def test_get_network_config_data_exception_case_01(self, ome_connection_mock_for_application_network_address, + ome_response_mock): + param = {"interface_name": "interface_name_val"} + ome_response_mock.json_data = {"value": []} + f_module = self.get_module_mock(params=param) + msg = "The 'interface_name' value provided interface_name_val is invalid" + with pytest.raises(Exception) as exc: + self.module.get_network_config_data(ome_connection_mock_for_application_network_address, f_module) + assert exc.value.args[0] == msg + + def test_get_network_config_data_exception_case_02(self, ome_connection_mock_for_application_network_address): + param = {} + msg = "exception message" + ome_connection_mock_for_application_network_address.invoke_request.side_effect = Exception("exception message") + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception, match=msg): + self.module.get_network_config_data( + ome_connection_mock_for_application_network_address, f_module) diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py new file mode 100644 index 00000000..f4d32fcd --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_proxy +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' +CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied." +CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied." + + +@pytest.fixture +def ome_connection_mock_for_application_network_proxy(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_application_network_proxy.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []} + return ome_connection_mock_obj + + +class TestOmeTemplate(FakeAnsibleModule): + module = ome_application_network_proxy + + sub_param1 = {"enable_proxy": True, "ip_address": "255.0.0.0", "proxy_port": 443, "proxy_username": "username", + "proxy_password": "password", + "enable_authentication": True} + sub_param2 = {"enable_proxy": False} + + @pytest.mark.parametrize("sub_param", [sub_param1, sub_param2]) + def test_ome_application_network_proxy_main_success_case_01(self, mocker, ome_default_args, sub_param, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + ome_default_args.update(sub_param) + mocker.patch(MODULE_PATH + 
"ome_application_network_proxy.get_payload", return_value={"key": "val"}) + mocker.patch(MODULE_PATH + "ome_application_network_proxy.get_updated_payload", return_value={"key": "val"}) + ome_response_mock.json_data = {"EnableProxy": True, "IpAddress": "255.0.0.0", "PortNumber": 443, + "Username": "username", "Password": "password", "EnableAuthentication": True} + result = self.execute_module(ome_default_args) + assert result['changed'] is True + assert "msg" in result + assert "proxy_configuration" in result and result["proxy_configuration"] == {"EnableProxy": True, + "IpAddress": "255.0.0.0", + "PortNumber": 443, + "Username": "username", + "Password": "password", + "EnableAuthentication": True} + assert result["msg"] == "Successfully updated network proxy configuration." + + sub_param1 = {"param": {"enable_proxy": True, "ip_address": "255.0.0.0"}, + "msg": 'enable_proxy is True but all of the following are missing: proxy_port'} + sub_param2 = {"param": {"enable_proxy": True, "proxy_port": 443}, + "msg": 'enable_proxy is True but all of the following are missing: ip_address'} + sub_param3 = {"param": {"enable_proxy": True}, + "msg": 'enable_proxy is True but all of the following are missing: ip_address, proxy_port'} + sub_param4 = {"param": {}, "msg": 'missing required arguments: enable_proxy'} + + @pytest.mark.parametrize("param", [sub_param1, sub_param2, sub_param3, sub_param4]) + def test_ome_application_network_proxy_main_failure_case_01(self, mocker, ome_default_args, param, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + sub_param = param["param"] + msg = param["msg"] + ome_default_args.update(sub_param) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == msg + assert "proxy_configuration" not in result + assert result["failed"] is True + + sub_param1 = { + "param": {"enable_proxy": True, "proxy_port": 443, "ip_address": "255.0.0.0", "enable_authentication": True, + "proxy_username": 
"255.0.0.0"}, + "msg": 'enable_authentication is True but all of the following are missing: proxy_password'} + sub_param2 = { + "param": {"enable_proxy": True, "proxy_port": 443, "ip_address": "255.0.0.0", "enable_authentication": True, + "proxy_password": 443}, + "msg": 'enable_authentication is True but all of the following are missing: proxy_username'} + sub_param3 = { + "param": {"enable_proxy": True, "proxy_port": 443, "ip_address": "255.0.0.0", "enable_authentication": True}, + "msg": 'enable_authentication is True but all of the following are missing: proxy_username, proxy_password'} + + @pytest.mark.parametrize("param", [sub_param1, sub_param2, sub_param3]) + def test_ome_application_network_proxy_main_failure_case_02(self, mocker, ome_default_args, param, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + sub_param = param["param"] + msg = param["msg"] + ome_default_args.update(sub_param) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == msg + assert "proxy_configuration" not in result + assert result["failed"] is True + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_network_proxy_main_success_failure_case3(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + ome_default_args.update({"enable_proxy": False}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_application_network_proxy.get_payload', + side_effect=exc_type("TEST")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'ome_application_network_proxy.get_payload', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert 
result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'ome_application_network_proxy.get_payload', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'proxy_configuration' not in result + assert 'msg' in result + + def test_remove_unwanted_keys(self, ome_default_args): + removable_keys = list(ome_default_args.keys()) + new_param = { + "ip_address": "IpAddress", + "proxy_port": "PortNumber", + "enable_proxy": "EnableProxy", + "proxy_username": "Username", + "proxy_password": "Password", + "enable_authentication": "EnableAuthentication" + } + ome_default_args.update(new_param) + self.module.remove_unwanted_keys(removable_keys, ome_default_args) + assert len(set(new_param.keys()) - set(ome_default_args.keys())) == 0 + + def test_remove_unwanted_keys_case2(self): + """when key not exists should not throw error""" + current_setting = {"@odata.context": "context", "@odata.type": "data_type", "@odata.id": "@odata.id"} + removable_keys = ["@odata.context", "@odata.type", "@odata.id", "Password"] + self.module.remove_unwanted_keys(removable_keys, current_setting) + assert len(current_setting) == 0 + + def test_get_payload(self, ome_default_args): + new_param = { + "ip_address": "192.168.0.2", + "proxy_port": 443, + "enable_proxy": True, + "proxy_username": "username", + "proxy_password": "password", + "enable_authentication": False, + "port": 443 + } + ome_default_args.update(new_param) + f_module = self.get_module_mock(params=ome_default_args) + payload = self.module.get_payload(f_module) + assert ome_default_args == {"ip_address": "192.168.0.2", + "proxy_port": 443, + "enable_proxy": True, + "proxy_username": "username", + "proxy_password": "password", + "enable_authentication": False, + "hostname": "192.168.0.1", + "username": "username", + "password": "password", + "port": 
443, + "ca_path": "/path/ca_bundle"} + assert payload == {"EnableProxy": True, "IpAddress": "192.168.0.2", "PortNumber": 443, "Username": "username", + "Password": "password", "EnableAuthentication": False} + + def test_get_updated_payload_success_case(self, mocker, ome_default_args, ome_connection_mock_for_application_network_proxy, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.ProxyConfiguration", + "@odata.type": "#Network.ProxyConfiguration", + "@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0", + "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username1", "Password": "password1"} + payload = {"EnableAuthentication": True, "IpAddress": "192.168.0.1", "PortNumber": 443, 'EnableProxy': True, + 'Username': 'username2', "Password": "password2"} + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy", + return_value=None) + setting = self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload) + assert setting == payload + + def test_get_updated_payload_enable_auth_disable_success_case(self, mocker, ome_default_args, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + """when EnableAuthentication is False setting will not have Password and UserName even if its passed""" + ome_default_args.update( + {"enable_authentication": False, "proxy_username": 'username2', "proxy_password": "password2"}) + current_setting = {"@odata.context": "/api/$metadata#Network.ProxyConfiguration", + "@odata.type": "#Network.ProxyConfiguration", + "@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0", + "PortNumber": 443, "EnableAuthentication": True, "EnableProxy": True, + "Username": "username1", "Password": "password1"} + 
payload = {"EnableAuthentication": False, "IpAddress": "192.168.0.1", "PortNumber": 443, 'EnableProxy': True, + 'Username': 'username2', "Password": "password2"} + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy", + return_value=None) + setting = self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload) + assert setting == {"EnableAuthentication": False, "IpAddress": "192.168.0.1", "PortNumber": 443, + 'EnableProxy': True} + + def test_get_updated_payload_when_same_setting_failure_case1(self, mocker, ome_default_args, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.ProxyConfiguration", + "@odata.type": "#Network.ProxyConfiguration", + "@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0", + "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username", "Password": "password"} + payload = {"IpAddress": "255.0.0.0", "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username", "Password": "password"} + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + error_message = "No changes made to proxy configuration as entered values are the same as current " \ + "configuration values." 
+ mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy", + return_value=None) + with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload) + + def test_get_updated_payload_when_same_setting_failure_case2(self, mocker, ome_default_args, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + """Password are ignored for difference check in payload""" + current_setting = {"@odata.context": "/api/$metadata#Network.ProxyConfiguration", + "@odata.type": "#Network.ProxyConfiguration", + "@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0", + "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username", "Password": "password1"} + payload = {"IpAddress": "255.0.0.0", "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username", "Password": "password2"} + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + error_message = "No changes made to proxy configuration as entered values are the same as current " \ + "configuration values." 
+ mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy", + return_value=None) + with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload) + + def test_get_updated_payload_when_no_diff_failure_case(self, mocker, ome_default_args, + ome_connection_mock_for_application_network_proxy, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.ProxyConfiguration", + "@odata.type": "#Network.ProxyConfiguration", + "@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0", + "PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True, + "Username": "username", "Password": "password"} + payload = {} + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + error_message = "Unable to configure the proxy because proxy configuration settings are not provided." 
+ mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy", + return_value=None) + with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload) + + def test_validate_check_mode_for_network_proxy_case01(self, ome_default_args): + f_module = self.get_module_mock(params={}, check_mode=True) + with pytest.raises(Exception, match=CHECK_MODE_CHANGE_FOUND_MSG): + self.module.validate_check_mode_for_network_proxy(True, f_module) + + def test_validate_check_mode_for_network_proxy_case02(self, ome_default_args): + f_module = self.get_module_mock(params={}, check_mode=True) + with pytest.raises(Exception, match=CHECK_MODE_CHANGE_NOT_FOUND_MSG): + self.module.validate_check_mode_for_network_proxy(False, f_module) + + def test_validate_check_mode_for_network_proxy_case03(self, ome_default_args): + f_module = self.get_module_mock(params={}, check_mode=False) + self.module.validate_check_mode_for_network_proxy(True, f_module) + + def test_validate_check_mode_for_network_proxy_case04(self, ome_default_args): + f_module = self.get_module_mock(params={}, check_mode=False) + self.module.validate_check_mode_for_network_proxy(False, f_module) diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py new file mode 100644 index 00000000..0cd91a7f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.4.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_settings +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +SUCCESS_MSG = "Successfully updated the session timeout settings." +NO_CHANGES = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_application_network_settings.' +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.' 
+ + +@pytest.fixture +def ome_connection_mock_for_ns(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeApplicationNetworkSettings(FakeAnsibleModule): + module = ome_application_network_settings + + responseData = { + "value": [ + { + "@odata.type": "#SessionService.SessionConfiguration", + "SessionType": "GUI", + "MaxSessions": 5, + "SessionTimeout": 1380000, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 100, + "MaxSessionsConfigurable": True, + "SessionTimeoutConfigurable": True + }, + { + "@odata.type": "#SessionService.SessionConfiguration", + "SessionType": "API", + "MaxSessions": 100, + "SessionTimeout": 1380000, + "MinSessionTimeout": 60000, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 1, + "MaxSessionsAllowed": 100, + "MaxSessionsConfigurable": True, + "SessionTimeoutConfigurable": True + }, + { + "@odata.type": "#SessionService.SessionConfiguration", + "SessionType": "UniversalTimeout", + "MaxSessions": 0, + "SessionTimeout": 1380000, + "MinSessionTimeout": -1, + "MaxSessionTimeout": 86400000, + "MinSessionsAllowed": 0, + "MaxSessionsAllowed": 0, + "MaxSessionsConfigurable": False, + "SessionTimeoutConfigurable": True + } + ] + } + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "destination_address": "localhost", "port_number": 25, "use_ssl": True, + "enable_authentication": True, + "credentials": {"username": "username", "password": "password"} + }, + "json_data": responseData + } + ]) + def test_fetch_session_inactivity_settings(self, params, ome_connection_mock_for_ns, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.json_data = params["json_data"] + ret_data = 
self.module.fetch_session_inactivity_settings(ome_connection_mock_for_ns) + assert ret_data[0].get("SessionType") == "GUI" + assert ret_data[0].get("MaxSessions") == 5 + assert ret_data[0].get("SessionTimeout") == 1380000 + + @pytest.mark.parametrize("params", [ + { + "json_data": responseData.get("value"), + "payload": responseData.get("value"), + } + ]) + def test_update_session_inactivity_settings(self, params, ome_connection_mock_for_ns, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + payload = params["payload"] + ret_value = self.module.update_session_inactivity_settings(ome_connection_mock_for_ns, payload) + ret_data = ret_value.json_data + assert ret_data[0].get("SessionType") == "GUI" + assert ret_data[0].get("MaxSessions") == 5 + assert ret_data[0].get("SessionTimeout") == 1380000 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "enable_universal_timeout": True, + "universal_timeout": 2 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_ut_enable(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert ret_data[2].get("SessionType") == "UniversalTimeout" + assert ret_data[2].get("SessionTimeout") == 120000 + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "enable_universal_timeout": False, + "universal_timeout": 2 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_ut_disable(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert 
ret_data[2].get("SessionType") == "UniversalTimeout" + assert ret_data[2].get("SessionTimeout") == -1 + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "universal_timeout": 2 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_no_change(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert diff == 0 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "api_timeout": 2 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_timeout_change(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert ret_data[1].get("SessionTimeout") == 1380000 + assert diff == 0 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "api_sessions": 90 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_max_sessions_change(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert ret_data[1].get("MaxSessions") == 90 + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "api_timeout": 2, + "api_sessions": 90 + } + }, + "payload": responseData.get("value") + } + ]) + def test_update_payload_timeout_and_max_session_change(self, params, ome_connection_mock_for_ns, ome_response_mock): + f_module = self.get_module_mock(params=params['module_args']) + payload = 
params["payload"] + ret_data, diff = self.module.update_payload(f_module, payload) + assert ret_data[1].get("SessionTimeout") == 1380000 + assert ret_data[1].get("MaxSessions") == 90 + assert diff == 1 + + @pytest.mark.parametrize("params", [ + { + "session_inactivity_timeout": { + "api_timeout": 2, + "api_sessions": 90 + }, + "payload": responseData.get("value")[0] + } + ]) + def test_get_value_s1(self, params, ome_connection_mock_for_ns, ome_response_mock): + payload = params["payload"] + ret_data = self.module.get_value(params.get("session_inactivity_timeout"), + payload, "api_timeout", "SessionTimeout") + assert ret_data == 120000 + + @pytest.mark.parametrize("params", [ + { + "session_inactivity_timeout": { + "api_sessions": 90 + }, + "payload": responseData.get("value")[0] + } + ]) + def test_get_value_s2(self, params, ome_connection_mock_for_ns, ome_response_mock): + payload = params["payload"] + ret_data = self.module.get_value(params.get("session_inactivity_timeout"), + payload, "api_timeout", "SessionTimeout") + assert ret_data == 1380000 + + @pytest.mark.parametrize("params", [ + { + "session_inactivity_timeout": { + "universal_timeout": -1 + }, + "payload": responseData.get("value")[2] + } + ]) + def test_get_value_s3(self, params, ome_connection_mock_for_ns, ome_response_mock): + payload = params["payload"] + ret_data = self.module.get_value(params.get("session_inactivity_timeout"), + payload, "universal_timeout", "SessionTimeout") + assert ret_data == -1 + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "enable_universal_timeout": True, + "universal_timeout": 2 + }, + }, + "json_data": responseData.get("value"), + "get_json_data": responseData.get("value"), + "update_payload": responseData.get("value"), + } + ]) + def test_module_success(self, mocker, params, ome_connection_mock_for_ns, ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + 
ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + update_json_data = params["update_payload"] + update_json_data[2]["SessionTimeout"] = 120000 + mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings', return_value=params["get_json_data"]) + mocker.patch(MODULE_PATH + 'update_payload', return_value=[update_json_data, 1]) + result = self._run_module(ome_default_args) + assert result["msg"] == SUCCESS_MSG + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "enable_universal_timeout": True, + "universal_timeout": 2 + }, + }, + "json_data": responseData.get("value"), + "get_json_data": responseData.get("value"), + "update_payload": responseData.get("value"), + } + ]) + def test_module_no_idempotent(self, mocker, params, ome_connection_mock_for_ns, ome_response_mock, + ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + update_json_data = params["update_payload"] + mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings', return_value=params["get_json_data"]) + mocker.patch(MODULE_PATH + 'update_payload', return_value=[update_json_data, 0]) + result = self._run_module(ome_default_args) + assert result["msg"] == NO_CHANGES + + @pytest.mark.parametrize("params", [ + { + "module_args": { + "session_inactivity_timeout": { + "enable_universal_timeout": True, + "universal_timeout": 2 + }, + }, + "json_data": responseData.get("value"), + "get_json_data": responseData.get("value"), + "update_payload": responseData.get("value"), + } + ]) + def test_module_check_mode(self, mocker, params, ome_connection_mock_for_ns, ome_response_mock, ome_default_args): + ome_response_mock.success = True + ome_response_mock.status_code = 201 + ome_response_mock.json_data = params["json_data"] + ome_default_args.update(params['module_args']) + 
f_module = self.get_module_mock(params=ome_default_args) + + f_module.check_mode = True + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 1) + assert err.value.args[0] == CHANGES_FOUND + + f_module.check_mode = False + + with pytest.raises(Exception) as err: + self.module.process_check_mode(f_module, 0) + assert err.value.args[0] == NO_CHANGES + + @pytest.mark.parametrize("exc_type", + [HTTPError, URLError]) + def test_session_inactivity_settings_main_exception_case(self, mocker, exc_type, ome_connection_mock_for_ns, + ome_response_mock, + ome_default_args): + ome_default_args.update({"session_inactivity_timeout": { + "enable_universal_timeout": True, + "universal_timeout": 2 + }}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings', side_effect=exc_type("url open")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py new file mode 
100644 index 00000000..53e32311 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py @@ -0,0 +1,584 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_time +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def ome_connection_mock_for_application_network_time(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_application_network_time.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []} + return ome_connection_mock_obj + + +class TestOmeTemplate(FakeAnsibleModule): + module = ome_application_network_time + + sub_param1 = {"enable_ntp": False, "time_zone": "TZ_ID_3"} + sub_param2 = {"enable_ntp": False, "system_time": "2020-03-31 21:35:19"} + sub_param3 = {"enable_ntp": False, "time_zone": "TZ_ID_3", "system_time": "2020-03-31 21:35:19"} + + @pytest.mark.parametrize("param1", [sub_param2, sub_param3]) + def test_ome_application_network_time_main_enable_ntp_false_success_case_01(self, mocker, ome_default_args, param1, + ome_connection_mock_for_application_network_time, + ome_response_mock): + ome_default_args.update(param1) + mocker.patch(MODULE_PATH + "ome_application_network_time.validate_input") + mocker.patch(MODULE_PATH + "ome_application_network_time.validate_time_zone") + mocker.patch(MODULE_PATH + "ome_application_network_time.get_payload", return_value={"key": "val"}) + mocker.patch(MODULE_PATH + "ome_application_network_time.get_updated_payload", return_value={"key": "val"}) + time_data = { + "EnableNTP": False, + "JobId": None, + "PrimaryNTPAddress": None, + "SecondaryNTPAddress1": None, + "SecondaryNTPAddress2": None, + "SystemTime": None, + "TimeSource": "Local Clock", + "TimeZone": "TZ_ID_3", + "TimeZoneIdLinux": None, + "TimeZoneIdWindows": None, + "UtcTime": None + } + ome_response_mock.json_data = time_data + result = self.execute_module(ome_default_args) + assert result['changed'] is True + assert "msg" in result + assert "time_configuration" in result and result["time_configuration"] == time_data + 
assert result["msg"] == "Successfully configured network time." + + @pytest.mark.parametrize("param1", [{"enable_ntp": True, "time_zone": "TZ_ID_66"}]) + @pytest.mark.parametrize("param2", [{"primary_ntp_address": "192.168.0.2"}, + {"secondary_ntp_address1": "192.168.0.3"}, + {"secondary_ntp_address2": "192.168.0.4"}, + {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address1": "192.168.0.3"}, + {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address2": "192.168.0.4"}, + {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address1": "192.168.0.3", + "secondary_ntp_address2": "192.168.0.4"} + ]) + def test_ome_application_network_time_main_enable_ntp_true_success_case_01(self, mocker, ome_default_args, param1, + param2, + ome_connection_mock_for_application_network_time, + ome_response_mock): + ome_default_args.update(param1) + ome_default_args.update(param2) + mocker.patch(MODULE_PATH + "ome_application_network_time.validate_input") + mocker.patch(MODULE_PATH + "ome_application_network_time.validate_time_zone") + mocker.patch(MODULE_PATH + "ome_application_network_time.get_payload", return_value={"key": "val"}) + mocker.patch(MODULE_PATH + "ome_application_network_time.get_updated_payload", return_value={"key": "val"}) + time_data = { + "EnableNTP": True, + "JobId": None, + "PrimaryNTPAddress": "192.168.0.2", + "SecondaryNTPAddress1": "192.168.0.3", + "SecondaryNTPAddress2": "192.168.0.4", + "SystemTime": None, + "TimeSource": "10.136.112.222", + "TimeZone": "TZ_ID_66", + "TimeZoneIdLinux": None, + "TimeZoneIdWindows": None, + "UtcTime": None + } + ome_response_mock.json_data = time_data + result = self.execute_module(ome_default_args) + assert result['changed'] is True + assert "msg" in result + assert "time_configuration" in result and result["time_configuration"] == time_data + assert result["msg"] == "Successfully configured network time." 
+ + sub_param1 = { + "param": {"enable_ntp": True, "primary_ntp_address": "255.0.0.0", "system_time": "2020-03-31 21:35:19"}, "msg": + 'parameters are mutually exclusive: system_time|primary_ntp_address'} + sub_param2 = {"param": {}, "msg": 'missing required arguments: enable_ntp'} + sub_param3 = {"param": {"enable_ntp": False}, + "msg": "enable_ntp is False but any of the following are missing: time_zone, system_time"} + sub_param4 = {"param": {"enable_ntp": True}, + "msg": "enable_ntp is True but any of the following are missing:" + " time_zone, primary_ntp_address, secondary_ntp_address1, secondary_ntp_address2"} + sub_param5 = { + "param": { + "enable_ntp": False, + "primary_ntp_address": "10.136.112.220" + }, + "msg": "enable_ntp is False but any of the following are missing:" + " time_zone, system_time" + } + sub_param6 = { + "param": { + "enable_ntp": False, + "secondary_ntp_address1": "10.136.112.220", + "system_time": "2020-03-31 21:35:19" + }, + "msg": "parameters are mutually exclusive: system_time|secondary_ntp_address1" + } + sub_param7 = { + "param": { + "enable_ntp": False, + "secondary_ntp_address2": "10.136.112.220", + "system_time": "2020-03-31 21:35:19" + }, + "msg": "parameters are mutually exclusive: system_time|secondary_ntp_address2" + } + sub_param8 = {"param": {"enable_ntp": False, "primary_ntp_address": "10.136.112.220", + "secondary_ntp_address1": "10.136.112.220", "system_time": "2020-03-31 21:35:19"}, + "msg": "parameters are mutually exclusive: system_time|primary_ntp_address," + " system_time|secondary_ntp_address1"} + sub_param9 = { + "param": {"enable_ntp": False, "system_time": "2020-03-31 21:35:19", "primary_ntp_address": "10.136.112.220", + "secondary_ntp_address2": "10.136.112.220"}, + "msg": "parameters are mutually exclusive: system_time|primary_ntp_address, system_time|secondary_ntp_address2"} + sub_param10 = { + "param": {"enable_ntp": False, "system_time": "2020-03-31 21:35:19", "primary_ntp_address": "10.136.112.220", + 
"secondary_ntp_address2": "10.136.112.220", "secondary_ntp_address1": "10.136.112.220"}, + "msg": "parameters are mutually exclusive: system_time|primary_ntp_address," + " system_time|secondary_ntp_address1, system_time|secondary_ntp_address2"} + sub_param11 = { + "param": {"enable_ntp": False, "primary_ntp_address": "255.0.0.0", "system_time": "2020-03-31 21:35:19"}, + "msg": 'parameters are mutually exclusive: system_time|primary_ntp_address'} + + @pytest.mark.parametrize("param", + [sub_param1, sub_param2, sub_param3, sub_param4, sub_param5, sub_param6, sub_param7, + sub_param8, + sub_param9, sub_param10, sub_param11]) + def test_ome_application_network_time_main_failure_case_01(self, mocker, ome_default_args, param, + ome_connection_mock_for_application_network_time, + ome_response_mock): + sub_param = param["param"] + msg = param["msg"] + ome_default_args.update(sub_param) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == msg + assert "time_configuration" not in result + assert result["failed"] is True + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_network_time_main_success_exception_case3(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + mocker.patch(MODULE_PATH + "ome_application_network_time.validate_time_zone") + ome_default_args.update({"enable_ntp": False, "system_time": "2020-03-31 21:35:18"}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_application_network_time.get_payload', side_effect=URLError('TESTS')) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + assert 'TESTS' in result['msg'] + assert result['changed'] is False + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 
'ome_application_network_time.get_payload', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'ome_application_network_time.get_payload', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'time_configuration' not in result + assert 'msg' in result + + def test_remove_unwanted_keys_default_keys_time(self, ome_default_args): + removable_keys = list(ome_default_args.keys()) + new_param = { + "enable_ntp": True, + "time_zone": "TimeZone", + "primary_ntp_address": "192.168.0.2", + "secondary_ntp_address1": "192.168.0.3", + "secondary_ntp_address2": "192.168.0.4" + } + ome_default_args.update(new_param) + self.module.remove_unwanted_keys(removable_keys, ome_default_args) + assert len(set(new_param.keys()) - set(ome_default_args.keys())) == 0 + + def test_remove_unwanted_keys_unwanted_keys_time(self): + """when key not exists should not throw error""" + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", "TimeZone": "TZ_ID_1", + "TimeZoneIdLinux": "Etc/GMT+12", "TimeZoneIdWindows": "Dateline Standard Time", + "EnableNTP": False, "PrimaryNTPAddress": None, "SecondaryNTPAddress1": None, + "SecondaryNTPAddress2": None, "SystemTime": "2020-03-31 21:37:08.897", + "TimeSource": "Local Clock", "UtcTime": "2020-04-01 09:37:08.897"} + removable_keys = ["@odata.context", "@odata.type", "@odata.id", "TimeZoneIdLinux", "TimeZoneIdWindows", + "TimeSource", "UtcTime"] + self.module.remove_unwanted_keys(removable_keys, current_setting) + assert current_setting == {"TimeZone": "TZ_ID_1", "EnableNTP": False, "PrimaryNTPAddress": None, 
+ "SecondaryNTPAddress1": None, "SecondaryNTPAddress2": None, + "SystemTime": "2020-03-31 21:37:08.897"} + + def test_get_payload_time_case1(self, ome_default_args): + new_param = { + "enable_ntp": False, + "primary_ntp_address": None, + "secondary_ntp_address1": None, + "secondary_ntp_address2": None, + "system_time": "2020-03-31 21:35:19", + "time_zone": "TZ_ID_1", + } + ome_default_args.update(new_param) + f_module = self.get_module_mock(params=ome_default_args) + payload = self.module.get_payload(f_module) + assert f_module.params == ome_default_args + assert payload == {"EnableNTP": False, "TimeZone": "TZ_ID_1", "SystemTime": "2020-03-31 21:35:19"} + + def test_get_payload_time_case2(self, ome_default_args): + new_param = { + "enable_ntp": True, + "primary_ntp_address": "10.136.112.220", + "secondary_ntp_address1": "10.136.112.221", + "secondary_ntp_address2": "10.136.112.222", + "system_time": None, + "time_zone": "TZ_ID_66" + } + ome_default_args.update(new_param) + f_module = self.get_module_mock(params=ome_default_args) + payload = self.module.get_payload(f_module) + assert ome_default_args == { + "enable_ntp": True, + "primary_ntp_address": "10.136.112.220", + "secondary_ntp_address1": "10.136.112.221", + "secondary_ntp_address2": "10.136.112.222", + "system_time": None, + "time_zone": "TZ_ID_66", + "hostname": "192.168.0.1", + "username": "username", + "password": "password", + "ca_path": "/path/ca_bundle"} + assert payload == {"EnableNTP": True, "TimeZone": "TZ_ID_66", "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222" + } + + def test_get_updated_payload_success_case(self, ome_default_args, ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", "TimeZone": "TZ_ID_02", 
+ "TimeZoneIdLinux": "Asia/Colombo", "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", "TimeSource": "10.136.112.222", + "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "TimeZone": "TZ_ID_66", + "SecondaryNTPAddress1": "10.136.112.02", + "SecondaryNTPAddress2": "10.136.112.03" + } + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = current_setting + setting = self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + expected_payload = {"EnableNTP": True, "TimeZone": "TZ_ID_66", + "SecondaryNTPAddress1": "10.136.112.02", + "SecondaryNTPAddress2": "10.136.112.03", + "PrimaryNTPAddress": "10.136.112.220", # updated not given key from current_setting + "SystemTime": "2020-04-01 15:39:23.825", # system will be ignore from ome + } + assert setting == expected_payload + + def test_get_updated_payload_check_mode_success_case1(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": "Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "TimeZone": "TZ_ID_02", + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222" + } + 
ome_response_mock.json_data = current_setting + check_mode_no_diff_msg = "No changes found to be applied to the time configuration." + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + with pytest.raises(Exception, match=check_mode_no_diff_msg): + self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + + def test_get_updated_payload_check_mode_success_case2(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": "Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "PrimaryNTPAddress": "10.136.112.220"} + ome_response_mock.json_data = current_setting + check_mode_no_diff_msg = "No changes found to be applied to the time configuration." 
+ f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + with pytest.raises(Exception, match=check_mode_no_diff_msg) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + + def test_get_updated_payload_check_mode_success_case3(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": "Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "PrimaryNTPAddress": "10.136.112.221"} # change in value + ome_response_mock.json_data = current_setting + check_mode_no_diff_msg = "Changes found to be applied to the time configuration." 
+ f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + with pytest.raises(Exception, match=check_mode_no_diff_msg): + self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + + def test_get_updated_payload_without_check_mode_success_case(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + """without check even there is no difference no exception thrown""" + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": " Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {'EnableNTP': True, + 'PrimaryNTPAddress': '10.136.112.220', + 'SecondaryNTPAddress1': '10.136.112.221', + 'SecondaryNTPAddress2': '10.136.112.222', + 'SystemTime': '2020-04-01 15:39:23.826', + 'TimeZone': 'TZ_ID_02'} + ome_response_mock.json_data = current_setting + f_module = self.get_module_mock(params=ome_default_args, check_mode=False) + current_setting = self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + assert current_setting == payload + + @pytest.mark.parametrize("time_zone_val", ["", 0, "invalid", "TZ_ID_100001"]) + def test_validate_time_zone_failure_case01(self, ome_default_args, time_zone_val, ome_response_mock, + ome_connection_mock_for_application_network_time): + param = {"time_zone": time_zone_val} + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = {"@odata.context": 
"/api/$metadata#Collection(Network.TimeZone)", + "@odata.count": 3, + "value": [{"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 60, + "Id": "TZ_ID_38", "Name": + "(GMT+01:00) Brussels, Copenhagen, Madrid, Paris"}, + {"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 60, + "Id": "TZ_ID_39", "Name": + "(GMT+01:00) Sarajevo, Skopje, Warsaw, Zagreb"}, + {"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 360, + "Id": "TZ_ID_70", "Name": "(GMT+06:00) Novosibirsk"}]} + msg = "Provide valid time zone.Choices are TZ_ID_38,TZ_ID_39,TZ_ID_70" + with pytest.raises(Exception, match=msg): + self.module.validate_time_zone(f_module, ome_connection_mock_for_application_network_time) + + def test_validate_time_zone_successcase01(self, ome_default_args, ome_response_mock, + ome_connection_mock_for_application_network_time): + param = {"time_zone": "TZ_ID_38"} + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args) + ome_response_mock.json_data = {"@odata.context": "/api/$metadata#Collection(Network.TimeZone)", + "@odata.count": 3, + "value": [{"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 60, + "Id": "TZ_ID_38", + "Name": "(GMT+01:00) Brussels, Copenhagen, Madrid, Paris"}, + {"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 60, + "Id": "TZ_ID_39", + "Name": "(GMT+01:00) Sarajevo, Skopje, Warsaw, Zagreb"}, + {"@odata.type": "#Network.TimeZone", "Utcoffsetminutes": 360, + "Id": "TZ_ID_70", "Name": "(GMT+06:00) Novosibirsk"}]} + self.module.validate_time_zone(f_module, ome_connection_mock_for_application_network_time) + assert ome_connection_mock_for_application_network_time.invoke_request.called + + def test_validate_time_zone_successcase02(self, ome_default_args, ome_response_mock, + ome_connection_mock_for_application_network_time): + param = {"enable_ntp": True} + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args) + self.module.validate_time_zone(f_module, 
ome_connection_mock_for_application_network_time) + assert not ome_connection_mock_for_application_network_time.invoke_request.called + + def test_validate_time_zone_successcase03(self, ome_default_args, ome_response_mock, + ome_connection_mock_for_application_network_time): + param = {"time_zone": None} + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args) + self.module.validate_time_zone(f_module, ome_connection_mock_for_application_network_time) + assert not ome_connection_mock_for_application_network_time.invoke_request.called + + def test_validate_input_time_enable_true_case_01(self, ome_default_args): + params = {"enable_ntp": True, "system_time": "2020-04-01 15:39:23.825"} + ome_default_args.update(params) + f_module = self.get_module_mock(params=ome_default_args) + msg = 'When enable NTP is true,the option system time is not accepted.' + with pytest.raises(Exception) as exc: + self.module.validate_input(f_module) + assert exc.value.args[0] == msg + + @pytest.mark.parametrize("sub_param", [ + {"primary_ntp_address": "192.168.02.1", "secondary_ntp_address1": "192.168.02.3", + "secondary_ntp_address2": "192.168.02.2"}, + {"secondary_ntp_address1": "192.168.02.1"}, + {"secondary_ntp_address2": "192.168.02.1"}, + {"primary_ntp_address": "192.168.02.1", "time_zone": "TZ_01"}, + {"primary_ntp_address": "192.168.02.1"}, + {"secondary_ntp_address1": "192.168.02.1", "time_zone": "TZ_01"}, + ]) + def test_validate_input_time_enable_false_case_01(self, ome_default_args, sub_param): + params = {"enable_ntp": False} + params.update(sub_param) + ome_default_args.update(params) + f_module = self.get_module_mock(params=ome_default_args) + msg = "When enable NTP is false,the option(s) primary_ntp_address, secondary_ntp_address1 and secondary_ntp_address2 is not accepted." 
+ with pytest.raises(Exception) as exc: + self.module.validate_input(f_module) + assert exc.value.args[0] == msg + + @pytest.mark.parametrize("sub_param", [{"time_zone": "TZ_01"}, {"primary_ntp_address": "192.168.02.1"}, + {"secondary_ntp_address1": "192.168.02.1"}, + {"secondary_ntp_address2": "192.168.02.1"}, + {"primary_ntp_address": "192.168.02.1", "time_zone": "TZ_01"}, {} + ]) + def test_validate_input_time_enable_true_case_04(self, ome_default_args, sub_param): + """ + exception should not be raised + """ + params = {"enable_ntp": True} + params.update(sub_param) + ome_default_args.update(params) + f_module = self.get_module_mock(params=ome_default_args) + self.module.validate_input(f_module) + + @pytest.mark.parametrize("sub_param", [{"time_zone": "TZI_01"}, {"system_time": "2020-04-01 15:39:23.825"}, + {"time_zone": "TZI_01", "system_time": "2020-04-01 15:39:23.825"}, {}]) + def test_validate_input_time_enable_false_case_03(self, ome_default_args, sub_param): + """success case. 
if required options passed no exception thrown""" + params = {"enable_ntp": False} + params.update(sub_param) + ome_default_args.update(params) + f_module = self.get_module_mock(params=ome_default_args) + self.module.validate_input(f_module) + + def test_get_updated_payload_non_check_mode_success_case1(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": "Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "TimeZone": "TZ_ID_02", + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222" + } + ome_response_mock.json_data = current_setting + check_mode_no_diff_msg = "No changes made to the time configuration as the entered values are the same as the current configuration." 
+ f_module = self.get_module_mock(params=ome_default_args, check_mode=False) + with pytest.raises(Exception, match=check_mode_no_diff_msg): + self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + + def test_get_updated_payload_non_check_mode_success_case2(self, ome_default_args, + ome_connection_mock_for_application_network_time, + ome_response_mock): + current_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_02", "TimeZoneIdLinux": "Asia/Colombo", + "TimeZoneIdWindows": "Sri Lanka Standard Time", + "EnableNTP": True, + "PrimaryNTPAddress": "10.136.112.220", + "SecondaryNTPAddress1": "10.136.112.221", + "SecondaryNTPAddress2": "10.136.112.222", + "SystemTime": "2020-04-01 15:39:23.825", + "TimeSource": "10.136.112.222", "UtcTime": "2020-04-01 10:09:23.825"} + payload = {"EnableNTP": True, "PrimaryNTPAddress": "10.136.112.220"} + ome_response_mock.json_data = current_setting + check_mode_no_diff_msg = "No changes made to the time configuration as the entered values are the same as the current configuration." 
+ f_module = self.get_module_mock(params=ome_default_args, check_mode=False) + with pytest.raises(Exception, match=check_mode_no_diff_msg) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_time, + f_module, payload) + + def test_update_time_config_output(self): + backup_setting = {"@odata.context": "/api/$metadata#Network.TimeConfiguration", + "@odata.type": "#Network.TimeConfiguration", + "@odata.id": "/api/ApplicationService/Network/TimeConfiguration", + "TimeZone": "TZ_ID_1", + "TimeZoneIdLinux": "Etc/GMT+12", + "TimeZoneIdWindows": "Dateline Standard Time", + "EnableNTP": False, + "PrimaryNTPAddress": None, + "SecondaryNTPAddress1": None, + "SecondaryNTPAddress2": None, + "SystemTime": "2020-03-31 21:37:08.897", + "TimeSource": "Local Clock", + "UtcTime": "2020-04-01 09:37:08.897"} + self.module.update_time_config_output(backup_setting) + assert backup_setting == { + "EnableNTP": False, + "JobId": None, + "PrimaryNTPAddress": None, + "SecondaryNTPAddress1": None, + "SecondaryNTPAddress2": None, + "SystemTime": "2020-03-31 21:37:08.897", + "TimeSource": "Local Clock", + "TimeZone": "TZ_ID_1", + "TimeZoneIdLinux": "Etc/GMT+12", + "TimeZoneIdWindows": "Dateline Standard Time", + "UtcTime": "2020-04-01 09:37:08.897"} diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py new file mode 100644 index 00000000..d6fbc368 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_webserver +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def ome_connection_mock_for_application_network_webserver(mocker, ome_response_mock): + connection_class_mock = mocker.patch( + MODULE_PATH + 'ome_application_network_webserver.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeAppNetwork(FakeAnsibleModule): + module = ome_application_network_webserver + + sub_param1 = {"webserver_port": 443, "webserver_timeout": 20} + + @pytest.mark.parametrize("sub_param", [sub_param1]) + def test_ome_application_network_webserver_main_success_case_01(self, mocker, ome_default_args, sub_param, + ome_connection_mock_for_application_network_webserver, + ome_response_mock): + ome_default_args.update(sub_param) + resp = {"TimeOut": 25, "PortNumber": 443, "EnableWebServer": True} + port_change = 0 + mocker.patch(MODULE_PATH + "ome_application_network_webserver.get_updated_payload", + return_value=(resp, port_change)) + ome_response_mock.json_data = resp + result = self.execute_module(ome_default_args) + assert result['changed'] is True + assert "msg" in result + assert "webserver_configuration" in result and 
result["webserver_configuration"] == resp + assert result["msg"] == "Successfully updated network web server configuration." + + in1 = {"webserver_port": 443, "webserver_timeout": 25} + in2 = {"webserver_timeout": 25} + out1 = {"TimeOut": 25, "PortNumber": 443, "EnableWebServer": True} + out2 = {"TimeOut": 25, "PortNumber": 1443, "EnableWebServer": True} + + @pytest.mark.parametrize("sub_param", [{"in": in1, "out": out1}, + {"in": in2, "out": out2}]) + def test_get_updated_payload_success1(self, sub_param, ome_default_args, + ome_connection_mock_for_application_network_webserver, + ome_response_mock): + ome_default_args.update(sub_param["in"]) + ome_response_mock.json_data = {"TimeOut": 20, "PortNumber": 1443, "EnableWebServer": True, + "@odata.context": "$metadata#Network.WebServerConfiguration/$entity", + "@odata.id": "/api/ApplicationService/Network/WebServerConfiguration"} + f_module = self.get_module_mock(params=ome_default_args) + payload, port = self.module.get_updated_payload(ome_connection_mock_for_application_network_webserver, f_module) + assert payload == sub_param["out"] + + def _test_get_updated_payload_when_same_setting_failure_case(self, ome_default_args, + ome_connection_mock_for_application_network_webserver, + ome_response_mock): + new_param = {"webserver_port": 443, "webserver_timeout": 25} + ome_default_args.update(new_param) + ome_response_mock.json_data = {"TimeOut": 25, "PortNumber": 443, "EnableWebServer": True, + "@odata.context": "$metadata#Network.WebServerConfiguration/$entity", + "@odata.id": "/api/ApplicationService/Network/WebServerConfiguration"} + f_module = self.get_module_mock(params=ome_default_args) + error_message = "No changes made to the web server configuration as the entered values are the same as the" \ + " current configuration." 
+ with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_webserver, f_module) + + in1 = {"check_mode": True, "timeout": 25} + in2 = {"check_mode": True, "timeout": 30} + in3 = {"check_mode": False, "timeout": 25} + out1 = "No changes found to be applied to the web server." + out2 = "Changes found to be applied to the web server." + out3 = "No changes made to the web server configuration as the entered values" \ + " are the same as the current configuration." + + @pytest.mark.parametrize("sub_param", [{"in": in1, "out": out1}, + {"in": in2, "out": out2}, + {"in": in3, "out": out3}]) + def test_get_updated_payload_check_mode(self, sub_param, ome_default_args, + ome_connection_mock_for_application_network_webserver, ome_response_mock): + new_param = {"webserver_port": 443, "webserver_timeout": sub_param["in"]["timeout"]} + ome_default_args.update(new_param) + ome_response_mock.json_data = {"TimeOut": 25, "PortNumber": 443, "EnableWebServer": True, + "@odata.context": "$metadata#Network.WebServerConfiguration/$entity", + "@odata.id": "/api/ApplicationService/Network/WebServerConfiguration"} + f_module = self.get_module_mock(params=ome_default_args, check_mode=sub_param["in"]["check_mode"]) + error_message = sub_param["out"] + with pytest.raises(Exception, match=error_message) as err: + self.module.get_updated_payload(ome_connection_mock_for_application_network_webserver, f_module) + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_network_webserver_main_error_cases(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_application_network_webserver, + ome_response_mock): + json_str = to_text(json.dumps({"info": "error_details"})) + ome_default_args.update({"webserver_port": 443, "webserver_timeout": 25}) + if exc_type == URLError: + mocker.patch( + MODULE_PATH + 
'ome_application_network_webserver.get_updated_payload', + side_effect=exc_type("test")) + ome_default_args.update({"webserver_port": 443, "webserver_timeout": 25}) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'ome_application_network_webserver.get_updated_payload', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch( + MODULE_PATH + 'ome_application_network_webserver.get_updated_payload', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'webserver_configuration' not in result + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py new file mode 100644 index 00000000..ef945ae6 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.4.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_security_settings +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_application_security_settings.' + +SEC_JOB_TRIGGERED = "Successfully triggered the job to apply security settings." +SEC_JOB_COMPLETE = "Successfully applied the security settings." +FIPS_TOGGLED = "Successfully {0} the FIPS mode." +FIPS_CONN_RESET = "The network connection may have changed. Verify the connection and try again." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+ + +@pytest.fixture +def ome_connection_mock_for_security_settings(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeSecuritySettings(FakeAnsibleModule): + module = ome_application_security_settings + + @pytest.mark.parametrize("params", [ + {"module_args": { + "job_wait": False, "job_wait_timeout": 120, + "login_lockout_policy": { + "by_ip_address": False, "by_user_name": False, "lockout_fail_count": 5, + "lockout_fail_window": 30, "lockout_penalty_time": 900}, + "restrict_allowed_ip_range": { + "enable_ip_range": False, "ip_range": None}, + }, + "json_data": { + "JobId": 1234, + "SystemConfiguration": { + "Comments": ["Export type is Normal,JSON"], + "Model": "", "ServiceTag": "", + "Components": [ + { + "FQDD": "MM.Embedded.1", + "Attributes": [ + { + "Name": "LoginSecurity.1#Id", + "Value": "10" + }, + { + "Name": "LoginSecurity.1#LockoutFailCount", + "Value": 3 + }, + { + "Name": "LoginSecurity.1#LockoutFailCountTime", + "Value": 32 + }, + { + "Name": "LoginSecurity.1#LockoutPenaltyTime", + "Value": 850 + }, + { + "Name": "LoginSecurity.1#IPRangeAddr", + "Value": None + }, + { + "Name": "LoginSecurity.1#LockoutByUsernameEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#LockoutByIPEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#IPRangeEnable", + "Value": False + } + ] + } + ] + } + }, "msg": SEC_JOB_TRIGGERED}, + {"module_args": { + "job_wait": False, "job_wait_timeout": 120, + "login_lockout_policy": { + "by_ip_address": False, "by_user_name": False, "lockout_fail_count": 5, + "lockout_fail_window": 30, "lockout_penalty_time": 900}, + "restrict_allowed_ip_range": { + "enable_ip_range": False, "ip_range": None}, + }, + "json_data": { + "JobId": 1234, + "SystemConfiguration": { + 
"Comments": ["Export type is Normal,JSON"], + "Model": "", "ServiceTag": "", + "Components": [ + { + "FQDD": "MM.Embedded.1", + "Attributes": [ + { + "Name": "LoginSecurity.1#Id", + "Value": "10" + }, + { + "Name": "LoginSecurity.1#LockoutFailCount", + "Value": 5 + }, + { + "Name": "LoginSecurity.1#LockoutFailCountTime", + "Value": 30 + }, + { + "Name": "LoginSecurity.1#LockoutPenaltyTime", + "Value": 900 + }, + { + "Name": "LoginSecurity.1#IPRangeAddr", + "Value": None + }, + { + "Name": "LoginSecurity.1#LockoutByUsernameEnable", + "Value": False + }, + { + "Name": "LoginSecurity.1#LockoutByIPEnable", + "Value": False + }, + { + "Name": "LoginSecurity.1#IPRangeEnable", + "Value": False + } + ] + } + ] + } + }, "msg": NO_CHANGES_MSG}, + {"module_args": { + "job_wait": False, "job_wait_timeout": 120, + "login_lockout_policy": { + "by_ip_address": False, "by_user_name": False, "lockout_fail_count": 5, + "lockout_fail_window": 30, "lockout_penalty_time": 900}, + "restrict_allowed_ip_range": { + "enable_ip_range": False, "ip_range": None}, + }, "check_mode": True, + "json_data": { + "JobId": 1234, + "SystemConfiguration": { + "Comments": ["Export type is Normal,JSON"], + "Model": "", "ServiceTag": "", + "Components": [ + { + "FQDD": "MM.Embedded.1", + "Attributes": [ + { + "Name": "LoginSecurity.1#Id", + "Value": "10" + }, + { + "Name": "LoginSecurity.1#LockoutFailCount", + "Value": 3 + }, + { + "Name": "LoginSecurity.1#LockoutFailCountTime", + "Value": 32 + }, + { + "Name": "LoginSecurity.1#LockoutPenaltyTime", + "Value": 850 + }, + { + "Name": "LoginSecurity.1#IPRangeAddr", + "Value": None + }, + { + "Name": "LoginSecurity.1#LockoutByUsernameEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#LockoutByIPEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#IPRangeEnable", + "Value": False + } + ] + } + ] + } + }, "msg": CHANGES_FOUND}, + {"module_args": { + "job_wait": True, "job_wait_timeout": 120, + "login_lockout_policy": { + "by_ip_address": 
False, "by_user_name": False, "lockout_fail_count": 5, + "lockout_fail_window": 30, "lockout_penalty_time": 900}, + "restrict_allowed_ip_range": { + "enable_ip_range": False, "ip_range": None}, + }, + "job_failed": False, "job_message": "job_message", + "json_data": { + "JobId": 1234, + "SystemConfiguration": { + "Comments": ["Export type is Normal,JSON"], + "Model": "", "ServiceTag": "", + "Components": [ + { + "FQDD": "MM.Embedded.1", + "Attributes": [ + { + "Name": "LoginSecurity.1#Id", + "Value": "10" + }, + { + "Name": "LoginSecurity.1#LockoutFailCount", + "Value": 3 + }, + { + "Name": "LoginSecurity.1#LockoutFailCountTime", + "Value": 32 + }, + { + "Name": "LoginSecurity.1#LockoutPenaltyTime", + "Value": 850 + }, + { + "Name": "LoginSecurity.1#IPRangeAddr", + "Value": None + }, + { + "Name": "LoginSecurity.1#LockoutByUsernameEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#LockoutByIPEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#IPRangeEnable", + "Value": False + } + ] + } + ] + } + }, "msg": SEC_JOB_COMPLETE}, + {"module_args": { + "job_wait": True, "job_wait_timeout": 120, + "login_lockout_policy": { + "by_ip_address": False, "by_user_name": False, "lockout_fail_count": 5, + "lockout_fail_window": 30, "lockout_penalty_time": 900}, + "restrict_allowed_ip_range": { + "enable_ip_range": False, "ip_range": None}, + }, + "job_failed": True, "job_message": "job_failed", + "json_data": { + "JobId": 1234, + "value": [ + { + "Id": 1234, + "StartTime": "2021-01-01 09:54:08.721", + "EndTime": "2021-01-01 09:54:09.022", + "Key": "This Chassis", + "Value": "job_failed_exec" + } + ], + "SystemConfiguration": { + "Comments": ["Export type is Normal,JSON"], + "Model": "", "ServiceTag": "", + "Components": [ + { + "FQDD": "MM.Embedded.1", + "Attributes": [ + { + "Name": "LoginSecurity.1#Id", + "Value": "10" + }, + { + "Name": "LoginSecurity.1#LockoutFailCount", + "Value": 3 + }, + { + "Name": "LoginSecurity.1#LockoutFailCountTime", + "Value": 32 + 
}, + { + "Name": "LoginSecurity.1#LockoutPenaltyTime", + "Value": 850 + }, + { + "Name": "LoginSecurity.1#IPRangeAddr", + "Value": None + }, + { + "Name": "LoginSecurity.1#LockoutByUsernameEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#LockoutByIPEnable", + "Value": True + }, + { + "Name": "LoginSecurity.1#IPRangeEnable", + "Value": False + } + ] + } + ] + } + }, "msg": "job_failed_exec"}, + {"module_args": {"fips_mode_enable": True}, + "json_data": {"FipsMode": "OFF"}, + "msg": FIPS_TOGGLED.format("enabled")}, + {"module_args": {"fips_mode_enable": False}, + "json_data": {"FipsMode": "ON"}, + "msg": FIPS_TOGGLED.format("disabled")}, + {"module_args": {"fips_mode_enable": True}, + "json_data": {"FipsMode": "ON"}, + "msg": NO_CHANGES_MSG}, + {"module_args": {"fips_mode_enable": False}, + "json_data": {"FipsMode": "ON"}, + "msg": CHANGES_FOUND, "check_mode": True}, + ]) + def test_ome_application_security_success( + self, + params, + ome_connection_mock_for_security_settings, + ome_response_mock, + ome_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params['module_args']) + ome_connection_mock_for_security_settings.job_tracking.return_value = \ + (params.get('job_failed'), params.get('job_message')) + result = self._run_module( + ome_default_args, check_mode=params.get( + 'check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("exc_type", + [IOError, + ValueError, + SSLValidationError, + TypeError, + ConnectionError, + HTTPError, + URLError]) + def test_security_settings_main_exception_case( + self, + exc_type, + mocker, + ome_default_args, + ome_connection_mock_for_security_settings, + ome_response_mock): + ome_default_args.update({"restrict_allowed_ip_range": { + "enable_ip_range": False + }}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = 
to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch( + MODULE_PATH + 'login_security_setting', + side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'login_security_setting', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'login_security_setting', + side_effect=exc_type('http://testhost.com', + 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py new file mode 100644 index 00000000..0d3504b1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.6.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_chassis_slots +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +DEVICE_REPEATED = "Duplicate device entry found for devices with identifiers {0}." +INVALID_SLOT_DEVICE = "Unable to rename one or more slots because either the specified device is invalid or slots " \ + "cannot be configured. The devices for which the slots cannot be renamed are: {0}." +JOBS_TRIG_FAIL = "Unable to initiate the slot name rename jobs." +SUCCESS_MSG = "Successfully renamed the slot(s)." +SUCCESS_REFRESH_MSG = "The rename slot job(s) completed successfully. " \ + "For changes to reflect, refresh the inventory task manually." +FAILED_MSG = "Failed to rename {0} of {1} slot names." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_chassis_slots.' +MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.' 
+ + +@pytest.fixture +def ome_connection_mock_for_chassis_slots(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeChassisSlots(FakeAnsibleModule): + module = ome_chassis_slots + + @pytest.mark.parametrize("params", [{'mparams': {"device_options": [ + {"slot_name": "t1", + "device_service_tag": "ABCD1234"}]}, + "invalid_list": set(["ABCD1234"]), "json_data": { + "value": [{"Id": 10053, "Identifier": "2H5DNX2", "SlotConfiguration": {"ChassisName": None}}, + {"Id": 10054, "Type": 1000, "Identifier": "2H7HNX2", + "SlotConfiguration": {"DeviceType": "1000", "ChassisId": "10053", "SlotNumber": "1", + "SlotName": "my_840c", "SlotType": "2000"}}]}, + 'message': INVALID_SLOT_DEVICE, "success": True}, + {'mparams': {"device_options": [{"slot_name": "s1", "device_id": 10054}, + {"slot_name": "s2", + "device_service_tag": "ABCD1234"}, + {"slot_name": "s1", "device_id": 10052}, + ]}, + "invalid_list": set(["ABCD1234"]), + "json_data": + {"value": [{"Id": 10053, "Identifier": "2H5DNX2", + "SlotConfiguration": {"ChassisName": None}}, + {"Id": 10054, "Type": 1000, "Identifier": "ABCD1234", + "SlotConfiguration": {"DeviceType": "1000", "ChassisId": "10053", "SlotNumber": "1", + "SlotName": "my_840c", "SlotType": "2000"}}]}, 'message': DEVICE_REPEATED, + "success": True}, { + 'mparams': {"device_options": [{"slot_name": "s1", "device_id": 10054}, + {"slot_name": "s2", + "device_service_tag": "ABCD1234"}, + {"slot_name": "s2", + "device_service_tag": "ABCD1234"}, + {"slot_name": "s2", + "device_service_tag": "PQRS1234"}, ]}, + "invalid_list": set(["ABCD1234"]), "json_data": { + "value": [{"Id": 10053, "Identifier": "2H5DNX2", "SlotConfiguration": {"ChassisName": None}}, + {"Id": 10052, "Type": 1000, "Identifier": "PQRS1234", + 
"SlotConfiguration": {"DeviceType": "1000", "ChassisId": "10053", "SlotNumber": "1", + "SlotName": "my_840c", "SlotType": "2000"}}, + {"Id": 10054, "Type": 1000, "Identifier": "ABCD1234", + "SlotConfiguration": {"DeviceType": "1000", "ChassisId": "10053", "SlotNumber": "1", + "SlotName": "my_840c", "SlotType": "2000"}}]}, 'message': DEVICE_REPEATED, + "success": True}, ]) + def test_get_device_slot_config_errors(self, params, ome_connection_mock_for_chassis_slots, ome_response_mock, + ome_default_args, module_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + ome_connection_mock_for_chassis_slots.get_all_items_with_pagination.return_value = params[ + 'json_data'] + ome_default_args.update(params['mparams']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['message'].format( + ';'.join(set(params.get("invalid_list")))) + + @pytest.mark.parametrize("params", [{"json_data": {'Name': 'j1', 'Id': 24}, "slot_data": { + "ABC1234": {"ChassisId": "123", "SlotNumber": "1", "SlotType": "2000"}}, "failed_jobs": {}}]) + def test_start_slot_name_jobs( + self, params, ome_connection_mock_for_chassis_slots, ome_response_mock): + ome_response_mock.success = params.get("success", True) + # ome_connection_mock_for_chassis_slots.job_submission.return_value = params['json_data'] + ome_response_mock.json_data = params["json_data"] + failed_jobs = self.module.start_slot_name_jobs( + ome_connection_mock_for_chassis_slots, params.get('slot_data')) + assert failed_jobs == params['failed_jobs'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"value": [{'Name': 'j1', 'Id': 12, "LastRunStatus": {"Id": 2060, "Name": "Completed"}}]}, + "slot_data": {"ABC1234": {"new_name": "s1", "SlotNumber": "1", "SlotType": "2000", "JobId": 12}}, + "failed_jobs": {}}]) + def test_get_job_states( + self, params, ome_connection_mock_for_chassis_slots, ome_response_mock): + 
ome_response_mock.success = params.get("success", True) + f_module = self.get_module_mock() + ome_response_mock.json_data = params["json_data"] + failed_jobs = self.module.get_job_states(f_module, ome_connection_mock_for_chassis_slots, + params.get('slot_data')) + assert failed_jobs == params['failed_jobs'] + + @pytest.mark.parametrize("params", [{'mparams': {"device_options": [{"slot_name": "my_840c", "device_id": 10054}, + {"slot_name": "my_740c", + "device_service_tag": "ABCD1234"}]}, + "json_data": {"value": [{"Id": 10053, "Identifier": "ABCD1234", + "SlotConfiguration": {"DeviceType": "1000", + "ChassisId": "10053", + "SlotNumber": "1", + "SlotName": "my_740c", + "SlotType": "2000"}}, + {"Id": 10054, "Type": 1000, "Identifier": "PQRS1234", + "SlotConfiguration": {"DeviceType": "1000", + "ChassisId": "10053", + "SlotNumber": "1", + "SlotName": "my_840c", + "SlotType": "2000"}}]}, + 'message': NO_CHANGES_MSG, "check_mode": True}, {'mparams': { + "device_options": [{"slot_name": "my_840", "device_id": 10054}, + {"slot_name": "my_740", + "device_service_tag": "ABCD1234"}]}, + "json_data": {"value": [{"Id": 10053, "Identifier": "ABCD1234", + "SlotConfiguration": {"DeviceType": "1000", + "ChassisId": "10053", "SlotNumber": "1", "SlotName": "my_740c", + "SlotType": "2000"}}, + {"Id": 10054, "Type": 1000, "Identifier": "PQRS1234", + "SlotConfiguration": {"DeviceType": "1000", "ChassisId": "10053", "SlotNumber": "1", + "SlotName": "my_840c", "SlotType": "2000"}}]}, + 'message': CHANGES_FOUND, "check_mode": True}, ]) + def test_check_mode_idempotency( + self, params, ome_connection_mock_for_chassis_slots, ome_default_args): + ome_connection_mock_for_chassis_slots.get_all_items_with_pagination.return_value = params[ + 'json_data'] + ome_default_args.update(params['mparams']) + result = self._run_module( + ome_default_args, check_mode=params.get( + 'check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [{'mparams': 
{"device_options": [{"slot_name": "t1", "device_id": 10053}, + {"slot_name": "t1", + "device_service_tag": "ABCD1234"}]}, + "json_data": {"value": [{"Id": 10053, "Identifier": "2H5DNX2", + "SlotConfiguration": {"DeviceType": "1000", + "ChassisId": "10053", + "SlotNumber": "1", + "SlotName": "my_840c", + "SlotType": "2000"}}, + {"Id": 10054, "Identifier": "ABCD1234", + "SlotConfiguration": {"DeviceType": "1000", + "ChassisId": "10053", + "SlotNumber": "1", + "SlotName": "my_840c", + "SlotType": "2000"}}], }, + 'message': SUCCESS_MSG, "success": True}]) + def test_ome_chassis_slots_success_case(self, params, ome_connection_mock_for_chassis_slots, ome_response_mock, + ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + # ome_response_mock.json_data = params['json_data'] + ome_connection_mock_for_chassis_slots.get_all_items_with_pagination.return_value = params[ + 'json_data'] + ome_connection_mock_for_chassis_slots.job_tracking.return_value = ( + False, "job_track_msg") + mocker.patch( + MODULE_PATH + + 'trigger_refresh_inventory', + return_value=[1]) + mocker.patch( + MODULE_PATH + + 'start_slot_name_jobs', + return_value=params.get( + 'start_slot_name_jobs', + {})) + mocker.patch( + MODULE_PATH + + 'get_job_states', + return_value=params.get( + 'get_job_states', + {})) + ome_default_args.update(params['mparams']) + result = self._run_module( + ome_default_args, check_mode=params.get( + 'check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [{'mparams': {"slot_options": [{"chassis_service_tag": "ABC1234", + "slots": [{"slot_name": "t1", "slot_number": 1}, + {"slot_name": "s1", + "slot_number": 5}]}]}, + "chassi": {'value': [{"Identifier": "ABC1234", "Id": 1234}]}, + "bladeslots": {'ABC1234_1': {"SlotNumber": "1", "SlotName": "myslotty"}}, + "storageslots": {'value': [{"SlotNumber": "5", "SlotName": "stor-slot1"}]}, + "slot_data": {"ABC1234_1": {"SlotNumber": "1", "SlotName": 
"myslotty"}}}]) + def test_slot_number_config(self, params, ome_connection_mock_for_chassis_slots, ome_response_mock, + ome_default_args, mocker): + mocker.patch( + MODULE_PATH + 'get_device_type', + return_value=params.get('chassi')) + mocker.patch( + MODULE_PATH + 'get_slot_data', + return_value=params.get('bladeslots')) + f_module = self.get_module_mock(params=params["mparams"]) + slot_data = self.module.slot_number_config( + f_module, ome_connection_mock_for_chassis_slots) + assert slot_data == params['slot_data'] + + @pytest.mark.parametrize("params", [{"slot_options": {"chassis_service_tag": "ABC1234", + "slots": [{"slot_name": "t1", "slot_number": 1}, + {"slot_name": "s1", "slot_number": 5}]}, + "chass_id": 1234, "chassi": {'value': [{"Identifier": "ABC1234", "Id": 1234}]}, + "bladeslots": {'value': [{"SlotNumber": "1", "SlotName": "blade-slot1", + "Id": 234}]}, + "storageslots": {'value': [{"ChassisServiceTag": "ABC1234", + "SlotConfiguration": { + "SlotId": "123", "SlotNumber": "5", + "SlotName": "stor-slot1"}}]}, + "slot_dict_diff": {'ABC1234_5': {'SlotNumber': '5', 'SlotName': 'stor-slot1', + 'ChassisId': 1234, 'SlotId': "123", + 'ChassisServiceTag': 'ABC1234', + 'new_name': 's1'}, + 'ABC1234_1': {'SlotNumber': '1', 'SlotName': 'blade-slot1', + 'ChassisId': 1234, 'SlotId': "234", + "Id": 234, + 'ChassisServiceTag': 'ABC1234', + 'new_name': 't1'}}}]) + def test_get_slot_data(self, params, ome_connection_mock_for_chassis_slots, ome_response_mock, ome_default_args, + mocker): + mocker.patch( + MODULE_PATH + 'get_device_type', + return_value=params.get('storageslots')) + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["bladeslots"] + ch_slots = params['slot_options'] + f_module = self.get_module_mock() + slot_dict_diff = self.module.get_slot_data(f_module, ome_connection_mock_for_chassis_slots, ch_slots, + params['chass_id']) + assert slot_dict_diff == params['slot_dict_diff'] + + 
@pytest.mark.parametrize("params", [{"json_data": {'Name': 'j1', 'Id': 24}, "slot_data": { + "ABC1234": {"ChassisId": "123", "SlotNumber": "1", "ChassisServiceTag": "ABC1234"}}, "jobs": [1]}]) + def test_trigger_refresh_inventory( + self, params, ome_connection_mock_for_chassis_slots, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + jobs = self.module.trigger_refresh_inventory( + ome_connection_mock_for_chassis_slots, params.get('slot_data')) + assert jobs == params['jobs'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_groups_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_chassis_slots, ome_response_mock): + ome_default_args.update( + {"device_options": [{"slot_name": "t1", "device_id": 1234}]}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch( + MODULE_PATH + 'get_device_slot_config', + side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'get_device_slot_config', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_device_slot_config', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py new file mode 100644 index 00000000..51ff166f --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py @@ -0,0 +1,1195 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.2.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_configuration_compliance_baseline +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ssl import SSLError +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_configuration_compliance_baseline.' +INVALID_DEVICES = "{identifier} details are not available." +TEMPLATE_ID_ERROR_MSG = "Template with ID '{0}' not found." +TEMPLATE_NAME_ERROR_MSG = "Template '{0}' not found." +NAMES_ERROR = "Only delete operations accept multiple baseline names. All the other operations accept only a single " \ + "baseline name." +BASELINE_CHECK_MODE_CHANGE_MSG = "Baseline '{name}' already exists." +CHECK_MODE_CHANGES_MSG = "Changes found to be applied." +CHECK_MODE_NO_CHANGES_MSG = "No changes found to be applied." +BASELINE_CHECK_MODE_NOCHANGE_MSG = "Baseline '{name}' does not exist." +CREATE_MSG = "Successfully created the configuration compliance baseline." 
+DELETE_MSG = "Successfully deleted the configuration compliance baseline(s)." +TASK_PROGRESS_MSG = "The initiated task for the configuration compliance baseline is in progress." +CREATE_FAILURE_PROGRESS_MSG = "The initiated task for the configuration compliance baseline has failed" +INVALID_IDENTIFIER = "Target with {identifier} {invalid_val} not found." +IDEMPOTENCY_MSG = "The specified configuration compliance baseline details are the same as the existing settings." +INVALID_COMPLIANCE_IDENTIFIER = "Unable to complete the operation because the entered target {0} {1}" \ + " is not associated with the baseline '{2}'." +INVALID_TIME = "job_wait_timeout {0} is not valid." +REMEDIATE_MSG = "Successfully completed the remediate operation." +MODIFY_MSG = "Successfully modified the configuration compliance baseline." +JOB_FAILURE_PROGRESS_MSG = "The initiated task for the configuration compliance baseline has failed." + +device_info = { + "value": [ + { + "Id": Constants.device_id1, + "Type": 2000, + "Identifier": Constants.service_tag1, + "DeviceServiceTag": Constants.service_tag1, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "DeviceCapabilities": [33, 11], + "ManagedState": 3000, + "Status": 1000, + "ConnectionState": True, + "SystemId": 2031, + "DeviceName": "MX-MOCK" + }, + { + "Id": Constants.device_id2, + "Type": 2000, + "Identifier": Constants.service_tag2, + "DeviceServiceTag": Constants.service_tag2, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "ManagedState": 3000, + "Status": 1000, + "ConnectionState": True, + "SystemId": 2031, + "DeviceName": "MX-MOCK" + } + ] +} + +group_info = { + "@odata.count": 2, + "value": [ + { + "Id": Constants.device_id1, + "Name": "Network Mock", + }, + { + "Id": Constants.device_id2, + "Name": "OEM Mock", + } + ] +} + +baseline_info = { + "@odata.count": 1, + "value": [ + { + "@odata.type": "#TemplateService.Baseline", + "@odata.id": 
"/api/TemplateService/Baselines(30)", + "Id": 30, + "Name": "baseline5", + "Description": None, + "TemplateId": 102, + "TemplateName": "one", + "TemplateType": 2, + "TaskId": 26606, + "PercentageComplete": "100", + "TaskStatus": 2070, + "LastRun": "2021-03-02 19:29:31.503", + "BaselineTargets": [ + { + "Id": 10074, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "ConfigComplianceSummary": { + "ComplianceStatus": "OK", + "NumberOfCritical": 0, + "NumberOfWarning": 0, + "NumberOfNormal": 0, + "NumberOfIncomplete": 0 + }, + "DeviceConfigComplianceReports@odata.navigationLink": "/api/TemplateService/Baselines(30)/DeviceConfigComplianceReports" + } + ] +} + +baseline_output = { + "Id": 30, + "Name": "baseline5", + "Description": None, + "TemplateId": 102, + "TemplateName": "one", + "TemplateType": 2, + "TaskId": 26606, + "PercentageComplete": "100", + "TaskStatus": 2070, + "LastRun": "2021-03-02 19:29:31.503", + "BaselineTargets": [ + { + "Id": 10074, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "ConfigComplianceSummary": { + "ComplianceStatus": "OK", + "NumberOfCritical": 0, + "NumberOfWarning": 0, + "NumberOfNormal": 0, + "NumberOfIncomplete": 0 + }, +} + +compliance_report = { + "@odata.count": 2, + "value": [ + { + "@odata.id": "/api/TemplateService/Baselines(30)/DeviceConfigComplianceReports({0})".format( + Constants.device_id1), + "Id": Constants.device_id1, + "DeviceName": "mock_devicename", + "Model": "mock_model", + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": "COMPLIANT", + "DeviceType": 1000, + "InventoryTime": "2021-03-10 21:39:16.958627", + }, + { + "@odata.id": "/api/TemplateService/Baselines(30)/DeviceConfigComplianceReports({0})".format( + Constants.device_id2), + "Id": Constants.device_id2, + "DeviceName": "mock_devicename", + "Model": "mock_model", + "ServiceTag": Constants.service_tag2, + "ComplianceStatus": "NONCOMPLIANT", + "DeviceType": 1000, + "InventoryTime": "2021-03-10 21:39:16.958627", + } + ] +} + + 
+@pytest.fixture +def ome_connection_mock_for_compliance(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeConfigCompBaseline(FakeAnsibleModule): + module = ome_configuration_compliance_baseline + + @pytest.mark.parametrize("params", [{"name": "baseline", "template_name": "iDRAC 13G Enable Low Latency Profile"}, + {"name": "baseline", "template_id": 1}]) + def test_ome_configuration_get_template_details_case1(self, params, ome_response_mock, + ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params=params) + template_info = { + "@odata.count": 1, + "value": [{ + "Id": 1, + "Name": "iDRAC 13G Enable Low Latency Profile", + "Description": "Tune workload for High Performance Computing Environment", + "SourceDeviceId": 0, + "TypeId": 2, + "ViewTypeId": 4, + }] + } + ome_response_mock.json_data = template_info + template_data = self.module.get_template_details(f_module, ome_connection_mock_for_compliance) + assert template_data == template_info["value"][0] + + @pytest.mark.parametrize("params", [{"names": "baseline", "template_name": "iDRAC 13G Enable Low Latency Profile"}, + {"names": "baseline", "template_id": 1}]) + def test_ome_configuration_get_template_details_case2(self, params, ome_response_mock, + ome_connection_mock_for_compliance): + """ + when invalid template name and ids are provided + """ + f_module = self.get_module_mock(params=params) + template_info = { + "@odata.count": 1, + "value": [] + } + ome_response_mock.json_data = template_info + with pytest.raises(Exception) as err: + self.module.get_template_details(f_module, ome_connection_mock_for_compliance) + if "template_id" in params: + assert err.value.args[0] == TEMPLATE_ID_ERROR_MSG.format(params['template_id']) + else: + assert 
err.value.args[0] == TEMPLATE_NAME_ERROR_MSG.format(params['template_name']) + + def test_validate_identifiers_case01(self): + """ + No exception thrown when valid device ids are passed + """ + requested_values = [Constants.device_id1, Constants.device_id2] + f_module = self.get_module_mock(params={"device_ids": requested_values}) + available_values = dict([(item["Id"], item["Identifier"]) for item in device_info["value"]]) + self.module.validate_identifiers(available_values.keys(), requested_values, "device_ids", f_module) + + def test_validate_identifiers_case02(self): + """ + No exception thrown when valid device se tagsrvice are passed + """ + requested_values = [Constants.service_tag2, Constants.service_tag1] + available_values = dict([(item["Id"], item["Identifier"]) for item in device_info["value"]]) + f_module = self.get_module_mock(params={"device_service_tags": requested_values}) + self.module.validate_identifiers(available_values.values(), requested_values, "device_service_tags", f_module) + + @pytest.mark.parametrize("val", [[Constants.service_tag1, "abc", "xyz"], ["abc", "xyz"]]) + def test_validate_identifiers_case03(self, val): + """ + Exception should be thrown when invalid service tags are passed + """ + requested_values = val + f_module = self.get_module_mock(params={"device_service_tags": requested_values}) + available_values = dict([(item["Id"], item["Identifier"]) for item in device_info["value"]]) + with pytest.raises(Exception) as err: + self.module.validate_identifiers(available_values.values(), requested_values, "device_service_tags", + f_module) + assert err.value.args[0].find("Target with device_service_tags") != -1 + + def test_get_identifiers_case01(self): + """ + get the device id from serivice tags + """ + available_identifiers_map = dict([(item["Id"], item["Identifier"]) for item in device_info["value"]]) + requested_values = [Constants.service_tag1] + val = self.module.get_identifiers(available_identifiers_map, requested_values) + 
assert val == [Constants.device_id1] + + def test_get_identifiers_case02(self): + """ + get the group id from group Names + """ + available_identifiers_map = dict([(item["Id"], item["Name"]) for item in group_info["value"]]) + requested_values = ["OEM Mock"] + val = self.module.get_identifiers(available_identifiers_map, requested_values) + assert val == [Constants.device_id2] + + def test_get_group_ids(self, ome_connection_mock_for_compliance): + """ + success case + """ + f_module = self.get_module_mock(params={"device_group_names": ["OEM Mock"], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": group_info["@odata.count"], "value": group_info["value"]} + value = self.module.get_group_ids(f_module, ome_connection_mock_for_compliance) + assert value == [Constants.device_id2] + + def test_get_group_ids_failure_case1(self, ome_connection_mock_for_compliance): + """ + success case + """ + f_module = self.get_module_mock(params={"device_group_names": ["OEM Mock Invalid"], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": group_info["@odata.count"], + "value": group_info["value"] + } + with pytest.raises(Exception) as err: + self.module.get_group_ids(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == "Target with device_group_names OEM Mock Invalid not found." 
+ + def test_get_group_ids_failure_case2(self, ome_connection_mock_for_compliance): + """ + success case + """ + f_module = self.get_module_mock(params={"device_group_names": ["OEM Mock Invalid"], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": group_info["@odata.count"], + "value": [] + } + with pytest.raises(Exception) as err: + self.module.get_group_ids(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == INVALID_DEVICES.format(identifier="Group") + + def test_get_device_ids_case01(self, ome_response_mock, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"device_ids": [Constants.device_id2, Constants.device_id1], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": device_info["value"]} + value, compatible_map = self.module.get_device_ids(f_module, ome_connection_mock_for_compliance) + assert value == [Constants.device_id2, Constants.device_id1] + assert compatible_map == {"capable": [Constants.device_id1], "non_capable": [Constants.device_id2]} + + def test_get_device_ids_case2(self, ome_response_mock, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"device_service_tags": [Constants.service_tag1], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": device_info["value"]} + value, compatible_map = self.module.get_device_ids(f_module, ome_connection_mock_for_compliance) + assert value == [Constants.device_id1] + assert compatible_map == {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + + def test_get_device_ids_case01_failurecase(self, ome_response_mock, ome_connection_mock_for_compliance): + f_module = 
self.get_module_mock(params={"device_ids": [100], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": device_info["value"]} + with pytest.raises(Exception) as err: + self.module.get_device_ids(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == "Target with device_ids 100 not found." + + def test_get_device_ids_case2_failure_case(self, ome_response_mock, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"device_service_tags": ["xyz"], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": device_info["value"]} + with pytest.raises(Exception) as err: + self.module.get_device_ids(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == "Target with device_service_tags xyz not found." + + def test_get_device_ids_failure_case(self, ome_response_mock, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"device_ids": [Constants.device_id2], "command": "create", + "template_id": 2}) + ome_connection_mock_for_compliance.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": []} + with pytest.raises(Exception) as err: + self.module.get_device_ids(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == INVALID_DEVICES.format(identifier="Device") + + def test_create_payload_case1(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"device_ids": [Constants.device_id1, Constants.device_id2], "command": "create", + "template_id": 2, "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id1, Constants.device_id2], {})) + mocker.patch(MODULE_PATH + 'validate_capability', + return_value=None) + 
mocker.patch(MODULE_PATH + 'get_template_details', + return_value={"Id": 2, "Name": "template1"}) + payload = self.module.create_payload(f_module, ome_connection_mock_for_compliance) + assert payload == { + "Name": "baseline1", + "TemplateId": 2, + "BaselineTargets": [{"Id": Constants.device_id1}, + {"Id": Constants.device_id2}] + } + + def test_create_payload_case2(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"device_service_tags": [Constants.service_tag1, Constants.service_tag2], "command": "create", + "template_id": 2, "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id1, Constants.device_id2], "map")) + mocker.patch(MODULE_PATH + 'validate_capability', + return_value=None) + mocker.patch(MODULE_PATH + 'get_template_details', + return_value={"Id": 2, "Name": "template1"}) + payload = self.module.create_payload(f_module, ome_connection_mock_for_compliance) + assert payload == { + "Name": "baseline1", + "TemplateId": 2, + "BaselineTargets": [{"Id": Constants.device_id1}, + {"Id": Constants.device_id2}] + } + + def test_create_payload_case3(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"device_group_names": ["xyz"], "command": "create", + "template_id": 2, "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_group_ids', + return_value=[Constants.device_id1, Constants.device_id2]) + mocker.patch(MODULE_PATH + 'get_template_details', + return_value={"Id": 2, "Name": "template1"}) + payload = self.module.create_payload(f_module, ome_connection_mock_for_compliance) + assert payload == { + "Name": "baseline1", + "TemplateId": 2, + "BaselineTargets": [{"Id": Constants.device_id1}, + {"Id": Constants.device_id2}] + } + + def test_get_baseline_compliance_info(self, ome_connection_mock_for_compliance): + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = baseline_info + val = 
self.module.get_baseline_compliance_info(ome_connection_mock_for_compliance, "baseline5", "Name") + assert val == baseline_output + + def test_get_baseline_compliance_info_case2(self, ome_connection_mock_for_compliance): + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = baseline_info + val = self.module.get_baseline_compliance_info(ome_connection_mock_for_compliance, 30, "Id") + assert val == baseline_output + + def test_track_compliance_task_completion_case01(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"device_group_names": ["xyz"], "command": "create", + "template_id": 2, "names": ["baseline1"], "job_wait": True, + "job_wait_timeout": 600}) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + msg, info = self.module.track_compliance_task_completion(ome_connection_mock_for_compliance, 30, f_module) + assert msg == CREATE_MSG + assert info == baseline_output + + def test_track_compliance_task_completion_case02(self, mocker, ome_connection_mock_for_compliance): + baseline_output["PercentageComplete"] = 25 + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', return_value=baseline_output) + f_module = self.get_module_mock(params={"device_group_names": ["xyz"], "command": "create", + "template_id": 2, "names": ["baseline1"], "job_wait": True, + "job_wait_timeout": 600}) + msg, info = self.module.track_compliance_task_completion(ome_connection_mock_for_compliance, 30, f_module) + assert msg == TASK_PROGRESS_MSG + assert info == baseline_output + assert info["PercentageComplete"] == 25 + + def test_track_compliance_task_completion_case03(self, mocker, ome_connection_mock_for_compliance): + baseline_output["PercentageComplete"] = 25 + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + mocker.patch(MODULE_PATH + 
'get_baseline_compliance_info', return_value=baseline_output) + f_module = self.get_module_mock(params={"device_group_names": ["xyz"], "command": "create", + "template_id": 2, "names": ["baseline1"], "job_wait": False, + "job_wait_timeout": 600}) + msg, info = self.module.track_compliance_task_completion(ome_connection_mock_for_compliance, 30, f_module) + assert msg == TASK_PROGRESS_MSG + assert info == baseline_output + assert info["PercentageComplete"] == 25 + + @pytest.mark.parametrize('val', [True, False]) + def test_validate_create_baseline_idempotency(self, mocker, val, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=val) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + with pytest.raises(Exception) as err: + self.module.validate_create_baseline_idempotency(f_module, + ome_connection_mock_for_compliance) + assert err.value.args[0] == BASELINE_CHECK_MODE_CHANGE_MSG.format(name=baseline_output["Name"]) + + def test_validate_create_baseline_idempotency_case2(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=True) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + with pytest.raises(Exception) as err: + self.module.validate_create_baseline_idempotency(f_module, + ome_connection_mock_for_compliance) + assert err.value.args[0] == BASELINE_CHECK_MODE_CHANGE_MSG.format(name="baseline5") + + def test_create_baseline_case01(self, mocker, ome_response_mock, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'validate_create_baseline_idempotency', + return_value=None) + mocker.patch(MODULE_PATH + 'create_payload', + return_value={}) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + f_module = self.get_module_mock(params={"names": ["baseline5"], "job_wait": False, 
"job_wait_timeout": 600}, + check_mode=False) + ome_response_mock.json_data = {"Id": 1} + ome_connection_mock_for_compliance.job_tracking.return_value = False, "message" + with pytest.raises(Exception) as err: + self.module.create_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == TASK_PROGRESS_MSG + + @pytest.mark.parametrize("val", + [(False, "Job completed successfully."), (False, "other message."), (True, "message2")]) + def test_create_baseline_case02(self, val, mocker, ome_response_mock, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'validate_create_baseline_idempotency', + return_value=None) + mocker.patch(MODULE_PATH + 'create_payload', + return_value={}) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + f_module = self.get_module_mock(params={"job_wait": True, "job_wait_timeout": 600}, check_mode=False) + ome_connection_mock_for_compliance.job_tracking.return_value = val[0], val[1] + ome_response_mock.json_data = {"Id": 1} + with pytest.raises(Exception) as err: + self.module.create_baseline(f_module, ome_connection_mock_for_compliance) + if val[0] is False and "successfully" in val[1]: + assert err.value.args[0] == CREATE_MSG + elif val[0] is False and "successfully" not in val[1]: + assert err.value.args[0] == val[1] + else: + assert err.value.args[0] == val[1] + + def test_validate_names(self): + f_module = self.get_module_mock(params={"names": ["abc"]}, check_mode=False) + self.module.validate_names("create", f_module) + + def test_validate_names_case02(self): + f_module = self.get_module_mock(params={"names": ["abc", "xyz"]}, check_mode=False) + with pytest.raises(Exception) as err: + self.module.validate_names("create", f_module) + assert err.value.args[0] == NAMES_ERROR + + @pytest.mark.parametrize("command", ["create"]) + def test_compliance_operation(self, mocker, command, ome_connection_mock_for_compliance): + f_module = 
self.get_module_mock(params={"names": ["abc"], "command": "create"}, check_mode=False) + mocker.patch(MODULE_PATH + 'validate_job_time', + return_value=None) + mock_create = mocker.patch(MODULE_PATH + 'create_baseline', + return_value=None) + self.module.compliance_operation(f_module, ome_connection_mock_for_compliance) + assert mock_create.called + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_compliance_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_compliance, ome_response_mock): + ome_default_args.update({"template_name": "t1", "names": "baseline1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'compliance_operation', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'compliance_operation', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'compliance_operation', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + def test_compliance_create_argument_exception_case1(self, ome_default_args): + ome_default_args.update({"template_name": "t1"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "missing required arguments: names" + + def test_compliance_create_argument_exception_case2(self, ome_default_args): + ome_default_args.update({"template_id": 1}) + result = 
self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "missing required arguments: names" + + def test_compliance_create_argument_exception_case3(self, ome_default_args): + ome_default_args.update({"names": "baseline1"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "command is create but any of the following are missing: template_name, template_id" + + def test_compliance_create_argument_exception_case4(self, ome_default_args): + ome_default_args.update({"names": "baseline1", "template_name": "t1", "template_id": 1}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: template_id|template_name" + + def test_compliance_create_argument_exception_case5(self, ome_default_args): + ome_default_args.update({"names": "baseline1", "device_ids": 1, "template_name": "t1", + "device_service_tags": "xyz"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_ids|device_service_tags" + + def test_compliance_create_argument_exception_case6(self, ome_default_args): + ome_default_args.update({"names": "baseline1", "template_name": "t1", "device_ids": 1, + "device_group_names": "xyz"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_ids|device_group_names" + + def test_compliance_create_argument_exception_case7(self, ome_default_args): + ome_default_args.update({"names": "baseline1", "template_name": "t1", "device_service_tags": "abc", + "device_group_names": "xyz"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_service_tags|device_group_names" + + def test_compliance_create_argument_exception_case8(self, ome_default_args): + ome_default_args.update( + {"names": "baseline1", "template_name": "t1", "device_ids": 
1, "device_service_tags": "xyz", + "device_group_names": "abc"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_ids|device_service_tags, " \ + "device_ids|device_group_names, device_service_tags|device_group_names" + + @pytest.mark.parametrize("command", ["delete"]) + def test_compliance_operation_delete(self, mocker, command, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "delete"}, check_mode=False) + mock_delete_compliance = mocker.patch(MODULE_PATH + 'delete_compliance', + return_value=None) + mocker.patch(MODULE_PATH + 'validate_job_time', + return_value=None) + self.module.compliance_operation(f_module, ome_connection_mock_for_compliance) + assert mock_delete_compliance.called + + def test_delete_idempotency_check_case01(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_identifiers', + return_value=[30]) + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=False) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = baseline_info + val = self.module.delete_idempotency_check(f_module, ome_connection_mock_for_compliance) + assert val == [30] + + def test_delete_idempotency_check_case02(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_identifiers', + return_value=[30]) + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=True) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = baseline_info + with pytest.raises(Exception) as err: + self.module.delete_idempotency_check(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + def test_delete_idempotency_check_case03(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_identifiers', + return_value=[]) + f_module = 
self.get_module_mock(params={"names": ["baseline5"]}, check_mode=True) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = baseline_info + with pytest.raises(Exception) as err: + self.module.delete_idempotency_check(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_NO_CHANGES_MSG + + def test_delete_compliance_case01(self, mocker, ome_connection_mock_for_compliance, ome_response_mock): + mocker.patch(MODULE_PATH + 'delete_idempotency_check', + return_value=[30]) + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=False) + ome_response_mock.json_data = None + ome_response_mock.status_code = 204 + with pytest.raises(Exception) as err: + self.module.delete_compliance(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == DELETE_MSG + + def test_compliance_operation_modify(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=False) + mock_modify = mocker.patch(MODULE_PATH + 'modify_baseline', + return_value=None) + mocker.patch(MODULE_PATH + 'validate_job_time', + return_value=None) + self.module.compliance_operation(f_module, ome_connection_mock_for_compliance) + assert mock_modify.called + + @pytest.mark.parametrize("val", [(False, "Job completed successfully."), (False, "message1"), (True, "message2")]) + def test_modify_baseline_case01(self, val, mocker, ome_response_mock, ome_connection_mock_for_compliance): + payload = { + "Name": "baseline1", + "TemplateId": 2, + "BaselineTargets": [{"Id": Constants.device_id1}, + {"Id": Constants.device_id2}] + } + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify", "job_wait": True, + "job_wait_timeout": 600}, check_mode=False) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + mocker.patch(MODULE_PATH + 'create_payload', + return_value=payload) + 
mocker.patch(MODULE_PATH + 'idempotency_check_for_command_modify', + return_value=None) + ome_connection_mock_for_compliance.job_tracking.return_value = val[0], val[1] + ome_response_mock.json_data = {"Id": 1} + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_compliance) + if val[0] is False and "successfully" in val[1]: + assert err.value.args[0] == MODIFY_MSG + elif val[0] is False and "successfully" not in val[1]: + assert err.value.args[0] == val[1] + else: + assert err.value.args[0] == val[1] + + def test_modify_baseline_case02(self, mocker, ome_response_mock, ome_connection_mock_for_compliance): + payload = { + "Name": "baseline1", + "TemplateId": 2, + "BaselineTargets": [{"Id": Constants.device_id1}, + {"Id": Constants.device_id2}] + } + f_module = self.get_module_mock( + params={"names": ["abc"], "command": "modify", "job_wait": False, + "job_wait_timeout": 600}, check_mode=False) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + mocker.patch(MODULE_PATH + 'create_payload', + return_value=payload) + mocker.patch(MODULE_PATH + 'idempotency_check_for_command_modify', + return_value=None) + ome_response_mock.json_data = {"Id": 1} + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == TASK_PROGRESS_MSG + + def test_modify_baseline_case03(self, mocker, ome_response_mock, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=False) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value={}) + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name="abc") + + def test_modify_baseline_case04(self, mocker, ome_response_mock, 
ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=False) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value={}) + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name="abc") + + def test_idempotency_check_for_command_modify_case1(self, mocker): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=True) + mocker.patch(MODULE_PATH + 'compare_payloads', + return_value="diff") + with pytest.raises(Exception) as err: + self.module.idempotency_check_for_command_modify("current_payload", "expected_payload", f_module) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + def test_idempotency_check_for_command_modify_case2(self, mocker): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=True) + mocker.patch(MODULE_PATH + 'compare_payloads', + return_value=None) + with pytest.raises(Exception) as err: + self.module.idempotency_check_for_command_modify("current_payload", "expected_payload", f_module) + assert err.value.args[0] == CHECK_MODE_NO_CHANGES_MSG + + def test_idempotency_check_for_command_modify_case3(self, mocker): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify"}, check_mode=False) + mocker.patch(MODULE_PATH + 'compare_payloads', + return_value={}) + with pytest.raises(Exception) as err: + self.module.idempotency_check_for_command_modify("current_payload", "expected_payload", f_module) + assert err.value.args[0] == IDEMPOTENCY_MSG + + @pytest.mark.parametrize("modify_payload", [{"Id": 29, "Name": "baselin9", "TemplateId": 102}, + {"Id": 29, "Name": "baselin8", "TemplateId": 103}, + {"Id": 29, "Name": "baselin8", "TemplateId": 102, + "BaselineTargets": [{"Id": 10074}]}, + {"Id": 29, "Name": "baselin8", 
"TemplateId": 102, + "BaselineTargets": [{"Id": 10079}]}, + {"Id": 29, "Name": "baselin8", "TemplateId": 102, + "BaselineTargets": [{"Id": 10075}, + {"Id": 10074}]} + ]) + def test_compliance_compare_payloads_diff_case_01(self, modify_payload): + current_payload = { + "Id": 29, + "Name": "baselin8", + "Description": "desc", + "TemplateId": 102, + "BaselineTargets": [ + { + "Id": 10075 + } + ] + } + val = self.module.compare_payloads(modify_payload, current_payload) + assert val is True + + @pytest.mark.parametrize("current_payload", [{"Id": 29, "Name": "baselin8", "Description": "desc1"}, + {"Id": 29, "Name": "baselin9", "TemplateId": 102}, + {"Id": 29, "Name": "baselin8", "TemplateId": 103}, + {"Id": 29, "Name": "baselin8", "TemplateId": 102, + "BaselineTargets": [{"Id": 10074}]}, + {"Id": 29, "Name": "baselin8", "TemplateId": 102, + "BaselineTargets": [{"Id": 10079}]}]) + def test_compliance_compare_payloads_diff_case_02(self, current_payload): + modify_payload = { + "Id": 29, + "Name": "baselin8", + "Description": "desc", + "TemplateId": 102, + "BaselineTargets": [ + { + "Id": 10075 + } + ] + } + val = self.module.compare_payloads(modify_payload, current_payload) + assert val is True + + @pytest.mark.parametrize("modify_payload", [{"Id": 29, "Name": "baselin8", "TemplateId": 102}, + {"Id": 29, "Name": "baselin8", "Description": "desc"}, + {"Id": 29, "Name": "baselin8", + "BaselineTargets": [{"Id": 10075}]}]) + def test_compliance_compare_payloads_no_diff_case_03(self, modify_payload): + current_payload = { + "Id": 29, + "Name": "baselin8", + "Description": "desc", + "TemplateId": 102, + "BaselineTargets": [ + { + "Id": 10075 + } + ] + } + val = self.module.compare_payloads(modify_payload, current_payload) + assert val is False + + def test_get_ome_version(self, ome_response_mock, ome_connection_mock_for_compliance): + ome_response_mock.json_data = { + "Name": "OM Enterprise", + "Description": "OpenManage Enterprise", + "Vendor": "Dell, Inc.", + "ProductType": 1, 
+ "Version": "3.4.1", + "BuildNumber": "24", + "OperationJobId": 0 + } + version = self.module.get_ome_version(ome_connection_mock_for_compliance) + assert version == "3.4.1" + + def validate_validate_remediate_idempotency_with_device_ids(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"device_ids": [Constants.device_id2, Constants.device_id1], "command": "remediate", + "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], "map")) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": compliance_report["@odata.count"], "value": compliance_report["value"]} + noncomplaint_devices, baseline_info = self.module.validate_remediate_idempotency(f_module, + ome_connection_mock_for_compliance) + assert noncomplaint_devices == [Constants.device_id2] + assert baseline_info == baseline_output + + def validate_validate_remediate_idempotency_with_service_tags(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"device_ids": [Constants.service_tag1, Constants.service_tag2], "command": "remediate", + "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], "map")) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": compliance_report["@odata.count"], "value": compliance_report["value"]} + noncomplaint_devices, baseline_info = self.module.validate_remediate_idempotency(f_module, + ome_connection_mock_for_compliance) + assert noncomplaint_devices == [Constants.device_id2] + assert baseline_info == baseline_output + + def 
validate_validate_remediate_idempotency_without_devices(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], "map")) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": compliance_report["@odata.count"], "value": compliance_report["value"]} + noncomplaint_devices, baseline_info = self.module.validate_remediate_idempotency(f_module, + ome_connection_mock_for_compliance) + assert noncomplaint_devices == [Constants.device_id2] + assert baseline_info == baseline_output + + def validate_validate_remediate_idempotency_wen_all_complaint(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"]}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], "map")) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + report = { + "@odata.count": 2, + "value": [ + { + "Id": Constants.device_id1, + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": "COMPLIANT" + }, + { + "Id": Constants.device_id2, + "ServiceTag": Constants.service_tag2, + "ComplianceStatus": "COMPLIANT" + } + ] + } + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": report["@odata.count"], "value": report["value"]} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_NO_CHANGES_MSG + + def validate_validate_remediate_idempotency_without_devices_check_mode(self, mocker, + ome_connection_mock_for_compliance): + f_module = 
self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"]}, check_mode=True) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], "map")) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": compliance_report["@odata.count"], "value": compliance_report["value"]} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + @pytest.mark.parametrize("val", ["3.4.1", "3.4.5", "3.4.0", "3.4", "3.3", "3.3.0", "3.0.0", "2.1"]) + def test_create_remediate_payload_case01_for_old_releases(self, val, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_ome_version', + return_value=val) + payload = self.module.create_remediate_payload([Constants.device_id1], baseline_output, + ome_connection_mock_for_compliance) + assert "TargetIds" in payload + + @pytest.mark.parametrize("val", ["3.5.1", "3.5.5", "3.5.0", "3.5"]) + def test_create_remediate_payload_case01_for_new_releases(self, val, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_ome_version', + return_value=val) + payload = self.module.create_remediate_payload([Constants.device_id1], baseline_output, + ome_connection_mock_for_compliance) + assert "DeviceIds" in payload + + def test_remediate_baseline_case1(self, mocker, ome_connection_mock_for_compliance, ome_response_mock): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"], "job_wait": True, "job_wait_timeout": 600}, + check_mode=True) + mocker.patch(MODULE_PATH + 'validate_remediate_idempotency', + return_value=([Constants.device_id1], baseline_output)) + mocker.patch(MODULE_PATH + 'create_remediate_payload', + return_value="payload") + 
ome_response_mock.json_data = 1234 + ome_connection_mock_for_compliance.job_tracking.return_value = True, "job fail message" + with pytest.raises(Exception) as err: + self.module.remediate_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == "job fail message" + + def test_remediate_baseline_case2(self, mocker, ome_connection_mock_for_compliance, ome_response_mock): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"], "job_wait": True, "job_wait_timeout": 600}, + check_mode=True) + mocker.patch(MODULE_PATH + 'validate_remediate_idempotency', + return_value=([Constants.device_id1], baseline_output)) + mocker.patch(MODULE_PATH + 'create_remediate_payload', + return_value="payload") + ome_response_mock.json_data = 1234 + ome_connection_mock_for_compliance.job_tracking.return_value = False, "Job completed successfully." + with pytest.raises(Exception) as err: + self.module.remediate_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == REMEDIATE_MSG + + def test_remediate_baseline_case3(self, mocker, ome_connection_mock_for_compliance, ome_response_mock): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"], "job_wait": False, "job_wait_timeout": 600}, + check_mode=True) + mocker.patch(MODULE_PATH + 'validate_remediate_idempotency', + return_value=([Constants.device_id1], baseline_output)) + mocker.patch(MODULE_PATH + 'create_remediate_payload', + return_value="payload") + ome_response_mock.json_data = 1234 + with pytest.raises(Exception) as err: + self.module.remediate_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == TASK_PROGRESS_MSG + + @pytest.mark.parametrize("inparams", [{"command": "create", "names": ["baseline1"], + "job_wait": True, "job_wait_timeout": 0}, + {"command": "modify", "names": ["baseline1"], "job_wait": True, + "job_wait_timeout": 0}]) + def test_validate_job_time(self, 
inparams): + command = inparams['command'] + f_module = self.get_module_mock( + params=inparams) + with pytest.raises(Exception) as err: + self.module.validate_job_time(command, f_module) + assert err.value.args[0] == INVALID_TIME.format(inparams["job_wait_timeout"]) + + @pytest.mark.parametrize("command", ["remediate"]) + def test_compliance_remediate_operation(self, mocker, command, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["abc"], "command": "remediate"}, check_mode=False) + mocker.patch(MODULE_PATH + 'validate_job_time', + return_value=None) + mock_remediate = mocker.patch(MODULE_PATH + 'remediate_baseline', + return_value=None) + self.module.compliance_operation(f_module, ome_connection_mock_for_compliance) + assert mock_remediate.called + + @pytest.mark.parametrize("inparams", [{"command": "modify", "names": ["baseline1"], "job_wait": True, + "job_wait_timeout": 1}, + {"command": "modify", "names": ["baseline1"], "job_wait": False, + "job_wait_timeout": 1}, + {"command": "delete", "names": ["baseline1"], "job_wait": True, + "job_wait_timeout": 1}, + ]) + def test_validate_job_time_no_err_case(self, inparams): + command = inparams['command'] + f_module = self.get_module_mock( + params=inparams) + self.module.validate_job_time(command, f_module) + + def test_remediate_baseline_case4(self, mocker, ome_connection_mock_for_compliance, ome_response_mock): + f_module = self.get_module_mock( + params={"command": "remediate", "names": ["baseline1"], "job_wait": True, "job_wait_timeout": 600}, + check_mode=True) + mocker.patch(MODULE_PATH + 'validate_remediate_idempotency', + return_value=([Constants.device_id1], baseline_output)) + mocker.patch(MODULE_PATH + 'create_remediate_payload', + return_value="payload") + ome_response_mock.json_data = 1234 + ome_connection_mock_for_compliance.job_tracking.return_value = False, "Job is running." 
+ with pytest.raises(Exception) as err: + self.module.remediate_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == "Job is running." + + def test_modify_baseline_case05(self, mocker, ome_response_mock, ome_connection_mock_for_compliance): + payload = { + "Name": "baseline1", + "TemplateId": 2 + } + f_module = self.get_module_mock(params={"names": ["abc"], "command": "modify", "job_wait": False, + "job_wait_timeout": 600}, check_mode=False) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + mocker.patch(MODULE_PATH + 'create_payload', + return_value=payload) + mocker.patch(MODULE_PATH + 'idempotency_check_for_command_modify', + return_value=None) + ome_response_mock.json_data = {"Id": 1} + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == TASK_PROGRESS_MSG + + def test_validate_create_baseline_idempotency_case3(self, mocker, ome_connection_mock_for_compliance): + f_module = self.get_module_mock(params={"names": ["baseline5"]}, check_mode=True) + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value={}) + with pytest.raises(Exception) as err: + self.module.validate_create_baseline_idempotency(f_module, + ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + def test_validate_capability_no_err_case01(self): + capability_map = {"capable": [Constants.device_id1], "non_capable": [Constants.device_id2], } + f_module = self.get_module_mock(params={"device_ids": [Constants.device_id1]}, check_mode=True) + self.module.validate_capability(f_module, capability_map) + + def test_validate_capability_no_err_case02(self): + capability_map = {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + f_module = self.get_module_mock(params={"device_service_tags": [Constants.service_tag1]}, check_mode=True) + 
self.module.validate_capability(f_module, capability_map) + + def test_validate_capability_err_case01(self): + NO_CAPABLE_DEVICES = "Target device_service_tags contains devices which cannot be used for a baseline " \ + "compliance operation." + capability_map = {"capable": [Constants.service_tag2], "non_capable": [Constants.service_tag1]} + f_module = self.get_module_mock(params={"device_service_tags": [Constants.service_tag1]}, check_mode=True) + with pytest.raises(Exception) as err: + self.module.validate_capability(f_module, capability_map) + assert err.value.args[0] == NO_CAPABLE_DEVICES + + def test_validate_remediate_idempotency_case01(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value={}) + f_module = self.get_module_mock(params={"names": ["name1"]}, check_mode=True) + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name="name1") + + def test_validate_remediate_idempotency_case02(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + compliance_status = [ + { + "Id": Constants.device_id1, + "DeviceName": "XX.XXX.X.XXX", + "IpAddresses": [ + "XX.XXX.X.XXX" + ], + "Model": "PowerEdge MX840c", + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": 1, + "DeviceType": 1000, + "InventoryTime": "2020-10-05 18:28:09.842072" + } + ] + f_module = self.get_module_mock(params={"names": ["name1"], "device_ids": [Constants.device_id1]}, + check_mode=True) + capability_map = {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], capability_map)) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value 
= { + "total_count": 1, "value": compliance_status} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_NO_CHANGES_MSG + + def test_validate_remediate_idempotency_case03(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + compliance_status = [ + { + "Id": Constants.device_id1, + "DeviceName": "XX.XXX.X.XXX", + "IpAddresses": [ + "XX.XXX.X.XXX" + ], + "Model": "PowerEdge MX840c", + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": 2, + "DeviceType": 1000, + "InventoryTime": "2020-10-05 18:28:09.842072" + } + ] + f_module = self.get_module_mock(params={"names": ["name1"], "device_ids": [Constants.device_id1]}, + check_mode=True) + capability_map = {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], capability_map)) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": 1, "value": compliance_status} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + def test_validate_remediate_idempotency_case04(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + compliance_status = [ + { + "Id": Constants.device_id1, + "DeviceName": "XX.XXX.X.XXX", + "IpAddresses": [ + "XX.XXX.X.XXX" + ], + "Model": "PowerEdge MX840c", + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": 2, + "DeviceType": 1000, + "InventoryTime": "2020-10-05 18:28:09.842072" + } + ] + f_module = self.get_module_mock(params={"names": ["name1"], "device_service_tags": 
[Constants.service_tag1]}, + check_mode=True) + capability_map = {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], capability_map)) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": 1, "value": compliance_status} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG + + def test_validate_remediate_idempotency_case05(self, mocker, ome_connection_mock_for_compliance): + mocker.patch(MODULE_PATH + 'get_baseline_compliance_info', + return_value=baseline_output) + compliance_status = [ + { + "Id": Constants.device_id1, + "DeviceName": "XX.XXX.X.XXX", + "IpAddresses": [ + "XX.XXX.X.XXX" + ], + "Model": "PowerEdge MX840c", + "ServiceTag": Constants.service_tag1, + "ComplianceStatus": 2, + "DeviceType": 1000, + "InventoryTime": "2020-10-05 18:28:09.842072" + } + ] + f_module = self.get_module_mock(params={"names": ["name1"]}, + check_mode=True) + capability_map = {"capable": [Constants.service_tag1], "non_capable": [Constants.service_tag2]} + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=([Constants.device_id2, Constants.device_id1], capability_map)) + ome_connection_mock_for_compliance.get_all_items_with_pagination.return_value = { + "total_count": 1, "value": compliance_status} + with pytest.raises(Exception) as err: + self.module.validate_remediate_idempotency(f_module, ome_connection_mock_for_compliance) + assert err.value.args[0] == CHECK_MODE_CHANGES_MSG diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py new file mode 100644 index 00000000..b038b119 --- /dev/null 
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_configuration_compliance_info +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_configuration_compliance_info.' 
@pytest.fixture
def ome_connection_mock_for_compliance_info(mocker, ome_response_mock):
    """Patch RestOME and yield the connection mock used by the compliance-info tests.

    The mock is pre-wired so every entry point the module touches returns an
    empty-but-valid payload; individual tests override the pieces they need.
    """
    connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
    ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
    ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
    ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []}
    ome_connection_mock_obj.get_all_items_with_pagination.return_value = {"value": []}
    return ome_connection_mock_obj


class TestBaselineComplianceInfo(FakeAnsibleModule):
    """Unit tests for the ome_configuration_compliance_info module helpers."""

    module = ome_configuration_compliance_info

    def test_validate_device(self, ome_connection_mock_for_compliance_info):
        """validate_device resolves a device by id or service tag and rejects unknown ids."""
        value_list = [{"Id": 25011, "ServiceTag": "FGHREF"}]
        report = {"value": value_list}
        ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = report
        f_module = self.get_module_mock(params={'baseline': "baseline_one", "device_id": 25011})
        device = self.module.validate_device(f_module, report,
                                             device_id=25011, service_tag=None, base_id=None)
        service_tag = self.module.validate_device(f_module, report,
                                                  device_id=None, service_tag="FGHREF", base_id=None)
        with pytest.raises(Exception) as exc:
            self.module.validate_device(f_module, report,
                                        device_id=25012, service_tag=None, base_id=None)
        assert device == 25011
        assert service_tag == 25011
        assert exc.value.args[0] == "Unable to complete the operation because the entered " \
                                    "target device id or service tag '25012' is invalid."

    def test_get_baseline_id(self, ome_connection_mock_for_compliance_info):
        """get_baseline_id resolves a baseline name to its id and rejects unknown names."""
        report_list = [{"Id": 1, "Name": "baseline_one", "TemplateId": 1}]
        ome_connection_mock_for_compliance_info.get_all_report_details.return_value = {"report_list": report_list}
        f_module = self.get_module_mock(params={'baseline': "baseline_one"})
        # The template id is not asserted here; only the baseline id lookup is under test.
        base_id, _ = self.module.get_baseline_id(f_module, "baseline_one", ome_connection_mock_for_compliance_info)
        with pytest.raises(Exception) as exc:
            self.module.get_baseline_id(f_module, "baseline_two", ome_connection_mock_for_compliance_info)
        assert exc.value.args[0] == "Unable to complete the operation because the entered " \
                                    "target baseline name 'baseline_two' is invalid."
        assert base_id == 1

    def test_compliance_report(self, ome_connection_mock_for_compliance_info, mocker, ome_response_mock):
        """compliance_report augments each device entry with its attribute groups."""
        value_list = [{"Id": 25011, "TemplateId": 1}]
        ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = {"value": value_list}
        # FIX: the original patched get_baseline_id twice (first with 25011, then with
        # (1, 1)); only the effective (1, 1) patch is kept.
        mocker.patch(MODULE_PATH + 'get_baseline_id', return_value=(1, 1))
        f_module = self.get_module_mock(params={'baseline': "baseline_one"})
        ome_response_mock.json_data = {"value": [{"Id": 25011, "TemplateId": 1}]}
        report = self.module.compliance_report(f_module, ome_connection_mock_for_compliance_info)
        assert report == [{'Id': 25011, 'ComplianceAttributeGroups': None, 'TemplateId': 1}]

    def test_main_exception(self, ome_connection_mock_for_compliance_info, mocker,
                            ome_response_mock, ome_default_args):
        """main() completes without reporting a change for a successful report fetch.

        NOTE(review): despite its name this test exercises the success path, not an
        exception path — consider renaming once the suite is otherwise green.
        """
        ome_default_args.update({"baseline": "baseline_one", "device_id": 25011})
        mocker.patch(MODULE_PATH + 'compliance_report')  # return value is not inspected
        ome_response_mock.status_code = 200
        ome_response_mock.success = True
        ome_response_mock.json_data = {"report": "compliance_report"}
        report = self._run_module(ome_default_args)
        assert report["changed"] is False
a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py new file mode 100644 index 00000000..f92a0abe --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ssl import SSLError +from io import StringIO +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_group +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text + +netaddr = pytest.importorskip("netaddr") + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_group.' +ADD_STATIC_GROUP_MESSAGE = "Devices can be added only to the static device groups created using OpenManage Enterprise." +REMOVE_STATIC_GROUP_MESSAGE = "Devices can be removed only from the static device groups created using OpenManage Enterprise." +INVALID_IP_FORMAT = "The format {0} of the IP address provided is not supported or invalid." +IP_NOT_EXISTS = "The IP addresses provided do not exist in OpenManage Enterprise." 
+try: + from netaddr import IPAddress, IPNetwork, IPRange + from netaddr.core import AddrFormatError + + HAS_NETADDR = True +except ImportError: + HAS_NETADDR = False + + +@pytest.fixture +def ome_connection_mock_for_device_group(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []} + return ome_connection_mock_obj + + +class TestOMEDeviceGroup(FakeAnsibleModule): + module = ome_device_group + + def test_ome_device_group_get_group_id_case01(self, ome_connection_mock_for_device_group, ome_response_mock): + f_module = self.get_module_mock(params={"name": "Storage Services", + "device_ids": [25011], "device_service_tags": []}) + ome_response_mock.json_data = {"value": []} + with pytest.raises(Exception) as exc: + self.module.get_group_id(ome_connection_mock_for_device_group, f_module) + assert exc.value.args[0] == "Unable to complete the operation because the entered " \ + "target group name 'Storage Services' is invalid." 
+ ome_response_mock.json_data = {"value": [{"Id": 25011, "CreatedBy": "user", + "TypeId": 3000, "MembershipTypeId": 12}]} + resp = self.module.get_group_id(ome_connection_mock_for_device_group, f_module) + assert resp == 25011 + + def test_ome_device_group_get_group_id_case02(self, ome_connection_mock_for_device_group, ome_response_mock): + f_module = self.get_module_mock(params={"group_id": 1234, + "device_ids": [25011], "device_service_tags": []}) + ome_connection_mock_for_device_group.invoke_request.side_effect = HTTPError('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(to_text(json.dumps( + {"info": "error_details"})))) + with pytest.raises(Exception) as exc1: + self.module.get_group_id(ome_connection_mock_for_device_group, f_module) + assert exc1.value.args[0] == "Unable to complete the operation because the entered " \ + "target group Id '1234' is invalid." + + def test_ome_device_group_get_group_id_case03(self, ome_connection_mock_for_device_group, ome_response_mock): + f_module = self.get_module_mock(params={"group_id": 1234, + "device_ids": [25011], "device_service_tags": []}) + ome_response_mock.json_data = {"Id": 1234, "CreatedBy": "user", + "TypeId": 3000, "MembershipTypeId": 12} + resp = self.module.get_group_id(ome_connection_mock_for_device_group, f_module) + assert resp == 1234 + + def test_ome_device_group_get_device_id(self, ome_connection_mock_for_device_group): + report_list = [{"Id": 25011, "DeviceServiceTag": "SEFRG2"}, {"Id": 25012, "DeviceServiceTag": "SEFRG3"}] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "Storage Services", + "device_ids": [25011, 25012]}) + device_list, key = self.module.get_device_id(ome_connection_mock_for_device_group, f_module) + assert device_list == [25011, 25012] + assert key == "Id" + f_module = self.get_module_mock(params={"name": "Storage Services", 
+ "device_service_tags": ["SEFRG2", "SEFRG3"]}) + device_list, key = self.module.get_device_id(ome_connection_mock_for_device_group, f_module) + assert device_list == [25011, 25012] + assert key == "DeviceServiceTag" + + f_module = self.get_module_mock(params={"name": "Storage Services", + "device_ids": [25011, 25000]}) + with pytest.raises(Exception) as exc: + self.module.get_device_id(ome_connection_mock_for_device_group, f_module) + assert exc.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id(s) '25000' are invalid." + + def test_ome_device_group_add_member_to_group(self, ome_connection_mock_for_device_group, ome_response_mock): + report_list = [{"Id": 25011, "DeviceServiceTag": "SEFRG2"}] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "Storage Services", + "device_ids": [25011]}) + ome_response_mock.status_code = 204 + ome_response_mock.success = True + with pytest.raises(Exception) as exc: + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, + 1, [25011], "Id") + assert exc.value.args[0] == "No changes found to be applied." + + f_module.check_mode = True + with pytest.raises(Exception) as exc: + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, + 1, [25011], "Id") + assert exc.value.args[0] == "No changes found to be applied." 
+ + f_module.check_mode = False + report_list = [{"Id": 25013, "DeviceServiceTag": "SEFRG4"}, {"Id": 25014, "DeviceServiceTag": "SEFRG5"}] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + resp, [] = self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, + 1, [25011, 25012], "Id") + assert resp.status_code == 204 + + f_module.check_mode = True + with pytest.raises(Exception) as exc: + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, + 1, [25011, 25012], "Id") + assert exc.value.args[0] == "Changes found to be applied." + + def test_ome_device_group_main_exception(self, ome_connection_mock_for_device_group, mocker, + ome_response_mock, ome_default_args): + ome_default_args.update({"name": "Storage Services", "device_ids": [25011, 25012]}) + ome_response_mock.status_code = 204 + ome_response_mock.success = True + mocker.patch(MODULE_PATH + 'get_group_id', return_value=1) + mocker.patch(MODULE_PATH + 'get_device_id', return_value=[25011, 25012]) + mocker.patch(MODULE_PATH + 'add_member_to_group', return_value=(ome_response_mock, [])) + result = self._run_module(ome_default_args) + assert result['msg'] == "Successfully added member(s) to the device group." 
+ + def test_ome_device_group_argument_exception_case1(self, ome_default_args): + ome_default_args.update({"name": "Storage Services", "device_ids": [25011, 25012], "group_id": 1234}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: name|group_id" + + def test_ome_device_group_argument_exception_case2(self, ome_default_args): + ome_default_args.update( + {"device_ids": [25011, 25012], "group_id": 1234, "device_service_tags": [Constants.service_tag1]}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_ids|device_service_tags|ip_addresses" + + def test_ome_device_group_argument_exception_case3(self, ome_default_args): + ome_default_args.update({"device_ids": [25011, 25012]}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "one of the following is required: name, group_id" + + def test_ome_device_group_argument_exception_case4(self, ome_default_args): + ome_default_args.update( + {"group_id": 1234}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "one of the following is required: device_ids, device_service_tags, ip_addresses" + + def test_ome_device_group_argument_exception_case5(self, ome_default_args): + ome_default_args.update( + {"device_ids": None, "group_id": 1234, "device_service_tags": None}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: device_ids|device_service_tags|ip_addresses" + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_group_argument_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_device_group, + ome_response_mock): + ome_default_args.update({"name": "Storage Services", "device_ids": [25011, 25012]}) + 
ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_group_id', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_group_id', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_group_id', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + @pytest.mark.parametrize("inp", [{"TypeId": 3000, "MembershipTypeId": 24}, + {"TypeId": 1000, "MembershipTypeId": 24}, + {"TypeId": 2000, "MembershipTypeId": 12}]) + def test_validate_group_case01(self, inp, ome_response_mock): + group_resp = {"Id": 25011, "CreatedBy": "user", "TypeId": inp["TypeId"], + "MembershipTypeId": inp["MembershipTypeId"]} + f_module = self.get_module_mock(params={"name": "group1", + "device_ids": [25011]}) + with pytest.raises(Exception) as exc: + self.module.validate_group(group_resp, f_module, "name", "group1") + assert exc.value.args[0] == ADD_STATIC_GROUP_MESSAGE + + @pytest.mark.parametrize("inp", [{"TypeId": 3000, "MembershipTypeId": 24}, + {"TypeId": 1000, "MembershipTypeId": 24}, + {"TypeId": 2000, "MembershipTypeId": 12}]) + def test_validate_group_case02(self, inp, ome_response_mock): + group_resp = {"Id": 25011, "CreatedBy": "user", "TypeId": inp["TypeId"], + "MembershipTypeId": inp["MembershipTypeId"]} + f_module = self.get_module_mock(params={"name": "group1", + "device_ids": [25011], + "state": "absent"}) + with pytest.raises(Exception) as exc: + 
self.module.validate_group(group_resp, f_module, "name", "group1") + assert exc.value.args[0] == REMOVE_STATIC_GROUP_MESSAGE + + @pytest.mark.parametrize("inp,out", [(['192.168.2.0'], [IPAddress('192.168.2.0')]), + (['fe80::ffff:ffff:ffff:ffff'], [IPAddress('fe80::ffff:ffff:ffff:ffff')]), + (['192.168.2.0/24'], [IPNetwork('192.168.2.0/24')]), + (['fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff'], + [IPRange('fe80::ffff:ffff:ffff:1111', 'fe80::ffff:ffff:ffff:ffff')]), + (['192.168.2.0', 'fe80::ffff:ffff:ffff:ffff', + '192.168.2.0/24', 'fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff', + '2002:c000:02e6::1/48'], [IPAddress('192.168.2.0'), + IPAddress('fe80::ffff:ffff:ffff:ffff'), + IPNetwork('192.168.2.0/24'), + IPRange('fe80::ffff:ffff:ffff:1111', + 'fe80::ffff:ffff:ffff:ffff'), + IPNetwork( + '2002:c000:02e6::1/48')])]) + def test_get_all_ips_success_case(self, inp, out): + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": inp}) + res = self.module.get_all_ips(inp, f_module) + assert res == out + + @pytest.mark.parametrize("inp", [["abc"], [""], ["266.128"], ["100:1bcd:xyz"], ["192.168.0.0--192.168.0.1"], + ["-192.168.0.0-192.168.0.1"], ["-192.168.0.0192.168.0.1"], + ["192.168.0.0-192.168.0.1-"], ["192.168.0.0192.168.0.1-"], + ["192.168.0.1//24"], + ["\192.168.0.1//24"], + ["192.168.0.1/\24"], + ["/192.168.0.1/24"], + ["1.12.1.36/255.255.255.88"]], + ids=["abc", "", "266.128", "100:1bcd:xyz", "192.168.0.0--192.168.0.1", + "-192.168.0.0-192.168.0.1", "-192.168.0.0192.168.0.1", "192.168.0.0-192.168.0.1-", + "192.168.0.0192.168.0.1-", "192.168.0.1//24", "\192.168.0.1//24", + "192.168.0.1/\24", "/192.168.0.1/24", "1.12.1.36/255.255.255.88"]) + def test_get_all_ips_failure_case(self, inp): + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": inp}) + with pytest.raises(Exception, match=INVALID_IP_FORMAT.format(inp[0])) as err: + self.module.get_all_ips(inp, f_module) + + def 
test_get_device_id_from_ip_success_case(self): + device_list = [ + { + "Id": 1111, + "Identifier": "device1", + "DeviceServiceTag": "device1", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.2.255", + } + ], + }, + { + "Id": 2222, + "Identifier": "device2", + "DeviceServiceTag": "device2", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.4.10", + } + ], + }, + { + "Id": 3333, + "Identifier": "device3", + "DeviceServiceTag": "device3", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.2.10", + } + ], + }, + { + "Id": 4444, + "Identifier": "device4", + "DeviceServiceTag": "device4", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.3.10", + } + ], + }, + { + "Id": 5555, + "Identifier": "device5", + "DeviceServiceTag": "device5", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.4.3", + } + ], + }, + { + "Id": 6666, + "Identifier": "device6", + "DeviceServiceTag": "device6", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.3.11", + } + ], + }, + { + "Id": 7777, + "Identifier": "device7", + "DeviceServiceTag": "device7", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.3.0", + } + ], + }, + { + "Id": 8888, + "Identifier": "device8", + "DeviceServiceTag": "device8", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.4.1", + } + ], + }, + { + "Id": 9999, + "Identifier": "device9", + "DeviceServiceTag": "device9", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.4.5", + } + ], + }, + { + "Id": 1010, + "Identifier": "device10", + "DeviceServiceTag": "device10", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.4.9", + } + ], + }, + { + "Id": 1011, + "Identifier": "device11", + "DeviceServiceTag": "device11", + "DeviceManagement": [ + { + "NetworkAddress": "[fe80::de0:b6b3:a764:0]", + } + ], + }, + { + "Id": 1012, + "Identifier": "device11", + "DeviceServiceTag": "device11", + "DeviceManagement": [ + { + "NetworkAddress": "[fe90::de0:b6b3:a764:0]", + } + ], + } + ] + output = {3333: "192.168.2.10", 
4444: "192.168.3.10", + 5555: "192.168.4.3", 6666: "192.168.3.11", 7777: "192.168.3.0", + 8888: "192.168.4.1", 9999: "192.168.4.5", 1010: "192.168.4.9", + 1011: "fe80::de0:b6b3:a764:0"} + ip_addresses = [IPNetwork("::ffff:192.168.2.0/125"), IPAddress("192.168.2.10"), + IPAddress('fe80::ffff:ffff:ffff:ffff'), + IPNetwork('fe80::ffff:ffff:ffff:ffff/24'), + IPNetwork('192.168.3.0/24'), IPRange('192.168.4.1', '192.168.4.9')] + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["::ffff:192.168.2.0/125", + "192.168.2.10", + 'fe80::ffff:ffff:ffff:ffff', + '192.168.3.0/24', + '192.168.4.1-192.168.4.9', + 'fe80::ffff:ffff:ffff:ffff/24']}) + res = self.module.get_device_id_from_ip(ip_addresses, device_list, f_module) + assert res == output + + def test_get_device_id_from_ip_failure_case(self): + device_list = [ + { + "Id": 1111, + "Identifier": "device1", + "DeviceServiceTag": "device1", + "DeviceManagement": [ + { + "NetworkAddress": "192.168.2.255", + } + ], + }, + ] + ip_addresses = [IPNetwork("::ffff:192.168.2.0/125"), IPAddress("192.168.2.10"), + IPAddress('fe80::ffff:ffff:ffff:ffff'), + IPNetwork('fe80::ffff:ffff:ffff:ffff/24'), + IPNetwork('192.168.3.0/24'), IPRange('192.168.4.1', '192.168.4.9')] + with pytest.raises(Exception, match=IP_NOT_EXISTS): + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["::ffff:192.168.2.0/125", + "192.168.2.10", + 'fe80::ffff:ffff:ffff:ffff', + '192.168.3.0/24', + '192.168.4.1-192.168.4.9', + 'fe80::ffff:ffff:ffff:ffff/24']}) + self.module.get_device_id_from_ip(ip_addresses, device_list, f_module) + + # def test_add_member_to_group_case01(self, ome_connection_mock_for_device_group, ome_response_mock): + # report_list = [{"Id": 3333, "DeviceServiceTag": "device1", + # "DeviceManagement": [{"NetworkAddress": "192.168.2.10"}, + # ]}, + # {"Id": 1013, "DeviceServiceTag": "device1", + # "DeviceManagement": [{"NetworkAddress": "192.168.5.10"}, + # ]} + # ] + # 
ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + # f_module = self.get_module_mock(params={"name": "group1", + # "ip_addresses": ["::ffff:192.168.2.0/125", + # "192.168.2.10", + # 'fe80::ffff:ffff:ffff:ffff', + # '192.168.3.0/24', + # '192.168.4.1-192.168.4.9', + # 'fe80::ffff:ffff:ffff:ffff/24']}) + # device_id = {3333: "192.168.2.10", 4444: "192.168.3.10", + # 5555: "192.168.4.3", + # 1011: "fe80::de0:b6b3:a764:0"} + # ome_response_mock.status_code = 204 + # added_ips_out = ["192.168.3.10", "192.168.4.3", "fe80::de0:b6b3:a764:0"] + # resp, added_ips = self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, 1, device_id, + # "IPAddresses") + # assert resp.status_code == 204 + # assert added_ips == added_ips_out + + def test_add_member_to_group_checkmode_case01(self, ome_connection_mock_for_device_group, ome_response_mock): + report_list = [{"Id": 3333, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.2.10"}, + ]}, + {"Id": 1013, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.5.10"}, + ]} + ] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["::ffff:192.168.2.0/125", + "192.168.2.10", + 'fe80::ffff:ffff:ffff:ffff', + '192.168.3.0/24', + '192.168.4.1-192.168.4.9', + 'fe80::ffff:ffff:ffff:ffff/24']}, check_mode=True) + device_id = {3333: "192.168.2.10", 4444: "192.168.3.10", + 5555: "192.168.4.3", + 1011: "fe80::de0:b6b3:a764:0"} + with pytest.raises(Exception, match="Changes found to be applied."): + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, 1, device_id, "IPAddresses") + + def test_add_member_to_group_checkmode_case02(self, ome_connection_mock_for_device_group, ome_response_mock): + report_list = [{"Id": 3333, "DeviceServiceTag": "device1", + 
"DeviceManagement": [{"NetworkAddress": "192.168.2.10"}, + ]}, + {"Id": 1013, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.5.10"}, + ]} + ] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["192.168.2.10"]}, check_mode=True) + device_id = {3333: "192.168.2.10"} + with pytest.raises(Exception, match="No changes found to be applied."): + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, 1, device_id, "IPAddresses") + + def test_add_member_to_group_idempotency_case(self, ome_connection_mock_for_device_group, ome_response_mock): + report_list = [{"Id": 3333, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.2.10"}, + ]}, + {"Id": 1013, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.5.10"}, + ]} + ] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["192.168.2.10"]}) + device_id = {3333: "192.168.2.10"} + with pytest.raises(Exception) as exc: + self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, 1, device_id, "IPAddresses") + + assert exc.value.args[0] == "No changes found to be applied." 
+ + def test_ome_device_group_main_ip_address_case(self, ome_connection_mock_for_device_group, mocker, + ome_response_mock, ome_default_args): + ome_default_args.update({"name": "Storage Services", "ip_addresses": ["192.168.2.10"]}) + ome_response_mock.status_code = 204 + ome_response_mock.success = True + mocker.patch(MODULE_PATH + 'get_group_id', return_value=1) + mocker.patch(MODULE_PATH + 'get_device_id', return_value=[25011, 25012]) + mocker.patch(MODULE_PATH + 'add_member_to_group', return_value=(ome_response_mock, ["192.168.2.10"])) + result = self._run_module(ome_default_args) + assert result['msg'] == "Successfully added member(s) to the device group." + assert result['ip_addresses_added'] == ["192.168.2.10"] + + def test_get_device_id_ip_address_case(self, ome_connection_mock_for_device_group, mocker): + f_module = self.get_module_mock(params={"name": "group1", + "ip_addresses": ["192.168.2.10"]}) + mocker.patch(MODULE_PATH + 'get_all_ips', return_value=[IPAddress("192.168.2.10")]) + mocker.patch(MODULE_PATH + 'get_device_id_from_ip', return_value={1111: "192.168.2.10"}) + each_device_list, key = self.module.get_device_id(ome_connection_mock_for_device_group, f_module) + assert key == "IPAddresses" + assert each_device_list == {1111: "192.168.2.10"} + + def test_get_current_member_of_group(self, ome_connection_mock_for_device_group, ome_response_mock): + report_list = [{"Id": 3333, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.2.10"}, + ]}, + {"Id": 1013, "DeviceServiceTag": "device1", + "DeviceManagement": [{"NetworkAddress": "192.168.5.10"}, + ]} + ] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + group_id = 1011 + device_id_list = self.module.get_current_member_of_group(ome_connection_mock_for_device_group, group_id) + assert device_id_list == [3333, 1013] + + def test_ome_device_group_remove_member_from_group(self, ome_connection_mock_for_device_group, 
ome_response_mock): + report_list = [{"Id": 25011, "DeviceServiceTag": "SEFRG2"}] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + f_module = self.get_module_mock(params={"name": "Storage Services", + "device_ids": [25011], + "state": "absent"}) + group_id = 1011 + device_ids = [25011] + current_device_list = [25011] + ome_response_mock.status_code = 204 + ome_response_mock.success = True + resp = self.module.remove_member_from_group(f_module, ome_connection_mock_for_device_group, + group_id, device_ids, current_device_list) + assert resp.status_code == 204 + + f_module.check_mode = True + with pytest.raises(Exception, match="Changes found to be applied.") as exc: + self.module.remove_member_from_group(f_module, ome_connection_mock_for_device_group, + group_id, device_ids, current_device_list) + + f_module.check_mode = False + report_list = [{"Id": 25013, "DeviceServiceTag": "SEFRG4"}, {"Id": 25014, "DeviceServiceTag": "SEFRG5"}] + device_ids = [10000, 24000, 25013, 12345, 25014] + current_device_list = [25013, 25014] + ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list} + resp = self.module.remove_member_from_group(f_module, ome_connection_mock_for_device_group, + group_id, device_ids, current_device_list) + assert resp.status_code == 204 + + current_device_list = [25013, 25014] + device_ids = [25011] + f_module.check_mode = True + with pytest.raises(Exception, match="No changes found to be applied.") as exc: + self.module.remove_member_from_group(f_module, ome_connection_mock_for_device_group, + group_id, device_ids, current_device_list) + + current_device_list = [25013, 25014] + f_module.check_mode = False + device_ids = [] + with pytest.raises(Exception, match="No changes found to be applied.") as exc: + self.module.remove_member_from_group(f_module, ome_connection_mock_for_device_group, + group_id, device_ids, current_device_list) + + def 
test_ome_device_group_main_absent_case(self, ome_connection_mock_for_device_group, mocker, + ome_response_mock, ome_default_args): + ome_default_args.update({"name": "Storage Services", "device_ids": [25011, 25012], "state": "absent"}) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + mocker.patch(MODULE_PATH + 'get_group_id', return_value=1) + mocker.patch(MODULE_PATH + 'get_device_id', return_value=[25011, 25012]) + mocker.patch(MODULE_PATH + 'get_current_member_of_group', return_value=[25011, 25012]) + mocker.patch(MODULE_PATH + 'remove_member_from_group', return_value=(ome_response_mock)) + result = self._run_module(ome_default_args) + assert result['msg'] == "Successfully removed member(s) from the device group." diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py new file mode 100644 index 00000000..bb41b51a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py @@ -0,0 +1,281 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +resource_basic_inventory = {"basic_inventory": "DeviceService/Devices"} +resource_detailed_inventory = {"detailed_inventory:": {"device_id": {Constants.device_id1: None}, + "device_service_tag": { + Constants.device_id2: Constants.service_tag1}}} + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestOmeDeviceInfo(FakeAnsibleModule): + module = ome_device_info + + @pytest.fixture + def validate_device_inputs_mock(self, mocker): + validate_device_inputs_mock = mocker.patch(MODULE_PATH + 'ome_device_info._validate_inputs') + validate_device_inputs_mock.return_value = None + + @pytest.fixture + def get_device_resource_parameters_mock(self, mocker): + response_class_mock = mocker.patch(MODULE_PATH + 'ome_device_info._get_resource_parameters', + return_value=resource_basic_inventory) + return response_class_mock + + def test_main_basic_inventory_success_case(self, ome_default_args, module_mock, validate_device_inputs_mock, + ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + ome_response_mock.json_data = {"@odata.context": "/api/$metadata#Collection(DeviceService.Device)", + "@odata.count": 1} + increment_device_details = {"resp_obj": ome_response_mock, + "report_list": [{"DeviceServiceTag": Constants.service_tag1, + "Id": Constants.device_id1}]} + ome_connection_mock.get_all_report_details.return_value = increment_device_details + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert 
result['changed'] is False + assert 'device_info' in result + assert result["device_info"] == {"@odata.context": "/api/$metadata#Collection(DeviceService.Device)", + "@odata.count": 1, + "value": [{"DeviceServiceTag": Constants.service_tag1, + "Id": Constants.device_id1}]} + + def test_main_basic_inventory_query_param_success_case(self, mocker, ome_default_args, module_mock, + validate_device_inputs_mock, ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + quer_param_mock = mocker.patch(MODULE_PATH + 'ome_device_info._get_query_parameters') + quer_param_mock.return_value = {"filter": "Type eq '1000'"} + ome_response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'device_info' in result + assert result["device_info"] == {"value": [{"device_id1": "details", "device_id2": "details"}]} + + def test_main_basic_inventory_failure_case(self, ome_default_args, module_mock, validate_device_inputs_mock, + ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + ome_response_mock.status_code = 500 + ome_response_mock.json_data = {"@odata.context": "/api/$metadata#Collection(DeviceService.Device)", + "@odata.count": 0} + ome_connection_mock.get_all_report_details.return_value = {"resp_obj": ome_response_mock, "report_list": []} + result = self._run_module(ome_default_args) + assert result['msg'] == 'No devices present.' 
+ + def test_main_detailed_inventory_success_case(self, ome_default_args, module_mock, validate_device_inputs_mock, + ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + ome_default_args.update( + {"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [Constants.device_id1], + "device_service_tag": [ + Constants.service_tag1]}}) + detailed_inventory = {"detailed_inventory:": { + "device_id": {Constants.device_id1: "DeviceService/Devices(Constants.device_id1)/InventoryDetails"}, + "device_service_tag": {Constants.service_tag1: "DeviceService/Devices(4321)/InventoryDetails"}}} + get_device_resource_parameters_mock.return_value = detailed_inventory + ome_response_mock.json_data = { + "value": [{"device_id": {"1234": "details"}}, {"device_service_tag": {Constants.service_tag1: "details"}}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'device_info' in result + + def test_main_detailed_inventory_http_error_case(self, ome_default_args, module_mock, validate_device_inputs_mock, + ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + ome_default_args.update( + {"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [Constants.device_id1], + "device_service_tag": [ + Constants.service_tag1]}}) + detailed_inventory = {"detailed_inventory:": { + "device_id": {Constants.device_id1: "DeviceService/Devices(Constants.device_id1)/InventoryDetails"}, + "device_service_tag": {Constants.service_tag1: "DeviceService/Devices(4321)/InventoryDetails"}}} + get_device_resource_parameters_mock.return_value = detailed_inventory + ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None) + result = self._run_module(ome_default_args) + assert 'device_info' in result + + def test_main_HTTPError_error_case(self, ome_default_args, module_mock, validate_device_inputs_mock, + 
ome_connection_mock, + get_device_resource_parameters_mock, ome_response_mock): + ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None) + ome_response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]} + ome_response_mock.status_code = 400 + result = self._run_module(ome_default_args) + assert 'device_info' in result + + @pytest.mark.parametrize("fact_subset, mutually_exclusive_call", + [("basic_inventory", False), ("detailed_inventory", True)]) + def test_validate_inputs(self, fact_subset, mutually_exclusive_call, mocker): + module_params = {"fact_subset": fact_subset} + check_mutually_inclusive_arguments_mock = mocker.patch(MODULE_PATH + + 'ome_device_info._check_mutually_inclusive_arguments') + check_mutually_inclusive_arguments_mock.return_value = None + self.module._validate_inputs(module_params) + if mutually_exclusive_call: + check_mutually_inclusive_arguments_mock.assert_called() + else: + check_mutually_inclusive_arguments_mock.assert_not_called() + check_mutually_inclusive_arguments_mock.reset_mock() + + system_query_options_params = [{"system_query_options": None}, {"system_query_options": {"device_id": None}}, + {"system_query_options": {"device_service_tag": None}}] + + @pytest.mark.parametrize("system_query_options_params", system_query_options_params) + def test_check_mutually_inclusive_arguments(self, system_query_options_params): + module_params = {"fact_subset": "subsystem_health"} + required_args = ["device_id", "device_service_tag"] + module_params.update(system_query_options_params) + with pytest.raises(ValueError) as ex: + self.module._check_mutually_inclusive_arguments(module_params["fact_subset"], module_params, + ["device_id", "device_service_tag"]) + assert "One of the following {0} is required for {1}".format(required_args, + module_params["fact_subset"]) == str(ex.value) + + params = [{"fact_subset": "basic_inventory", "system_query_options": {"device_id": 
[Constants.device_id1]}}, + {"fact_subset": "subsystem_health", + "system_query_options": {"device_service_tag": [Constants.service_tag1]}}, + {"fact_subset": "detailed_inventory", + "system_query_options": {"device_id": [Constants.device_id1], "inventory_type": "serverDeviceCards"}}] + + @pytest.mark.parametrize("module_params", params) + def test_get_resource_parameters(self, module_params, ome_connection_mock): + self.module._get_resource_parameters(module_params, ome_connection_mock) + + @pytest.mark.parametrize("module_params,data", [({"system_query_options": None}, None), + ({"system_query_options": {"fileter": None}}, None), + ({"system_query_options": {"filter": "abc"}}, "$filter")]) + def test_get_query_parameters(self, module_params, data): + res = self.module._get_query_parameters(module_params) + if data is not None: + assert data in res + else: + assert res is None + + @pytest.mark.parametrize("module_params", params) + def test_get_device_identifier_map(self, module_params, ome_connection_mock, mocker): + get_device_id_from_service_tags_mock = mocker.patch(MODULE_PATH + + 'ome_device_info._get_device_id_from_service_tags') + get_device_id_from_service_tags_mock.return_value = None + res = self.module._get_device_identifier_map(module_params, ome_connection_mock) + assert isinstance(res, dict) + + def test_check_duplicate_device_id(self): + self.module._check_duplicate_device_id([Constants.device_id1], + {Constants.device_id1: Constants.service_tag1}) + assert self.module.device_fact_error_report[Constants.service_tag1] == "Duplicate report of device_id: 1234" + + @pytest.mark.parametrize("val,expected_res", [(123, True), ("abc", False)]) + def test_is_int(self, val, expected_res): + actual_res = self.module.is_int(val) + assert actual_res == expected_res + + def test_get_device_id_from_service_tags(self, ome_connection_mock, ome_response_mock, mocker): + mocker.patch(MODULE_PATH + 'ome_device_info.update_device_details_with_filtering') + 
ome_response_mock.json_data.update({"@odata.context": "/api/$metadata#Collection(DeviceService.Device)"}) + ome_response_mock.json_data.update({"@odata.count": 1}) + ome_connection_mock.get_all_report_details.return_value = {"resp_obj": ome_response_mock, "report_list": [ + {"DeviceServiceTag": Constants.service_tag1, + "Id": Constants.device_id1}]} + self.module._get_device_id_from_service_tags([Constants.service_tag1, "INVALID"], ome_connection_mock) + + def test_get_device_id_from_service_tags_error_case(self, ome_connection_mock, ome_response_mock): + ome_connection_mock.get_all_report_details.side_effect = HTTPError('http://testhost.com', 400, '', {}, None) + with pytest.raises(HTTPError) as ex: + self.module._get_device_id_from_service_tags(["INVALID"], ome_connection_mock) + + def test_update_device_details_with_filtering_success_case_01(self, ome_connection_mock, ome_response_mock): + non_available_tags = [Constants.service_tag2] + service_tag_dict = {Constants.device_id1: Constants.service_tag1} + ome_response_mock.json_data = { + "value": [{"DeviceServiceTag": Constants.service_tag2, "Id": Constants.device_id2}]} + self.module.update_device_details_with_filtering(non_available_tags, service_tag_dict, ome_connection_mock) + assert service_tag_dict[Constants.device_id1] == Constants.service_tag1 + assert service_tag_dict[Constants.device_id2] == Constants.service_tag2 + assert len(non_available_tags) == 0 + + def test_update_device_details_with_filtering_success_case_02(self, ome_connection_mock, ome_response_mock): + non_available_tags = ["MX700"] + service_tag_dict = {Constants.device_id1: Constants.service_tag1} + ome_response_mock.json_data = {"value": [{"DeviceServiceTag": "MX7000", "Id": Constants.device_id2}]} + self.module.update_device_details_with_filtering(non_available_tags, service_tag_dict, ome_connection_mock) + assert service_tag_dict[Constants.device_id1] == Constants.service_tag1 + assert Constants.device_id2 not in service_tag_dict + 
assert len(non_available_tags) == 1 + + def test_update_device_details_with_filtering_failure_case_01(self, ome_connection_mock, ome_response_mock): + error_msg = '400: Bad Request' + service_tag_dict = {} + non_available_tags = [Constants.service_tag2] + ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, error_msg, {}, None) + with pytest.raises(HTTPError, match=error_msg) as ex: + self.module.update_device_details_with_filtering(non_available_tags, service_tag_dict, ome_connection_mock) + + def test_main_detailed_inventory_device_fact_error_report_case_01(self, ome_default_args, module_mock, + validate_device_inputs_mock, ome_connection_mock, + get_device_resource_parameters_mock, + ome_response_mock): + ome_default_args.update( + {"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [Constants.device_id1], + "device_service_tag": [ + Constants.service_tag1]}}) + detailed_inventory = { + "detailed_inventory:": { + "device_id": { + Constants.device_id1: "DeviceService/Devices(Constants.device_id1)/InventoryDetails" + }, + "device_service_tag": { + Constants.service_tag1: "DeviceService/Devices(4321)/InventoryDetails" + } + } + } + get_device_resource_parameters_mock.return_value = detailed_inventory + ome_response_mock.json_data = {"value": [{"device_id": {Constants.device_id1: "details"}}, + {"device_service_tag": {Constants.service_tag1: "details"}}]} + ome_response_mock.status_code = 200 + self.module.device_fact_error_report = { + Constants.service_tag1: "Duplicate report of device_id: {0}".format(Constants.device_id1)} + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'device_info' in result + + def test_main_detailed_inventory_device_fact_error_report_case_02(self, ome_default_args, module_mock, + validate_device_inputs_mock, + ome_connection_mock, + get_device_resource_parameters_mock, + ome_response_mock): + ome_default_args.update( + {"fact_subset": 
"detailed_inventory", "system_query_options": {"device_id": [Constants.device_id1], + "device_service_tag": [ + Constants.service_tag1]}}) + detailed_inventory = { + "device_service_tag": { + Constants.service_tag1: "DeviceService/Devices(4321)/InventoryDetails" + } + } + get_device_resource_parameters_mock.return_value = detailed_inventory + ome_response_mock.json_data = {"value": [{"device_id": {Constants.device_id1: "details"}}, + {"device_service_tag": {Constants.service_tag1: "details"}}]} + ome_response_mock.status_code = 200 + self.module.device_fact_error_report = { + Constants.service_tag1: "Duplicate report of device_id: {0}".format(Constants.device_id1)} + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'device_info' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py new file mode 100644 index 00000000..23bae781 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_local_access_configuration +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_local_access_configuration.' + + +@pytest.fixture +def ome_conn_mock_lac(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMDevicePower(FakeAnsibleModule): + + module = ome_device_local_access_configuration + + def test_check_domain_service(self, ome_conn_mock_lac, ome_default_args): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_lac) + assert result is None + + def test_get_chassis_device(self, ome_conn_mock_lac, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1") + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.168.1.1"]}, + {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE", + "PublicAddress": ["192.168.1.2"]}]} + param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True} + f_module = 
self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.get_chassis_device(f_module, ome_conn_mock_lac) + assert err.value.args[0] == "Unable to retrieve the device information." + + def test_get_ip_from_host(self, ome_conn_mock_lac, ome_default_args, ome_response_mock): + result = self.module.get_ip_from_host("192.168.0.1") + assert result == "192.168.0.1" + + def test_get_device_details(self, ome_conn_mock_lac, ome_default_args, ome_response_mock, mocker): + param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True} + f_module = self.get_module_mock(params=param) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = { + "value": [], "SettingType": "LocalAccessConfiguration", "EnableChassisDirect": False, + "EnableChassisPowerButton": False, "EnableKvmAccess": True, "EnableLcdOverridePin": False, + "LcdAccess": "VIEW_ONLY", "LcdCustomString": "LCD Text", "LcdLanguage": "en", } + with pytest.raises(Exception) as err: + self.module.get_device_details(ome_conn_mock_lac, f_module) + assert err.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id '25012' is invalid." 
+ param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True} + f_module = self.get_module_mock(params=param) + ome_response_mock.json_data = {"value": [{"Id": 25012, "DeviceServiceTag": "GHRT2RL"}], "EnableKvmAccess": True} + mocker.patch(MODULE_PATH + 'check_mode_validation', return_value={"EnableKvmAccess": True}) + resp = self.module.get_device_details(ome_conn_mock_lac, f_module) + assert resp.json_data["EnableKvmAccess"] is True + param = {"hostname": "192.168.1.6", "enable_kvm_access": True} + f_module = self.get_module_mock(params=param) + mocker.patch(MODULE_PATH + 'get_chassis_device', return_value=("Id", 25011)) + resp = self.module.get_device_details(ome_conn_mock_lac, f_module) + assert resp.json_data["EnableKvmAccess"] is True + + def test_check_mode_validation(self, ome_conn_mock_lac, ome_default_args, ome_response_mock, mocker): + loc_data = {"EnableKvmAccess": True, "EnableChassisDirect": True, "EnableChassisPowerButton": True, + "EnableLcdOverridePin": True, "LcdAccess": True, "LcdCustomString": "LCD Text", + "LcdLanguage": "en", "LcdOverridePin": 123456, "LcdPresence": "Present", + "QuickSync": {"QuickSyncAccess": True, "TimeoutLimit": 10, "EnableInactivityTimeout": True, + "TimeoutLimitUnit": "MINUTES", "EnableReadAuthentication": True, + "EnableQuickSyncWifi": True, "QuickSyncHardware": "Present"}, } + param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + assert err.value.args[0] == "No changes found to be applied." + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + assert err.value.args[0] == "No changes found to be applied." 
+ param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": False} + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + assert err.value.args[0] == "Changes found to be applied." + f_module.check_mode = False + result = self.module.check_mode_validation(f_module, loc_data) + assert result["EnableKvmAccess"] is False + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_power_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_lac, ome_response_mock): + ome_default_args.update({"device_id": 25011, "enable_kvm_access": True}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py new file mode 100644 index 00000000..8133e016 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.3.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_location +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_location.' 
+ + +@pytest.fixture +def ome_conn_mock_location(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMDeviceLocation(FakeAnsibleModule): + + module = ome_device_location + + def test_check_domain_service(self, ome_conn_mock_location, ome_default_args, mocker): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_location) + assert result is None + + def test_standalone_chassis(self, ome_conn_mock_location, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1") + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.168.1.1"]}, + {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE", + "PublicAddress": ["192.168.1.2"]}]} + + param = {"data_center": "data center 1", "rack_slot": 2, "device_id": 25012, "hostname": "192.168.1.6", + "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.standalone_chassis(f_module, ome_conn_mock_location) + assert err.value.args[0] == "Failed to fetch the device information." 
+ + def test_validate_dictionary(self, ome_conn_mock_location, ome_default_args, mocker): + param = {"data_center": "data center 1", "rack_slot": 2, + "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"} + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + loc_resp = {"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1", + "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"} + with pytest.raises(Exception) as err: + self.module.validate_dictionary(f_module, loc_resp) + loc_resp = {"DataCenter": "data center 1", "RackSlot": 3, "Room": "room 1", + "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"} + with pytest.raises(Exception) as err: + self.module.validate_dictionary(f_module, loc_resp) + assert err.value.args[0] == "Changes found to be applied." + loc_resp = {"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1", + "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"} + f_module.check_mode = False + with pytest.raises(Exception) as err: + self.module.validate_dictionary(f_module, loc_resp) + assert err.value.args[0] == "No changes found to be applied." 
+ loc_resp = {"DataCenter": "data center 1", "RackSlot": 3, "Room": "room 1", + "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"} + result = self.module.validate_dictionary(f_module, loc_resp) + assert result == {"DataCenter": "data center 1", "RackSlot": 2, + "Room": "room 1", "Aisle": "aisle 1", "RackName": "rack 1", + "Location": "location 1", "SettingType": "Location"} + + def test_device_validation(self, ome_conn_mock_location, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "validate_dictionary", + return_value={"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1", + "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1", + "SettingType": "Location"}) + param = {"data_center": "data center 1", "rack_slot": 2, "device_id": 25012, + "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"} + ome_default_args.update(param) + f_module = self.get_module_mock(params=param) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = { + "value": [], "DataCenter": "data center 1", + "RackSlot": 3, "Room": "room 1", "Aisle": "aisle 1", "RackName": "rack 1", + "Location": "location 1", "SettingType": "Location", "result": {"RackSlot": 4}} + with pytest.raises(Exception) as err: + self.module.device_validation(f_module, ome_conn_mock_location) + assert err.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id '25012' is invalid." 
+ + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_location_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_location, ome_response_mock): + ome_default_args.update({"device_id": 25011, "data_center": "data center 1", + "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", + "rack_slot": "2", "location": "location 1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py new file mode 100644 index 00000000..69206143 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.2.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_mgmt_network +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_mgmt_network.' + +DEVICE_NOT_FOUND = "Device with {0} '{1}' not found." +NON_CONFIG_NETWORK = "Network settings for {0} is not configurable." +SUCCESS_MSG = "Successfully applied the network settings." +INVALID_IP = "Invalid {0} address provided for the {1}" +DNS_SETT_ERR1 = "'SecondaryDNS' requires 'PrimaryDNS' to be provided." +DNS_SETT_ERR2 = "'TertiaryDNS' requires both 'PrimaryDNS' and 'SecondaryDNS' to be provided." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+SERVER = 1000 +CHASSIS = 2000 +IO_MODULE = 4000 + + +@pytest.fixture +def ome_connection_mock_for_device_network(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeDeviceMgmtNetwork(FakeAnsibleModule): + module = ome_device_mgmt_network + dns_configuration = {"dns_domain_name": "localdomain", "dns_name": "openmanage-enterprise", + "register_with_dns": False, "auto_negotiation": False, + "network_speed": "10_MB", "use_dhcp_for_dns_domain_name": False} + ipv4_configuration = {"enable_ipv4": True, "enable_dhcp": False, "use_dhcp_to_obtain_dns_server_address": False, + "static_ip_address": "192.168.11.20", "static_subnet_mask": "255.255.255.0", + "static_gateway": "192.168.11.1", "static_preferred_dns_server": "192.168.11.2", + "static_alternate_dns_server": "192.168.11.3"} + ipv6_configuration = {"enable_ipv6": True, "enable_auto_configuration": False, + "static_alternate_dns_server": "2607:f2b1:f081:9:1c8c:f1c7:47e:f121", + "static_gateway": "0000::ffff", + "static_ip_address": "2607:f2b1:f081:9:1c8c:f1c7:47e:f120", + "static_preferred_dns_server": "2607:f2b1:f081:9:1c8c:f1c7:47e:f122", + "static_prefix_length": 0, "use_dhcpv6_to_obtain_dns_server_address": False} + dns_server_settings = {"preferred_dns_server": "192.96.20.181", "alternate_dns_server1": "192.96.20.182"} + management_vlan = {"enable_vlan": True, "vlan_id": 0} + inp_param = { + "hostname": "192.1.2.3", + "password": "password", + "port": 443, + "username": "root", + "device_service_tag": Constants.service_tag1, + "delay": 10, + "dns_configuration": dns_configuration, + "ipv4_configuration": ipv4_configuration, + "ipv6_configuration": ipv6_configuration, + "management_vlan": management_vlan, + "dns_server_settings": dns_server_settings + } + chassis = { + 
"SettingType": "Network", + "MgmtVLANId": "1", + "EnableVLAN": True, + "Ipv4Settings": { + "EnableIPv4": True, + "EnableDHCP": False, + "StaticIPAddress": "192.196.24.176", + "StaticSubnetMask": "255.255.254.0", + "StaticGateway": "192.196.24.1", + "UseDHCPObtainDNSServerAddresses": False, + "StaticPreferredDNSServer": "", + "StaticAlternateDNSServer": "" + }, + "Ipv6Settings": { + "EnableIPv6": False, + "EnableAutoconfiguration": False, + "StaticIPv6Address": "", + "StaticPrefixLength": "0", + "StaticGateway": "", + "UseDHCPv6ObtainDNSServerAddresses": False, + "StaticPreferredDNSServer": "", + "StaticAlternateDNSServer": "" + }, + "GeneralSettings": { + "EnableNIC": True, + "RegisterDNS": False, + "DnsName": "MX-6H5S6Z2", + "UseDHCPForDomainName": False, + "DnsDomainName": "", + "AutoNegotiation": True, + "NetworkSpeed": "1_GB", + "Delay": 0 + } + } + server = {"SettingType": "Network", + "useDHCPToObtainDNSIPv6": "Disabled", + "staticPreferredDNSIPv6": "::", + "currentGatewayIPv4": "192.92.24.1", + "vlanId": "1", + "staticPreferredDNSIPv4": "10.8.8.8", + "staticSubnetMaskIPv4": "255.255.254.0", + "currentIPAddressIPv4": "192.92.24.177", + "enableDHCPIPv4": "Disabled", + "currentIPAddressIPv6": "::", + "staticIPAddressIPv6": "::", + "staticIPAddressIPv4": "192.92.24.177", + "useDHCPToObtainDNSIPv4": "Disabled", + "staticGatewayIPv6": "::", + "staticPrefixLengthIPv6": "64", + "vlanEnable": "Disabled", + "enableAutoConfigurationIPv6": "Enabled", + "staticGatewayIPv4": "192.92.24.1", + "enableIPv6": "Disabled", + "staticAlternateDNSIPv6": "::", + "enableIPv4": "Enabled", + "enableNIC": "Enabled", + "staticAlternateDNSIPv4": "192.96.7.7"} + iom = {"SettingType": "Network", + "MgmtVLANId": "", + "EnableMgmtVLANId": False, + "IomIPv4Settings": { + "EnableIPv4": True, + "EnableDHCP": True, + "StaticIPAddress": "192.96.24.35", + "StaticSubnetMask": "255.255.254.0", + "StaticGateway": "192.96.24.1" + }, + "IomIPv6Settings": { + "EnableIPv6": True, + "StaticIPv6Address": 
"2607:f2b1:f2b1:9:f2b1:f2b1:f2b1:be45", + "StaticPrefixLength": "64", + "StaticGateway": "fe80::f2b1:f2b1:f2b1:9", + "UseDHCPv6": False + }, + "IomDNSSettings": { + "PrimaryDNS": "", + "SecondaryDNS": "", + "TertiaryDNS": "" + }} + + @pytest.mark.parametrize("params", [ + {"module_args": inp_param, "dvc": {"Type": 2000}, "msg": SUCCESS_MSG}, + {"module_args": inp_param, "dvc": {"Type": 1000}, "msg": SUCCESS_MSG}, + {"module_args": inp_param, "dvc": {"Type": 4000}, "msg": SUCCESS_MSG} + ]) + def test_ome_device_mgmt_network_success(self, params, ome_connection_mock_for_device_network, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"IPAddress": "192.1.2.3"} + mocker.patch(MODULE_PATH + 'get_device_details', return_value=params.get("dvc", {"Type": 2000})) + mocker.patch(MODULE_PATH + 'get_network_payload', return_value={"Type": 2000}) + ome_default_args.update(params['module_args']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("params", [ + {"module_args": inp_param, "dvc": {"Type": 3000, "Model": "Unsupported"}, "msg": NON_CONFIG_NETWORK}, ]) + def test_ome_device_mgmt_network_fails(self, params, ome_connection_mock_for_device_network, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = {"IPAddress": "192.1.2.3"} + dvc = params.get("dvc") + mocker.patch(MODULE_PATH + 'get_device_details', return_value=dvc) + mocker.patch(MODULE_PATH + 'get_network_payload', return_value={}) + ome_default_args.update(params['module_args']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['msg'].format(dvc.get('Model')) + + @pytest.mark.parametrize("params", [ + {"module_args": {"device_id": 123, "dns_server_settings": {"alternate_dns_server1": 
"192.96.20.182"}}, + "json_data": {"IomDNSSettings": {"PrimaryDNS": None, "SecondaryDNS": "", "TertiaryDNS": ""}}, + "dvc": {"Type": 4000}, "msg": DNS_SETT_ERR1}]) + def _test_ome_device_mgmt_iom_dns_failure(self, params, ome_connection_mock_for_device_network, + ome_response_mock, ome_default_args, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params.get("json_data") + dvc = params.get("dvc") + mocker.patch(MODULE_PATH + 'get_device_details', return_value=dvc) + ome_default_args.update(params['module_args']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['msg'] + + @pytest.mark.parametrize("addr_param", [{"in": inp_param}, + {"in": {"dns_configuration": {"register_with_dns": True}}}, + {"in": {"management_vlan": {"enable_vlan": True}}} + ]) + def test_validate_input_success(self, addr_param): + f_module = self.get_module_mock(params=addr_param["in"]) + self.module.validate_input(f_module) + + @pytest.mark.parametrize("param", [{"in": inp_param, "device": chassis, "enable_nic": False, "delay": 5, + "diff": {'EnableNIC': False, 'Delay': 5}}, + {"in": inp_param, "device": chassis, "enable_nic": True, + "diff": {'StaticAlternateDNSServer': '2607:f2b1:f081:9:1c8c:f1c7:47e:f121', + 'StaticPreferredDNSServer': '2607:f2b1:f081:9:1c8c:f1c7:47e:f122', + 'StaticGateway': '0000::ffff', 'StaticSubnetMask': '255.255.255.0', + 'StaticIPAddress': '192.168.11.20', + 'StaticIPv6Address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'StaticPrefixLength': 0, 'EnableIPv6': True, 'NetworkSpeed': '10_MB', + 'DnsName': 'openmanage-enterprise', 'AutoNegotiation': False, + 'DnsDomainName': 'localdomain', 'MgmtVLANId': 0}}, + {"in": {"ipv6_configuration": ipv6_configuration}, "device": chassis, + "enable_nic": True, + "diff": {'StaticAlternateDNSServer': '2607:f2b1:f081:9:1c8c:f1c7:47e:f121', + 'StaticPreferredDNSServer': '2607:f2b1:f081:9:1c8c:f1c7:47e:f122', + 'StaticGateway': '0000::ffff', + 
'StaticIPv6Address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'StaticPrefixLength': 0, 'EnableIPv6': True}}, + {"in": {"ipv4_configuration": ipv4_configuration}, "device": chassis, + "enable_nic": True, + "diff": {'StaticAlternateDNSServer': '192.168.11.3', + 'StaticPreferredDNSServer': '192.168.11.2', + 'StaticGateway': '192.168.11.1', 'StaticSubnetMask': '255.255.255.0', + 'StaticIPAddress': '192.168.11.20'}}, + {"in": {"dns_configuration": dns_configuration}, "device": chassis, + "enable_nic": True, + "diff": {'NetworkSpeed': '10_MB', 'DnsName': 'openmanage-enterprise', + 'AutoNegotiation': False, 'DnsDomainName': 'localdomain'}}, + {"in": {"management_vlan": management_vlan}, "device": chassis, + "enable_nic": True, + "diff": {'MgmtVLANId': 0}}]) + def test_update_chassis_payload_success(self, param): + inp = param["in"] + inp['enable_nic'] = param.get("enable_nic") + inp['delay'] = param.get('delay', 0) + f_module = self.get_module_mock(params=inp) + diff = self.module.update_chassis_payload(f_module, param["device"]) + assert diff == param.get("diff") + + @pytest.mark.parametrize("param", [{"in": inp_param, "device": server, "enable_nic": False, + "diff": {'enableNIC': 'Disabled'}}, + {"in": inp_param, "device": server, "enable_nic": True, + "diff": {'staticIPAddressIPv4': '192.168.11.20', + 'staticSubnetMaskIPv4': '255.255.255.0', + 'staticGatewayIPv4': '192.168.11.1', + 'staticPreferredDNSIPv4': '192.168.11.2', + 'staticAlternateDNSIPv4': '192.168.11.3', + 'enableAutoConfigurationIPv6': 'Disabled', + 'vlanEnable': 'Enabled', + 'staticPreferredDNSIPv6': '2607:f2b1:f081:9:1c8c:f1c7:47e:f122', + 'staticAlternateDNSIPv6': '2607:f2b1:f081:9:1c8c:f1c7:47e:f121', + 'staticIPAddressIPv6': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'staticPrefixLengthIPv6': 0, 'staticGatewayIPv6': '0000::ffff', + 'enableIPv6': 'Enabled', + 'vlanId': 0}}, + {"in": {"ipv6_configuration": ipv6_configuration}, "device": server, + "enable_nic": True, + "diff": {'staticPreferredDNSIPv6': 
'2607:f2b1:f081:9:1c8c:f1c7:47e:f122', + 'staticAlternateDNSIPv6': '2607:f2b1:f081:9:1c8c:f1c7:47e:f121', + 'staticIPAddressIPv6': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'staticPrefixLengthIPv6': 0, 'staticGatewayIPv6': '0000::ffff', + 'enableAutoConfigurationIPv6': 'Disabled', 'enableIPv6': 'Enabled'}}, + {"in": {"ipv4_configuration": ipv4_configuration}, "device": server, + "enable_nic": True, "diff": {'staticIPAddressIPv4': '192.168.11.20', + 'staticSubnetMaskIPv4': '255.255.255.0', + 'staticGatewayIPv4': '192.168.11.1', + 'staticPreferredDNSIPv4': '192.168.11.2', + 'staticAlternateDNSIPv4': '192.168.11.3'}}, + {"in": {"management_vlan": management_vlan}, "device": server, + "enable_nic": True, "diff": {'vlanEnable': 'Enabled', 'vlanId': 0}} + ]) + def test_update_server_payload_success(self, param): + inp = param["in"] + inp['enable_nic'] = param.get("enable_nic") + f_module = self.get_module_mock(params=inp) + diff = self.module.update_server_payload(f_module, param["device"]) + assert diff == param.get("diff") + + @pytest.mark.parametrize("param", [{"in": inp_param, "device": iom, "enable_nic": False, + "diff": {'StaticGateway': '0000::ffff', 'StaticIPAddress': '192.168.11.20', + 'StaticSubnetMask': '255.255.255.0', 'EnableDHCP': False, + 'EnableMgmtVLANId': True, + 'StaticPrefixLength': 0, + 'StaticIPv6Address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'MgmtVLANId': 0, 'SecondaryDNS': '192.96.20.182', + 'PrimaryDNS': '192.96.20.181'}}, + {"in": inp_param, "device": iom, "enable_nic": True, + "diff": {'StaticGateway': '0000::ffff', 'StaticIPAddress': '192.168.11.20', + 'StaticSubnetMask': '255.255.255.0', 'EnableDHCP': False, + 'StaticPrefixLength': 0, 'EnableMgmtVLANId': True, + 'StaticIPv6Address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'MgmtVLANId': 0, 'SecondaryDNS': '192.96.20.182', + 'PrimaryDNS': '192.96.20.181'}}, + {"in": {"ipv6_configuration": ipv6_configuration}, "device": iom, + "enable_nic": True, "diff": {'StaticGateway': '0000::ffff', + 
'StaticPrefixLength': 0, + 'StaticIPv6Address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120'}}, + {"in": {"ipv4_configuration": ipv4_configuration}, "device": iom, + "enable_nic": True, + "diff": {'StaticGateway': '192.168.11.1', 'StaticIPAddress': '192.168.11.20', + 'StaticSubnetMask': '255.255.255.0', 'EnableDHCP': False}}, + {"in": {"management_vlan": management_vlan}, "device": iom, + "enable_nic": True, "diff": {'EnableMgmtVLANId': True, 'MgmtVLANId': 0}} + ]) + def test_update_iom_payload_success(self, param): + inp = param["in"] + inp['enable_nic'] = param.get("enable_nic") + f_module = self.get_module_mock(params=inp) + diff = self.module.update_iom_payload(f_module, param["device"]) + assert diff == param.get("diff") + + @pytest.mark.parametrize("params", [{"mparams": { + 'dns_configuration': {'dns_domain_name': 'localdomain', 'dns_name': 'openmanage-enterprise', + 'register_with_dns': True, 'auto_negotiation': True, + 'network_speed': '10_MB', 'use_dhcp_for_dns_domain_name': True}, + 'ipv4_configuration': {'enable_ipv4': False, 'enable_dhcp': True, 'use_dhcp_to_obtain_dns_server_address': True, + 'static_ip_address': '192.168.11.20', 'static_subnet_mask': '255.255.255.0', + 'static_gateway': '192.168.11.1', 'static_preferred_dns_server': '192.168.11.2', + 'static_alternate_dns_server': '192.168.11.3'}, + 'ipv6_configuration': {'enable_ipv6': False, 'enable_auto_configuration': True, + 'static_alternate_dns_server': '2607:f2b1:f081:9:1c8c:f1c7:47e:f121', + 'static_gateway': '0000::ffff', 'static_ip_address': '2607:f2b1:f081:9:1c8c:f1c7:47e:f120', + 'static_preferred_dns_server': '2607:f2b1:f081:9:1c8c:f1c7:47e:f122', + 'static_prefix_length': 0, 'use_dhcpv6_to_obtain_dns_server_address': True}, + 'management_vlan': {'enable_vlan': False, 'vlan_id': 0}, + 'dns_server_settings': {'preferred_dns_server': '192.96.20.181', + 'alternate_dns_server1': '192.96.20.182'}}, + "res": {'dns_configuration': {'dns_name': 'openmanage-enterprise', + 'register_with_dns': True, 
'auto_negotiation': True, + 'use_dhcp_for_dns_domain_name': True}, + 'ipv4_configuration': {'enable_ipv4': False}, + 'ipv6_configuration': {'enable_ipv6': False}, + 'management_vlan': {'enable_vlan': False}, + 'dns_server_settings': {'preferred_dns_server': '192.96.20.181', + 'alternate_dns_server1': '192.96.20.182'}}}]) + def test_validate_dependency(self, params): + mparams = params["mparams"] + result = self.module.validate_dependency(mparams) + assert result == params["res"] + + @pytest.mark.parametrize("params", [{"mparams": {"device_id": 123}, "success": True, "json_data": { + "value": [{"Name": "vlan_name1", "Id": 124, "Identifier": "ABCD345"}, + {"Name": "vlan_name", "Id": 123, "Identifier": "ABCD123"}]}, "res": + {"Name": "vlan_name", "Id": 123, "Identifier": "ABCD123"}}, { + "mparams": {"device_service_tag": "ABCD123"}, "success": True, + "json_data": {"value": [{"Name": "vlan_name", "Id": 123, "Identifier": "ABCD123"}]}, + "res": {"Name": "vlan_name", "Id": 123, "Identifier": "ABCD123"}}]) + def test_get_device_details( + self, params, ome_connection_mock_for_device_network, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params["mparams"]) + result = self.module.get_device_details( + f_module, ome_connection_mock_for_device_network) + assert result == params["res"] + + @pytest.mark.parametrize("params", [ + {"mparams": {"device_id": 123}, "success": True, + "json_data": {"Type": 2000, "Id": 123, "Identifier": "ABCD123"}, + "res": {"Type": 2000, "Id": 123, "Identifier": "ABCD123"}, + "diff": {"IPV4": "1.2.3.4"}}, + {"mparams": {"device_id": 123}, "success": True, + "json_data": {"Type": 4000, "Id": 123, "Identifier": "ABCD123"}, + "res": {"Type": 4000, "Id": 123, "Identifier": "ABCD123"}, + "diff": {"IPV4": "1.2.3.4"}}, + ]) + def test_get_network_payload( + self, params, ome_connection_mock_for_device_network, ome_response_mock, 
mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + ome_connection_mock_for_device_network.strip_substr_dict.return_value = params.get("json_data") + mocker.patch(MODULE_PATH + 'update_chassis_payload', return_value=params['diff']) + f_module = self.get_module_mock(params=params["mparams"]) + result = self.module.get_network_payload( + f_module, ome_connection_mock_for_device_network, {"Id": 123, "Type": 2000}) + assert result == params.get("res") + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLValidationError, TypeError, ConnectionError, HTTPError, URLError]) + def test_device_network_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_device_network, ome_response_mock): + ome_default_args.update({"device_service_tag": Constants.service_tag1}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'validate_input', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'validate_input', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'validate_input', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py new file mode 
100644 index 00000000..0a68ac9d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pdb + +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_network_services +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_network_services.' 
+ + +@pytest.fixture +def ome_conn_mock_network(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMDeviceNetworkService(FakeAnsibleModule): + + module = ome_device_network_services + + def test_check_domain_service(self, ome_conn_mock_network, ome_default_args): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_network) + assert result is None + + def test_check_domain_service_http(self, ome_conn_mock_network, ome_default_args, mocker): + f_module = self.get_module_mock() + err_message = {'error': {'@Message.ExtendedInfo': [{'MessageId': 'CGEN1006'}]}} + ome_conn_mock_network.invoke_request.side_effect = HTTPError('http://testhost.com', 400, + json.dumps(err_message), + {"accept-type": "application/json"}, None) + mocker.patch(MODULE_PATH + 'json.loads', return_value=err_message) + with pytest.raises(Exception) as err: + self.module.check_domain_service(f_module, ome_conn_mock_network) + assert err.value.args[0] == "The device location settings operation is supported only on " \ + "OpenManage Enterprise Modular." 
+ + def test_get_chassis_device(self, ome_conn_mock_network, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1") + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.168.1.1"]}, + {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE", + "PublicAddress": ["192.168.1.2"]}]} + param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.get_chassis_device(f_module, ome_conn_mock_network) + assert err.value.args[0] == "Failed to retrieve the device information." + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.18.1.1"]}]} + param = {"hostname": "192.18.1.1", "remote_racadm_settings": {"enabled": True}} + f_module = self.get_module_mock(params=param) + key, value = self.module.get_chassis_device(f_module, ome_conn_mock_network) + assert key == "Id" + assert value == 25011 + + def test_main_validation(self, ome_conn_mock_network, ome_default_args, ome_response_mock, mocker): + resp = self._run_module_with_fail_json(ome_default_args) + assert resp['msg'] == "one of the following is required: snmp_settings, " \ + "ssh_settings, remote_racadm_settings" + mocker.patch(MODULE_PATH + "check_domain_service", return_value=None) + mocker.patch(MODULE_PATH + "fetch_device_details", return_value=ome_response_mock) + ome_response_mock.json_data = {"value": [{"Id": 25011, "DeviceServiceTag": "XE3FRS"}], + "EnableRemoteRacadm": True, "SettingType": "NetworkServices", + "SnmpConfiguration": {"PortNumber": 161, "SnmpEnabled": True, + "SnmpV1V2Credential": {"CommunityName": "public"}}, + "SshConfiguration": {"IdleTimeout": 60, "MaxAuthRetries": 3, "MaxSessions": 1, + "PortNumber": 22, "SshEnabled": False}} + 
ome_default_args.update({"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}, + "snmp_settings": {"enabled": True, "port_number": 161, "community_name": "public"}, + "ssh_settings": {"enabled": True, "port_number": 22, "max_sessions": 1, + "max_auth_retries": 3, "idle_timeout": 60}}) + resp = self._run_module(ome_default_args) + assert resp['msg'] == "Successfully updated the network services settings." + + def test_fetch_device_details(self, ome_conn_mock_network, ome_default_args, ome_response_mock, mocker): + param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}} + f_module = self.get_module_mock(params=param) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"Id": 25011, "DeviceServiceTag": "XE3FRS"}], + "EnableRemoteRacadm": True, "SettingType": "NetworkServices", + "SnmpConfiguration": {"PortNumber": 161, "SnmpEnabled": True, + "SnmpV1V2Credential": {"CommunityName": "public"}}, + "SshConfiguration": {"IdleTimeout": 60, "MaxAuthRetries": 3, "MaxSessions": 1, + "PortNumber": 22, "SshEnabled": False}} + with pytest.raises(Exception) as err: + self.module.fetch_device_details(f_module, ome_conn_mock_network) + assert err.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id '25012' is invalid." 
+ ome_response_mock.strip_substr_dict.return_value = {"EnableRemoteRacadm": True} + ome_response_mock.json_data = {"value": [{"Id": 25012, "DeviceServiceTag": "XE3FRS"}], + "EnableRemoteRacadm": True, "SnmpConfiguration": {}, "SshConfiguration": {}} + resp = self.module.fetch_device_details(f_module, ome_conn_mock_network) + assert resp.json_data["SnmpConfiguration"] == {} + param = {"hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}} + f_module = self.get_module_mock(params=param) + mocker.patch(MODULE_PATH + "get_chassis_device", return_value=("Id", "25012")) + resp = self.module.fetch_device_details(f_module, ome_conn_mock_network) + assert resp.json_data["SnmpConfiguration"] == {} + + def test_get_ip_from_host(self, ome_conn_mock_network, ome_default_args, ome_response_mock): + result = self.module.get_ip_from_host("192.168.0.1") + assert result == "192.168.0.1" + + def test_check_mode_validation(self, ome_conn_mock_network, ome_default_args, ome_response_mock): + param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}, + "snmp_settings": {"enabled": True, "port_number": 161, "community_name": "public"}, + "ssh_settings": {"enabled": True, "port_number": 22, "max_sessions": 1, + "max_auth_retries": 3, "idle_timeout": 120}} + f_module = self.get_module_mock(params=param) + loc_data = {"EnableRemoteRacadm": True, "SettingType": "NetworkServices", + "SnmpConfiguration": {"PortNumber": 161, "SnmpEnabled": True, + "SnmpV1V2Credential": {"CommunityName": "public"}}, + "SshConfiguration": {"IdleTimeout": 7200, "MaxAuthRetries": 3, "MaxSessions": 1, + "PortNumber": 22, "SshEnabled": True}} + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data, ome_conn_mock_network) + assert err.value.args[0] == "No changes found to be applied." 
+ f_module.check_mode = True + loc_data["SshConfiguration"]["IdleTimeout"] = 7200 + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data, ome_conn_mock_network) + assert err.value.args[0] == "No changes found to be applied." + loc_data = {"EnableRemoteRacadm": True, "SettingType": "NetworkServices", + "SnmpConfiguration": {"PortNumber": 161, "SnmpEnabled": False, + "SnmpV1V2Credential": {"CommunityName": "public"}}, + "SshConfiguration": {"IdleTimeout": 60, "MaxAuthRetries": 3, "MaxSessions": 1, + "PortNumber": 22, "SshEnabled": False}} + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data, ome_conn_mock_network) + assert err.value.args[0] == "Changes found to be applied." + param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": False}, + "snmp_settings": {"enabled": False, "port_number": 161, "community_name": "public"}, + "ssh_settings": {"enabled": False, "port_number": 22, "max_sessions": 1, + "max_auth_retries": 3, "idle_timeout": 60}} + f_module = self.get_module_mock(params=param) + resp = self.module.check_mode_validation(f_module, loc_data, ome_conn_mock_network) + assert resp["SnmpConfiguration"]["PortNumber"] == 161 + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_network_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_network, ome_response_mock): + ome_default_args.update({"device_id": 25011, "remote_racadm_settings": {"enabled": True}}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, 
SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py new file mode 100644 index 00000000..928c407c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_power_settings +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock, patch, Mock + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_power_settings.' 
+ + +@pytest.fixture +def ome_conn_mock_power(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMDevicePower(FakeAnsibleModule): + + module = ome_device_power_settings + + def test_check_domain_service(self, ome_conn_mock_power, ome_default_args): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_power) + assert result is None + + def test_get_chassis_device(self, ome_conn_mock_power, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1") + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.168.1.1"]}, + {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE", + "PublicAddress": ["192.168.1.2"]}]} + param = {"device_id": 25012, "hostname": "192.168.1.6", + "power_configuration": {"enable_power_cap": True, "power_cap": 3424}} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.get_chassis_device(f_module, ome_conn_mock_power) + assert err.value.args[0] == "Failed to fetch the device information." 
+ + def test_check_mode_validation(self, ome_conn_mock_power, ome_default_args, ome_response_mock): + loc_data = {"PowerCap": "3424", "MinPowerCap": "3291", "MaxPowerCap": "3424", + "RedundancyPolicy": "NO_REDUNDANCY", "EnablePowerCapSettings": True, + "EnableHotSpare": True, "PrimaryGrid": "GRID_1", "PowerBudgetOverride": False} + param = {"power_configuration": {"enable_power_cap": True, "power_cap": 3424}} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + param = {"hot_spare_configuration": {"enable_hot_spare": False}} + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + assert err.value.args[0] == "Changes found to be applied." + param = {"redundancy_configuration": {"redundancy_policy": "NO_REDUNDANCY"}} + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, loc_data) + assert err.value.args[0] == "No changes found to be applied." 
+ + def test_fetch_device_details(self, ome_conn_mock_power, ome_default_args, ome_response_mock): + param = {"device_id": 25012, "hostname": "192.168.1.6", + "power_configuration": {"enable_power_cap": True, "power_cap": 3424}} + f_module = self.get_module_mock(params=param) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [], "PowerCap": "3424", "MinPowerCap": "3291", + "MaxPowerCap": "3424", "RedundancyPolicy": "NO_REDUNDANCY", + "EnablePowerCapSettings": True, "EnableHotSpare": True, + "PrimaryGrid": "GRID_1", "PowerBudgetOverride": False} + with pytest.raises(Exception) as err: + self.module.fetch_device_details(f_module, ome_conn_mock_power) + assert err.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id '25012' is invalid." + + def test_get_ip_from_host(self, ome_conn_mock_power, ome_default_args, ome_response_mock): + result = self.module.get_ip_from_host("192.168.0.1") + assert result == "192.168.0.1" + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_power_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_power, ome_response_mock): + ome_default_args.update({"device_id": 25011, "power_configuration": {"enable_power_cap": True, + "power_cap": 3424}}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + 
else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py new file mode 100644 index 00000000..97b611ce --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.0.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_quick_deploy +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_quick_deploy.' 
+ + +@pytest.fixture +def ome_conn_mock_qd(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMDevicePower(FakeAnsibleModule): + + module = ome_device_quick_deploy + + def test_check_domain_service(self, ome_conn_mock_qd, ome_default_args): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_qd) + assert result is None + + def test_get_chassis_device(self, ome_conn_mock_qd, ome_default_args, mocker, ome_response_mock): + mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1") + ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD", + "PublicAddress": ["192.168.1.1"]}, + {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE", + "PublicAddress": ["192.168.1.2"]}]} + param = {"device_id": 25012, "hostname": "192.168.1.6"} + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.get_chassis_device(f_module, ome_conn_mock_qd) + assert err.value.args[0] == "Unable to retrieve the device information." 
+ + def test_get_ip_from_host(self, ome_conn_mock_qd, ome_default_args, ome_response_mock): + result = self.module.get_ip_from_host("192.168.0.1") + assert result == "192.168.0.1" + + def test_validate_ip_address(self, ome_conn_mock_qd, ome_response_mock, ome_default_args): + result = self.module.validate_ip_address("192.168.0.1", "IPV4") + assert result is True + result = self.module.validate_ip_address("192.168.0.1.1", "IPV4") + assert result is False + result = self.module.validate_ip_address("::", "IPV6") + assert result is True + + def test_ip_address_field(self, ome_conn_mock_qd, ome_response_mock, ome_default_args, mocker): + param = {"device_id": 25011, "setting_type": "ServerQuickDeploy", + "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "192.168.0.1", + "ipv4_gateway": "0.0.0.0.0"}, "slots": [{"vlan_id": 1}]} + fields = [("ipv4_subnet_mask", "IPV4"), ("ipv4_gateway", "IPV4"), ("ipv6_gateway", "IPV6")] + f_module = self.get_module_mock(params=param) + mocker.patch(MODULE_PATH + "validate_ip_address", return_value=False) + with pytest.raises(Exception) as err: + self.module.ip_address_field(f_module, fields, param["quick_deploy_options"], slot=False) + assert err.value.args[0] == "Invalid '192.168.0.1' address provided for the ipv4_subnet_mask." 
+ + def test_get_device_details(self, ome_conn_mock_qd, ome_response_mock, ome_default_args, mocker): + param = {"device_id": 25012, "hostname": "192.168.1.6", "setting_type": "ServerQuickDeploy", + "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "192.168.0.1", + "ipv4_gateway": "0.0.0.0"}, "slots": [{"vlan_id": 1}]} + f_module = self.get_module_mock(params=param) + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [], "SettingType": "ServerQuickDeploy", + "ProtocolTypeV4": "true", "NetworkTypeV4": "Static", + "IpV4Gateway": "192.168.0.1", "IpV4SubnetMask": "255.255.255.0"} + mocker.patch(MODULE_PATH + 'get_chassis_device', return_value=("Id", 25011)) + mocker.patch(MODULE_PATH + "check_mode_validation", return_value=({}, {})) + mocker.patch(MODULE_PATH + "job_payload_submission", return_value=12345) + with pytest.raises(Exception) as err: + self.module.get_device_details(ome_conn_mock_qd, f_module) + assert err.value.args[0] == "Unable to complete the operation because the entered " \ + "target device id '25012' is invalid." 
+ param.update({"job_wait": False}) + ome_response_mock.json_data.update({"value": [{"Id": 25012}]}) + f_module = self.get_module_mock(params=param) + result = self.module.get_device_details(ome_conn_mock_qd, f_module) + assert result == (12345, None) + param.update({"job_wait": True}) + + def test_job_payload_submission(self, ome_conn_mock_qd, ome_response_mock, ome_default_args): + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_data = {"Id": 12345} + ome_conn_mock_qd.job_submission.return_value = ome_response_mock + payload = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "255.255.255.0", + "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static", + "PrefixLength": "1", "IpV6Gateway": "0.0.0.0"} + slot_payload = [{"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1}] + resp_data = {"Slots": [ + {"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False}, + {"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False}, + ]} + result = self.module.job_payload_submission(ome_conn_mock_qd, payload, slot_payload, + "ServerQuickDeploy", 25012, resp_data) + assert result == 12345 + + def test_check_mode_validation(self, ome_conn_mock_qd, ome_response_mock, ome_default_args): + param = {"device_id": 25012, "hostname": "192.168.1.6", "setting_type": "ServerQuickDeploy", + "quick_deploy_options": { + "ipv4_enabled": True, "ipv4_network_type": "Static", "ipv4_subnet_mask": "255.255.255.0", + "ipv4_gateway": "0.0.0.0", "ipv6_enabled": True, "ipv6_network_type": "Static", + "ipv6_prefix_length": "1", "ipv6_gateway": "0.0.0.0", + "slots": [{"slot_id": 1, "slot_ipv4_address": "192.168.0.1", + "slot_ipv6_address": "::", "vlan_id": "1"}]}} + f_module = self.get_module_mock(params=param) + deploy_data = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "255.255.255.0", 
+ "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static", + "PrefixLength": "1", "IpV6Gateway": "0.0.0.0", + "Slots": [{"SlotId": 1, "SlotIPV4Address": "192.168.0.1", "SlotIPV6Address": "::", "VlanId": "1"}]} + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, deploy_data) + assert err.value.args[0] == "No changes found to be applied." + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, deploy_data) + assert err.value.args[0] == "No changes found to be applied." + param["quick_deploy_options"]["ipv6_prefix_length"] = "2" + with pytest.raises(Exception) as err: + self.module.check_mode_validation(f_module, deploy_data) + assert err.value.args[0] == "Changes found to be applied." + f_module.check_mode = False + result = self.module.check_mode_validation(f_module, deploy_data) + assert result[0]["NetworkTypeV4"] == "Static" + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_device_power_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_qd, ome_response_mock): + ome_default_args.update({"device_id": 25011, "setting_type": "ServerQuickDeploy", "validate_certs": False, + "quick_deploy_options": {"ipv4_enabled": False, + "slots": [{"slot_id": 1, "vlan_id": 1}]}}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + 
else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py new file mode 100644 index 00000000..94e76df1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py @@ -0,0 +1,467 @@ +# -*- coding: utf-8 -*- + +# +# Dell OpenManage Ansible Modules +# Version 6.1.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import CHANGES_MSG, NO_CHANGES_MSG +from ansible_collections.dellemc.openmanage.plugins.modules import ome_devices +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +DELETE_SUCCESS = "The devices(s) are removed successfully." +INVALID_DEV_ST = "Unable to complete the operation because the entered target device(s) '{0}' are invalid." +JOB_DESC = "The {0} task initiated from OpenManage Ansible Modules for devices with the ids '{1}'." +APPLY_TRIGGERED = "Successfully initiated the device action job." +JOB_SCHEDULED = "The job is scheduled successfully." 
+SUCCESS_MSG = "The device operation is performed successfully." + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_devices.' + + +@pytest.fixture +def ome_connection_mock_for_devices(mocker, ome_response_mock): + connection_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.ome_devices.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeDevices(FakeAnsibleModule): + module = ome_devices + + @pytest.mark.parametrize("params", [ + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}]}, + 'message': DELETE_SUCCESS, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], 'state': 'absent'}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}]}, + 'message': CHANGES_MSG, "success": True, + 'check_mode': True, + 'mparams': {"device_service_tags": ["ABCTAG1", "BCDTAG2"], 'state': 'absent'}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}]}, + 'message': NO_CHANGES_MSG, "success": True, + 'mparams': {"device_service_tags": ["ABCTAG2", "BCDTAG2"], 'state': 'absent'}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG2", "Type": 1001}]}, + 'message': INVALID_DEV_ST.format(",".join(map(str, ["ABCTAG2"]))), "success": True, + 'mparams': {"device_service_tags": ["ABCTAG2"], 'state': 'present'}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG2", "Type": 1001}]}, + 'message': INVALID_DEV_ST.format(",".join(map(str, [24, 25]))), "success": True, + 'mparams': {"device_ids": [24, 25], 'state': 'present'}}, + {"json_data": {"value": []}, + 'message': INVALID_DEV_ST.format(",".join(map(str, [24]))), "success": True, + 'mparams': {"device_ids": [24], 'state': 'present'}} + ]) + def test_ome_devices_delete(self, params, 
ome_connection_mock_for_devices, ome_response_mock, ome_default_args, + module_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + ome_connection_mock_for_devices.get_all_items_with_pagination.return_value = params['json_data'] + ome_default_args.update(params['mparams']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}, + {'Id': 25, 'Identifier': "BCDTAG2", "Type": 1000}]}, + 'message': APPLY_TRIGGERED, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False, + "job_name": "my test job", "job_description": "My job description" + }, "check_similar_job": {}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}, + {'Id': 25, 'Identifier': "BCDTAG2", "Type": 1000}]}, + 'message': APPLY_TRIGGERED, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False + }, "check_similar_job": {}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}, + {'Id': 25, 'Identifier': "BCDTAG2", "Type": 1000}]}, + 'message': JOB_SCHEDULED, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False, + "job_schedule": "my cron task" + }, "check_similar_job": {}}, + {"json_data": {"value": [{'Id': 24, 'Identifier': "ABCTAG1", "Type": 1000}, + {'Id': 25, 'Identifier': "BCDTAG2", "Type": 1000}]}, + 'message': CHANGES_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False}, + "check_similar_job": {}, "check_mode": True + }, + {"json_data": { + "value": [ + { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": JOB_DESC.format("Refresh inventory", "13216"), + "Schedule": "startnow", + "State": "Enabled", + 
"Targets": [ + { + "JobId": 14874, + "Id": 13123, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "true" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + }, + }, + { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": JOB_DESC.format("Refresh inventory", "13216"), + "Schedule": "startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "false" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + }, + }, + { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": JOB_DESC.format("Refresh inventory", "13216"), + "Schedule": "startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "true" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + }, + } + ] + }, + 'message': APPLY_TRIGGERED, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], 
"job_wait": False + }, "get_dev_ids": ([13216], {})}, + + {"json_data": { + "value": [ + { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": JOB_DESC.format("Refresh inventory", "13216"), + "Schedule": "startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "true" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + }, + } + ] + }, + 'message': CHANGES_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False + }, "get_dev_ids": ([13216], {}), "check_mode": True}, + {"json_data": { + "value": [ + { + "Id": 14874, + "JobName": "Refresh inventory", + "JobDescription": JOB_DESC.format("Refresh inventory", "13216"), + "Schedule": "startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "action", + "Value": "CONFIG_INVENTORY" + }, + { + "JobId": 14874, + "Key": "isCollectDriverInventory", + "Value": "true" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2050, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 8, + "Name": "Inventory_Task", + }, + } + ]}, + 'message': NO_CHANGES_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": False + }, "get_dev_ids": ([13216], {})}, + {"json_data": { + "value": [ + { + "Id": 14874, + "JobName": "Reset iDRAC", + "JobDescription": JOB_DESC.format("Reset iDRAC", "13216"), + "Schedule": 
"startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "operationName", + "Value": "RESET_IDRAC" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2050, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 3, + "Name": "DeviceAction_Task", + }, + } + ]}, + 'message': NO_CHANGES_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], + "job_wait": False, "device_action": 'reset_idrac', + }, "get_dev_ids": ([13216], {})}, + {"json_data": { + "value": [ + { + "Id": 14874, + "JobName": "Clear iDRAC job queue", + "JobDescription": JOB_DESC.format("Clear iDRAC job queue", "13216"), + "Schedule": "startnow", + "State": "Enabled", + "Targets": [ + { + "JobId": 14874, + "Id": 13216, + "Data": "", + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "Params": [ + { + "JobId": 14874, + "Key": "deviceTypes", + "Value": "1000" + }, + { + "JobId": 14874, + "Key": "operationName", + "Value": "REMOTE_RACADM_EXEC" + }, + { + "JobId": 14874, + "Key": "Command", + "Value": "jobqueue delete -i JID_CLEARALL_FORCE" + }, + { + "JobId": 14874, + "Key": "CommandTimeout", + "Value": "60" + } + ], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2050, + "Name": "Completed" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 3, + "Name": "DeviceAction_Task", + }, + } + ]}, + 'message': NO_CHANGES_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], + "job_wait": False, "device_action": 'clear_idrac_job_queue', + }, "get_dev_ids": ([13216], {})}, + {"json_data": {"Id": 14874, "LastRunStatus": {"Id": 2060, "Name": "Completed"}}, + 'message': SUCCESS_MSG, "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": True + }, 
"check_similar_job": {}, "get_dev_ids": ([13216], {})}, + {"json_data": {"Id": 14874, "LastRunStatus": {"Id": 2070, "Name": "Completed"}, + "Value": "Job Tracking has failed"}, + 'message': "Job Tracking has failed", "success": True, 'mparams': { + "device_service_tags": ["ABCTAG1", "BCDTAG2"], "job_wait": True + }, "check_similar_job": {}, "get_dev_ids": ([13216], {})} + ]) + def test_ome_devices_main_state_present(self, params, ome_connection_mock_for_devices, ome_response_mock, + ome_default_args, module_mock, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + mocks = ["check_similar_job", "get_dev_ids"] + for m in mocks: + if m in params: + mocker.patch(MODULE_PATH + m, return_value=params.get(m, {})) + mocker.patch("ansible_collections.dellemc.openmanage.plugins.module_utils.utils." + 'time.sleep', return_value=None) + ome_default_args.update(params['mparams']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_devices_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_devices, ome_response_mock): + ome_default_args.update({"state": "absent", "device_service_tags": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_dev_ids', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_dev_ids', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True 
+ else: + mocker.patch(MODULE_PATH + 'get_dev_ids', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py new file mode 100644 index 00000000..79c94b5c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_diagnostics +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_diagnostics.' 
+ + +@pytest.fixture +def ome_conn_mock_diagnostics(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEDiagnostics(FakeAnsibleModule): + + module = ome_diagnostics + + def test_check_domain_service(self, ome_conn_mock_diagnostics, ome_default_args, mocker): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_diagnostics) + assert result is None + + def test_group_validation(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"device_group_name": "Servers"}) + ome_response_mock.json_data = {"value": []} + with pytest.raises(Exception) as err: + self.module.group_validation(f_module, ome_conn_mock_diagnostics) + assert err.value.args[0] == "Unable to complete the operation because the entered target device " \ + "group name 'Servers' is invalid." + ome_response_mock.json_data = {"value": [{"Id": 25011, "Type": 1000}]} + result = self.module.group_validation(f_module, ome_conn_mock_diagnostics) + assert result == [25011] + + def test_group_validation_s1(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"device_group_name": "Servers"}) + ome_response_mock.json_data = {"value": [{"Type": 2000, "Id": 10161}]} + with pytest.raises(Exception) as err: + self.module.group_validation(f_module, ome_conn_mock_diagnostics) + assert err.value.args[0] == "The requested group 'Servers' does not contain devices that support export log." 
+ + def test_device_validation(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + resp = {"report_list": [{"Id": 25014, "DeviceServiceTag": "ZXCVB1", "Type": 1000}]} + f_module = self.get_module_mock(params={"device_ids": [25011]}) + ome_conn_mock_diagnostics.get_all_report_details.return_value = resp + with pytest.raises(Exception) as err: + self.module.device_validation(f_module, ome_conn_mock_diagnostics) + assert err.value.args[0] == "Unable to complete the operation because the entered target device " \ + "id(s) '25011' are invalid." + resp = {"report_list": [{"Id": 25011, "DeviceServiceTag": "ZXCVB1", "Type": 1000}]} + ome_conn_mock_diagnostics.get_all_report_details.return_value = resp + result = self.module.device_validation(f_module, ome_conn_mock_diagnostics) + assert result == [25011] + f_module = self.get_module_mock(params={"device_service_tags": ["ZXCVB1"]}) + result = self.module.device_validation(f_module, ome_conn_mock_diagnostics) + assert result == [25011] + resp = {"report_list": [{"Id": 25019, "DeviceServiceTag": "ZXCVB1", "Type": 8000}]} + ome_conn_mock_diagnostics.get_all_report_details.return_value = resp + with pytest.raises(Exception) as err: + self.module.device_validation(f_module, ome_conn_mock_diagnostics) + assert err.value.args[0] == "The requested device service tag(s) 'ZXCVB1' " \ + "are not applicable for export log." 
+ + def test_extract_log_operation(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"log_type": "application", "share_address": "192.168.0.1", + "share_type": "NFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], + "lead_chassis_only": "true"}) + ome_response_mock.json_data = {"value": [{"Id": 16011, "Type": 2000}]} + ome_conn_mock_diagnostics.job_submission.return_value = {"Id": 16011} + ome_conn_mock_diagnostics.get_all_items_with_pagination.return_value = \ + {"value": [{"DomainRoleTypeValue": "LEAD", "DeviceId": 16011}]} + result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics) + assert result["Id"] == 16011 + + ome_conn_mock_diagnostics.get_all_items_with_pagination.return_value = \ + {"value": [{"DomainRoleTypeValue": "STANDALONE", "DeviceId": 16011}]} + result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics) + assert result["Id"] == 16011 + + f_module = self.get_module_mock(params={"log_type": "support_assist_collection", "share_address": "192.168.0.1", + "share_type": "NFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"]}) + result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics, device_lst=[25012]) + assert result["Id"] == 16011 + + def test_extract_log_operation_member(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"log_type": "application", "share_address": "192.168.0.1", + "share_type": "NFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], + "lead_chassis_only": "true"}) + 
ome_response_mock.json_data = {"value": [{"Id": 16011, "Type": 2000}]} + ome_conn_mock_diagnostics.job_submission.return_value = {"Id": 16011} + ome_conn_mock_diagnostics.get_all_items_with_pagination.return_value = \ + {"value": [{"DomainRoleTypeValue": "MEMBER", "DeviceId": 16011}]} + with pytest.raises(Exception) as err: + self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics) + assert err.value.args[0] == "There is no device(s) available to export application log." + + def test_extract_log_operation_no_lead_chassis(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"lead_chassis_only": False, "log_type": "application", + "share_address": "192.168.0.1", + "share_type": "NFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], }) + ome_response_mock.json_data = {"value": [{"Id": 16011, "Type": 2000}]} + ome_conn_mock_diagnostics.job_submission.return_value = {"Id": 16011} + result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics) + assert result["Id"] == 16011 + + def test_extract_log_operation_s1(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"lead_chassis_only": False, "log_type": "application", + "share_address": "192.168.0.1", + "share_type": "NFS", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], }) + ome_response_mock.json_data = {"value": [{"Id": 16011, "Type": 2000}]} + ome_conn_mock_diagnostics.job_submission.return_value = {"Id": 16011} + result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics) + assert result["Id"] == 16011 + + def test_main_succes_case(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + ome_default_args.update({"log_type": "support_assist_collection", "share_address": 
"192.168.0.1", + "share_type": "NFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], + "test_connection": False, "job_wait": True, "device_ids": [25011]}) + mocker.patch(MODULE_PATH + "check_domain_service", return_value=None) + mocker.patch(MODULE_PATH + "device_validation", return_value=[25011]) + mocker.patch(MODULE_PATH + "find_failed_jobs", return_value=("", False)) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + mocker.patch(MODULE_PATH + "extract_log_operation") + ome_response_mock.json_data = {"value": {"Id": 25011}} + ome_conn_mock_diagnostics.job_tracking.return_value = (False, "") + result = self._run_module(ome_default_args) + assert result["msg"] == "Export log job completed successfully." + + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (False, [25011]) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "An export log job is already running. Wait for the job to finish." + + ome_default_args.update({"test_connection": True, "job_wait": False}) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + ome_conn_mock_diagnostics.job_tracking.return_value = (True, "") + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "Unable to access the share. Ensure that the share address, share name, " \ + "share domain, and share credentials provided are correct." 
+ + def test_main_succes_case02(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + ome_default_args.update({"log_type": "supportassist_collection", "share_address": "192.168.0.1", + "share_type": "CIFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], + "test_connection": False, "job_wait": True, "device_ids": [25011]}) + mocker.patch(MODULE_PATH + "check_domain_service", return_value=None) + mocker.patch(MODULE_PATH + "device_validation", return_value=[25011]) + mocker.patch(MODULE_PATH + "find_failed_jobs", return_value=("", False)) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + mocker.patch(MODULE_PATH + "extract_log_operation") + ome_response_mock.json_data = {"value": {"Id": 25011}} + ome_conn_mock_diagnostics.job_tracking.return_value = (False, "") + result = self._run_module(ome_default_args) + assert result["msg"] == "Export log job completed successfully." + + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (False, [25011]) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "An export log job is already running. Wait for the job to finish." + + ome_default_args.update({"test_connection": True, "job_wait": False}) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + ome_conn_mock_diagnostics.job_tracking.return_value = (True, "") + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "Unable to access the share. Ensure that the share address, share name, " \ + "share domain, and share credentials provided are correct." 
+ + def test_main_succes_case03(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + ome_default_args.update({"log_type": "application", "share_address": "192.168.0.1", + "share_type": "NFS", "share_name": "iso", "mask_sensitive_info": "true", + "test_connection": True, "job_wait": True, "device_ids": [25011]}) + mocker.patch(MODULE_PATH + "check_domain_service", return_value=None) + mocker.patch(MODULE_PATH + "device_validation", return_value=[25011]) + mocker.patch(MODULE_PATH + "find_failed_jobs", return_value=("", False)) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + mocker.patch(MODULE_PATH + "extract_log_operation") + ome_response_mock.json_data = {"value": {"Id": 25011}} + ome_conn_mock_diagnostics.job_tracking.return_value = (False, "") + result = self._run_module(ome_default_args) + assert result["msg"] == "Export log job completed successfully." + + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (False, [25011]) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "An export log job is already running. Wait for the job to finish." + + ome_default_args.update({"test_connection": True, "job_wait": False}) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + ome_conn_mock_diagnostics.job_tracking.return_value = (True, "") + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "Unable to access the share. Ensure that the share address, share name, " \ + "share domain, and share credentials provided are correct." 
+ + def test_main_succes_case04(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + ome_default_args.update({"log_type": "supportassist_collection", "share_address": "192.168.0.1", + "share_type": "CIFS", "share_name": "iso", "share_user": "username", + "share_password": "password", "share_domain": "domain", + "mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], + "test_connection": False, "job_wait": True, "device_group_name": "Servers"}) + mocker.patch(MODULE_PATH + "check_domain_service", return_value=None) + mocker.patch(MODULE_PATH + "group_validation", return_value=[25011]) + mocker.patch(MODULE_PATH + "find_failed_jobs", return_value=("", False)) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + mocker.patch(MODULE_PATH + "extract_log_operation") + ome_response_mock.json_data = {"value": {"Id": 25011}} + ome_conn_mock_diagnostics.job_tracking.return_value = (False, "") + result = self._run_module(ome_default_args) + assert result["msg"] == "Export log job completed successfully." + + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (False, [25011]) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "An export log job is already running. Wait for the job to finish." + + ome_default_args.update({"test_connection": True, "job_wait": False}) + ome_conn_mock_diagnostics.check_existing_job_state.return_value = (True, [25011]) + ome_conn_mock_diagnostics.job_tracking.return_value = (True, "") + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "Unable to access the share. Ensure that the share address, share name, " \ + "share domain, and share credentials provided are correct." 
+ + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_diagnostics_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_diagnostics, ome_response_mock): + ome_default_args.update({"log_type": "application", "share_address": "192.168.0.1", + "share_type": "NFS", "mask_sensitive_info": False}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module_with_fail_json(ome_default_args) + assert result["failed"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + def test_find_failed_jobs(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker): + ome_response_mock.json_data = { + "Id": 25011, + "value": [{"Id": 25013, "Value": "Job status for JID_255809594125 is Completed with Errors."}] + } + result = self.module.find_failed_jobs({"Id": 25012}, ome_conn_mock_diagnostics) + assert result[0] == "Export log job completed with errors." + assert result[1] is False + + ome_response_mock.json_data = { + "Id": 25011, + "value": [] + } + result = self.module.find_failed_jobs({"Id": 25012}, ome_conn_mock_diagnostics) + assert result[0] == "Export log job completed with errors." 
+ assert result[1] is False + + ome_response_mock.json_data = { + "Id": 25011, + "value": [{"Id": 25013, "Value": "Job status for JID_255809594125 is Completed."}] + } + result = self.module.find_failed_jobs({"Id": 25012}, ome_conn_mock_diagnostics) + print(result) + assert result[0] == "Export log job completed with errors." + assert result[1] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py new file mode 100644 index 00000000..e84e7c7e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py @@ -0,0 +1,460 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.3.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_discovery +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_discovery.' +NO_CHANGES_MSG = "No changes found to be applied." +DISC_JOB_RUNNING = "Discovery job '{name}' with ID {id} is running. Please retry after job completion." +DISC_DEL_JOBS_SUCCESS = "Successfully deleted {n} discovery job(s)." +MULTI_DISCOVERY = "Multiple discoveries present. Run the job again using a specific ID." 
+DISCOVERY_SCHEDULED = "Successfully scheduled the Discovery job." +DISCOVER_JOB_COMPLETE = "Successfully completed the Discovery job." +JOB_TRACK_SUCCESS = "Discovery job has {0}." +JOB_TRACK_FAIL = "No devices discovered, job is in {0} state." +JOB_TRACK_UNABLE = "Unable to track discovery job status of {0}." +JOB_TRACK_INCOMPLETE = "Discovery job {0} incomplete after polling {1} times." +INVALID_DEVICES = "Invalid device types found - {0}." + + +@pytest.fixture +def ome_connection_mock_for_discovery(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeDiscovery(FakeAnsibleModule): + module = ome_discovery + + @pytest.mark.parametrize("params", [{"mparams": {"state": "absent", "discovery_job_name": "my_discovery1"}, + "discov_list": [{"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}], + "job_state_dict": {12: 2010}, "res": DISC_DEL_JOBS_SUCCESS.format(n=1), + "json_data": 1, "success": True}, + {"mparams": {"state": "absent", "discovery_job_name": "my_discovery1"}, + "discov_list": [{"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}], + "job_state_dict": {12: 2050}, + "res": DISC_JOB_RUNNING.format(name='my_discovery1', id=12), "json_data": 1, + "success": True}]) + def test_delete_discovery(self, mocker, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + mocker.patch(MODULE_PATH + 'get_discovery_states', return_value=params["job_state_dict"]) + f_module = self.get_module_mock(params=params["mparams"]) + error_message = params["res"] + with pytest.raises(Exception) as err: + self.module.delete_discovery(f_module, 
ome_connection_mock_for_discovery, params['discov_list']) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", [{"mparams": {"state": "absent", "discovery_job_name": "my_discovery1"}, + "res": [{"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}], + "json_data": {"value": [{"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}]}, + "success": True}, + {"mparams": {"state": "absent", "discovery_id": 12}, "res": [ + {"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}], + "json_data": {"value": [{"DiscoveryConfigGroupId": 11, + "DiscoveryConfigGroupName": "my_discovery2"}, + {"DiscoveryConfigGroupId": 12, + "DiscoveryConfigGroupName": "my_discovery1"}]}, + "success": True}]) + def test_check_existing_discovery(self, mocker, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params["mparams"]) + res = self.module.check_existing_discovery(f_module, ome_connection_mock_for_discovery) + assert res == params["res"] + + @pytest.mark.parametrize("params", [ + {"res": {12: 2020}, "json_data": {"value": [{"DiscoveryConfigGroupId": 12, "JobStatusId": 2020}]}, + "success": True}]) + def test_get_discovery_states(self, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + res = self.module.get_discovery_states(ome_connection_mock_for_discovery) + assert res == params["res"] + + @pytest.mark.parametrize("params", [{"mparams": {"schedule": 'RunNow'}, + 'schedule_payload': {"RunNow": True, "RunLater": False, 'Cron': "startnow"}}, + {"mparams": {"schedule": 'RunLater', 'cron': "1 2 3 4 5 *"}, + 'schedule_payload': {"RunNow": False, "RunLater": True, + 'Cron': "1 2 3 4 5 *"}}, ]) + def 
test_get_schedule(self, params): + f_module = self.get_module_mock(params=params["mparams"]) + res = self.module.get_schedule(f_module) + assert res == params['schedule_payload'] + + @pytest.mark.parametrize("params", [{"json_data": { + "value": [{"ProtocolName": "SNMP", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "SNMP", "DeviceTypeId": 5000, "DeviceTypeName": "DELL STORAGE"}, + {"ProtocolName": "SNMP", "DeviceTypeId": 7000, "DeviceTypeName": "NETWORK SWITCH"}, + {"ProtocolName": "WSMAN", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "WSMAN", "DeviceTypeId": 2000, "DeviceTypeName": "CHASSIS"}, + {"ProtocolName": "REDFISH", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "REDFISH", "DeviceTypeId": 2000, "DeviceTypeName": "CHASSIS", }, + {"ProtocolName": "IPMI", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "SSH", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "VMWARE", "DeviceTypeId": 1000, "DeviceTypeName": "SERVER"}, + {"ProtocolName": "STORAGE", "DeviceTypeId": 5000, "DeviceTypeName": "DELL STORAGE"}]}, + "dev_id_map": {"CHASSIS": 2000, "DELL STORAGE": 5000, "NETWORK SWITCH": 7000, "SERVER": 1000, "STORAGE": 5000}, + "proto_dev_map": {"CHASSIS": ["WSMAN", "REDFISH"], "DELL STORAGE": ["SNMP", "STORAGE"], + "NETWORK SWITCH": ["SNMP"], + "STORAGE": ["SNMP", "STORAGE"], + "SERVER": ["SNMP", "WSMAN", "REDFISH", "IPMI", "SSH", "VMWARE"]}}]) + def test_get_protocol_device_map(self, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + prot_dev_map, dev_id_map = self.module.get_protocol_device_map(ome_connection_mock_for_discovery) + assert prot_dev_map == params['proto_dev_map'] + assert dev_id_map == params['dev_id_map'] + + @pytest.mark.parametrize("params", [{ + "mparams": {"discovery_job_name": 'd1', 'trap_destination': True, 
'community_string': True, + 'email_recipient': 'abc@email.com', 'description': "d1_desc"}, + 'other_dict': {"DiscoveryConfigGroupName": 'd1', "TrapDestination": True, 'CommunityString': True, + 'DiscoveryStatusEmailRecipient': 'abc@email.com'}}]) + def test_get_other_discovery_payload(self, params): + f_module = self.get_module_mock(params=params["mparams"]) + res = self.module.get_other_discovery_payload(f_module) + assert res == params['other_dict'] + + @pytest.mark.parametrize("params", [{"json_data": {"value": [{"Id": 1, "StartTime": "2021-04-19 04:54:18.427"}, + {"Id": 2, "StartTime": "2021-04-19 04:55:18.427"}]}, + "ips": {"Failed": ["192.168.1.2"], "Completed": ["192.168.1.3"]}, + "pag_ret_val": { + "value": [{"Key": "192.168.1.2", "JobStatus": {"Name": "Failed"}}, + {"Key": "192.168.1.3", "JobStatus": {"Name": "Completed"}}]}}]) + def test_get_execution_details(self, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + ome_connection_mock_for_discovery.get_all_items_with_pagination.return_value = params['pag_ret_val'] + f_module = self.get_module_mock() + ips = self.module.get_execution_details(f_module, ome_connection_mock_for_discovery, 1) + assert ips == params['ips'] + + @pytest.mark.parametrize("params", [{"json_data": {'JobStatusId': 2060}, 'job_wait_sec': 60, 'job_failed': False, + "msg": JOB_TRACK_SUCCESS.format('completed successfully')}, + {"json_data": {'JobStatusId': 2070}, 'job_wait_sec': 60, 'job_failed': True, + "msg": JOB_TRACK_FAIL.format('Failed')}, + {"json_data": {'JobStatusId': 2050}, 'job_wait_sec': 60, 'job_failed': True, + "msg": JOB_TRACK_INCOMPLETE.format(1, 2)}, ]) + def test_discovery_job_tracking(self, params, mocker, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + mocker.patch(MODULE_PATH + 
'time.sleep', return_value=None) + job_failed, msg = self.module.discovery_job_tracking(ome_connection_mock_for_discovery, 1, + params['job_wait_sec']) + assert job_failed == params['job_failed'] + assert msg == params['msg'] + + @pytest.mark.parametrize("params", [{"discovery_json": {'DiscoveryConfigTaskParam': [{'TaskId': 12}]}, + 'job_id': 12, "json_data": {"value": [{"Id": 1}, {"Id": 2}]}}, + {"discovery_json": {'DiscoveryConfigGroupId': 123, + 'DiscoveryConfigTaskParam': [{'TaskId': 12}, + {'TaskId': 23}]}, + 'job_id': 12, "json_data": {"value": [{'DiscoveryConfigGroupId': 234, + "JobId": 2}, + {'DiscoveryConfigGroupId': 123, + "JobId": 12}, ]}}]) + def test_get_job_data(self, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + job_id = self.module.get_job_data(params['discovery_json'], ome_connection_mock_for_discovery) + assert job_id == params['job_id'] + + @pytest.mark.parametrize("params", [{"disc_config": { + "ipmi": {"kgkey": None, "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "retries": 3, "timeout": 60, + "username": "root"}, + "wsman": {"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "retries": 3, "timeout": 60, "username": "root"}}, + 'conn_profile': {"credentials": [{"authType": "Basic", "credentials": {"kgkey": None, + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "retries": 3, "timeout": 60, + "username": "root"}, "modified": False, + "type": "IPMI"}], "profileDescription": "", "profileId": 0, "profileName": "", + "type": "DISCOVERY"}}]) + def test_get_connection_profile(self, params): + conn_profile = self.module.get_connection_profile(params['disc_config']) + assert conn_profile['type'] == params['conn_profile']['type'] + + @pytest.mark.parametrize("params", [{"disc_cfg_list": [{ + "ConnectionProfile": "{\"profileDescription\": \"\", \"profileId\": 0, \"type\": \"DISCOVERY\", \"credentials\"" + ": 
[{\"credentials\": {\"retries\": 3, \"community\": \"public\", \"timeout\": 3, \"port\"" + ": 161}, \"authType\": \"Basic\", \"type\": \"SNMP\", \"modified\": False}], " + "\"profileName\": \"\"}", "DeviceType": [1000], + "DiscoveryConfigTargets": [{"NetworkAddressDetail": "196.168.24.17"}]}], + "get_conn_json": {"profileId": 0, "profileName": "", "profileDescription": "", "type": "DISCOVERY", + 'credentials': [{'authType': 'Basic', + 'credentials': {'community': 'public', 'port': 161, 'retries': 3, + 'timeout': 3}, 'id': 116, 'modified': False, + 'type': 'SNMP'}]}, "DeviceType": [1000], + "DiscoveryConfigTargets": [{"NetworkAddressDetail": "196.168.24.17"}], 'mparams': {'discovery_config_targets': [ + {"device_types": ["SERVER"], "network_address_detail": ["196.168.24.17"], + "snmp": {"community": "public", "port": 161, "retries": 3, "timeout": 3}}]}}]) + def test_get_discovery_config(self, params, mocker, ome_connection_mock_for_discovery, ): + dev_id_map = {"CHASSIS": 2000, "DELL STORAGE": 5000, "NETWORK SWITCH": 7000, "SERVER": 1000, "STORAGE": 5000} + proto_dev_map = {"CHASSIS": ["WSMAN", "REDFISH"], "DELL STORAGE": ["SNMP", "STORAGE"], + "NETWORK SWITCH": ["SNMP"], "SERVER": ["SNMP", "WSMAN", "REDFISH", "IPMI", "SSH", "VMWARE"]} + f_module = self.get_module_mock(params=params['mparams']) + mocker.patch(MODULE_PATH + 'get_protocol_device_map', return_value=(proto_dev_map, dev_id_map)) + mocker.patch(MODULE_PATH + 'get_connection_profile', return_value=params['get_conn_json']) + disc_cfg_list = self.module.get_discovery_config(f_module, ome_connection_mock_for_discovery) + assert disc_cfg_list[0]['DeviceType'] == params['DeviceType'] + assert disc_cfg_list[0]['DiscoveryConfigTargets'] == params[ + 'DiscoveryConfigTargets'] # assert disc_cfg_list == params['disc_cfg_list'] + + @pytest.mark.parametrize("params", [{"json_data": {"@odata.type": "#DiscoveryConfigService.DiscoveryJob", + "@odata.id": "/api/DiscoveryConfigService/Jobs(12617)", + "JobId": 12617, 
"JobName": "D1", "JobSchedule": "startnow", + "DiscoveryConfigExpectedDeviceCount": 713, + "DiscoveryConfigDiscoveredDeviceCount": 0, + "DiscoveryConfigEmailRecipient": "jag@dell.com", }, + "djob": {"JobId": 12617, "JobName": "D1", "JobSchedule": "startnow", + "DiscoveryConfigExpectedDeviceCount": 713, + "DiscoveryConfigDiscoveredDeviceCount": 0, + "DiscoveryConfigEmailRecipient": "jag@dell.com", }}]) + def test_get_discovery_job(self, params, ome_connection_mock_for_discovery, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + djob = self.module.get_discovery_job(ome_connection_mock_for_discovery, 12) + assert djob == params['djob'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': False, 'job_message': DISCOVER_JOB_COMPLETE, + 'mparams': {'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}}, + {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, 'job_message': JOB_TRACK_FAIL, + 'mparams': {'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}}, + {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, 'job_message': DISCOVERY_SCHEDULED, + 'mparams': {'job_wait': False, 'schedule': 'RunLater', 'job_wait_timeout': 1000}}]) + def test_create_discovery(self, params, mocker, ome_connection_mock_for_discovery, ome_response_mock): + mocker.patch(MODULE_PATH + 'get_discovery_config', return_value={}) + mocker.patch(MODULE_PATH + 'get_schedule', return_value={}) + mocker.patch(MODULE_PATH + 'get_other_discovery_payload', return_value={}) + mocker.patch(MODULE_PATH + 'get_job_data', return_value=12) + mocker.patch(MODULE_PATH + 'get_execution_details', return_value={}) + mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={}) + mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_failed'], params['job_message'])) + ome_response_mock.success = 
params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params['mparams']) + error_message = params["job_message"] + with pytest.raises(Exception) as err: + self.module.create_discovery(f_module, ome_connection_mock_for_discovery) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_discovery_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_discovery, ome_response_mock): + ome_default_args.update({"state": "absent", "discovery_job_name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_existing_discovery', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_existing_discovery', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_existing_discovery', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + @pytest.mark.parametrize( + "params", [{"json_data": {"DiscoveryConfigGroupName": 'd1'}, + 'job_failed': False, 'job_message': DISCOVER_JOB_COMPLETE, + 'mparams': {'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}}, + {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, + 'job_message': JOB_TRACK_FAIL, + 'mparams': {'job_wait': True, 'schedule': 'RunNow', 
'job_wait_timeout': 1000}}, + {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, + 'job_message': DISCOVERY_SCHEDULED, + 'mparams': {'job_wait': False, 'schedule': 'RunLater', 'job_wait_timeout': 1000}}]) + def test_modify_discovery(self, params, mocker, ome_connection_mock_for_discovery, ome_response_mock): + discov_list = [{"DiscoveryConfigGroupId": 12, "DiscoveryConfigGroupName": "my_discovery1"}] + f_module = self.get_module_mock(params=params['mparams']) + mocker.patch(MODULE_PATH + 'get_other_discovery_payload', return_value={"DiscoveryConfigGroupId": 10}) + mocker.patch(MODULE_PATH + 'update_modify_payload', return_value=None) + mocker.patch(MODULE_PATH + 'get_job_data', return_value=12) + mocker.patch(MODULE_PATH + 'get_execution_details', return_value={}) + mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={}) + mocker.patch(MODULE_PATH + 'get_discovery_config', return_value={}) + mocker.patch(MODULE_PATH + 'get_discovery_states', return_value={12: 15}) + mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_failed'], params['job_message'])) + error_message = params["job_message"] + with pytest.raises(Exception) as err: + self.module.modify_discovery(f_module, ome_connection_mock_for_discovery, discov_list) + assert err.value.args[0] == error_message + + def test_modify_discovery_failure_case01(self, ome_connection_mock_for_discovery): + multi_disc_msg = MULTI_DISCOVERY + f_module = self.get_module_mock(params={'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}) + with pytest.raises(Exception) as err: + self.module.modify_discovery(f_module, ome_connection_mock_for_discovery, + [{"DiscoveryConfigGroupId": 1, "DiscoveryConfigGroupName": "my_discovery1"}, + {"DiscoveryConfigGroupId": 2, "DiscoveryConfigGroupName": "my_discovery2"}]) + assert err.value.args[0] == multi_disc_msg + + def test_modify_discovery_failure_case2(self, mocker, ome_connection_mock_for_discovery): + f_module = 
self.get_module_mock(params={'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}) + job_run_msg = DISC_JOB_RUNNING.format(name='my_discovery1', id=12) + mocker.patch(MODULE_PATH + 'get_discovery_states', return_value={12: 2050}) + with pytest.raises(Exception) as err: + self.module.modify_discovery(f_module, ome_connection_mock_for_discovery, [ + {"DiscoveryConfigGroupId": 12, "DiscoveryConfigGroupName": "my_discovery1"}]) + assert err.value.args[0] == job_run_msg + + def test_update_modify_payload(self): + current_payload = { + "DiscoveryConfigGroupId": 21, + "DiscoveryConfigGroupName": "Discoverystorage", + "DiscoveryStatusEmailRecipient": None, + "DiscoveryConfigModels": [ + { + "DiscoveryConfigId": 41, + "DiscoveryConfigStatus": None, + "DiscoveryConfigTargets": [ + { + "DiscoveryConfigTargetId": 41, + "NetworkAddressDetail": "mock_network_address", + "SubnetMask": None, + "AddressType": 1, + "Disabled": False, + "Exclude": False + } + ], + "ConnectionProfileId": 21341, + "ConnectionProfile": "{\n \"profileId\" : 21341,\n \"profileName\" : \"\"," + "\n \"profileDescription\" : \"\",\n \"type\" : \"DISCOVERY\"," + "\n \"updatedBy\" : null,\n \"updateTime\" : 1617952521213," + "\n \"credentials\" : [ {\n \"type\" : \"STORAGE\",\n \"authType\" : " + "\"Basic\",\n \"modified\" : false,\n \"id\" : 44," + "\n \"credentials\" : {\n \"username\" : \"root\"," + "\n \"password\" : null,\n \"domain\" : null,\n \"caCheck\" : " + "false,\n \"cnCheck\" : false,\n \"certificateData\" : null," + "\n \"certificateDetail\" : null,\n \"port\" : 443," + "\n \"retries\" : 3,\n \"timeout\" : 60,\n \"isHttp\" : " + "false,\n \"keepAlive\" : true,\n \"version\" : null\n }\n } " + "]\n}", + "DeviceType": [ + 5000 + ] + } + ], + "Schedule": { + "RunNow": False, + "RunLater": False, + "Recurring": None, + "Cron": "startnow", + "StartTime": None, + "EndTime": None + }, + "TrapDestination": False, + "CommunityString": False, + "UseAllProfiles": False, + "CreateGroup": True + 
} + discovery_modify_payload = { + "DiscoveryConfigGroupName": "name1" + } + self.module.update_modify_payload(discovery_modify_payload, current_payload, new_name="name2") + assert discovery_modify_payload["DiscoveryConfigGroupName"] == "name2" + assert discovery_modify_payload["Schedule"]["RunNow"] is True + assert discovery_modify_payload["Schedule"]["RunLater"] is False + assert discovery_modify_payload["Schedule"]["Cron"] == "startnow" + + def test_update_modify_payload_case2(self): + current_payload = { + "DiscoveryConfigGroupId": 21, + "DiscoveryConfigGroupName": "Discoverystorage", + "DiscoveryStatusEmailRecipient": None, + "DiscoveryConfigModels": [ + { + "DiscoveryConfigId": 41, + "DiscoveryConfigStatus": None, + "DiscoveryConfigTargets": [ + { + "DiscoveryConfigTargetId": 41, + "NetworkAddressDetail": "mock_network_address", + "SubnetMask": None, + "AddressType": 1, + "Disabled": False, + "Exclude": False + } + ], + "ConnectionProfileId": 21341, + "ConnectionProfile": "{\n \"profileId\" : 21341,\n \"profileName\" : \"\"," + "\n \"profileDescription\" : \"\",\n \"type\" : \"DISCOVERY\"," + "\n \"updatedBy\" : null,\n \"updateTime\" : 1617952521213," + "\n \"credentials\" : [ {\n \"type\" : \"STORAGE\",\n \"authType\" : " + "\"Basic\",\n \"modified\" : false,\n \"id\" : 44," + "\n \"credentials\" : {\n \"username\" : \"root\"," + "\n \"password\" : null,\n \"domain\" : null,\n \"caCheck\" : " + "false,\n \"cnCheck\" : false,\n \"certificateData\" : null," + "\n \"certificateDetail\" : null,\n \"port\" : 443," + "\n \"retries\" : 3,\n \"timeout\" : 60,\n \"isHttp\" : " + "false,\n \"keepAlive\" : true,\n \"version\" : null\n }\n } " + "]\n}", + "DeviceType": [ + 5000 + ] + } + ], + "Schedule": { + "RunNow": False, + "RunLater": False, + "Recurring": None, + "Cron": "startnow", + "StartTime": None, + "EndTime": None + }, + "TrapDestination": False, + "CommunityString": False, + "UseAllProfiles": False, + "CreateGroup": True + } + discovery_modify_payload = { 
+ "DiscoveryConfigGroupName": "name1", + "TrapDestination": True, + "CommunityString": True, + "Schedule": { + "Cron": "startlater", + "RunNow": False, + + } + } + self.module.update_modify_payload(discovery_modify_payload, current_payload) + assert discovery_modify_payload["DiscoveryConfigGroupName"] == "name1" + assert discovery_modify_payload["TrapDestination"] is True + assert discovery_modify_payload["CommunityString"] is True + assert discovery_modify_payload["Schedule"]["Cron"] == "startlater" + assert discovery_modify_payload["Schedule"]["RunNow"] is False diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py new file mode 100644 index 00000000..c931ed82 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.0.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_domain_user_groups +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_domain_user_groups.' +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." 
+ + +@pytest.fixture +def ome_conn_mock_ad(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEADUser(FakeAnsibleModule): + + module = ome_domain_user_groups + + def test_get_directory_user(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"state": "absent", "group_name": "Administrator"}) + ome_response_mock.json_data = {"value": [{"UserName": "Administrator", "RoleId": "10", "UserTypeId": 2}]} + result = self.module.get_directory_user(f_module, ome_conn_mock_ad) + assert result["UserName"] == "Administrator" + + f_module = self.get_module_mock(params={"state": "absent"}) + ome_response_mock.json_data = {"value": [{"UserName": "Administrator", "RoleId": "10", "UserTypeId": 2}]} + with pytest.raises(Exception) as err: + self.module.get_directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "missing required arguments: group_name" + + f_module = self.get_module_mock(params={"state": "absent", "group_name": "Administrator"}) + f_module.check_mode = True + ome_response_mock.json_data = {"value": [{"UserName": "Administrator", "RoleId": "10", "UserTypeId": 2}]} + with pytest.raises(Exception) as err: + self.module.get_directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "Changes found to be applied." + + f_module = self.get_module_mock(params={"state": "absent", "group_name": "Administrator"}) + f_module.check_mode = True + ome_response_mock.json_data = {"value": []} + with pytest.raises(Exception) as err: + self.module.get_directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "No changes found to be applied." 
+ + f_module = self.get_module_mock(params={"state": "absent", "group_name": "Administrator"}) + ome_response_mock.json_data = {"value": []} + with pytest.raises(Exception) as err: + self.module.get_directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == NO_CHANGES_MSG + + def test_delete_directory_user(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + ome_response_mock.status_code = 204 + msg, changed = self.module.delete_directory_user(ome_conn_mock_ad, 15011) + assert msg == "Successfully deleted the active directory user group." + assert changed is True + + def test_get_role(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"state": "present", "group_name": "Administrator", + "role": "Administrator"}) + ome_response_mock.json_data = {"value": [{"Name": "ADMINISTRATOR", "Id": 10}]} + result = self.module.get_role(f_module, ome_conn_mock_ad) + assert result == 10 + + f_module = self.get_module_mock(params={"state": "present", "group_name": "Administrator", + "role": "Administrator"}) + ome_response_mock.json_data = {"value": [{"Name": "ADMIN", "Id": 10}]} + with pytest.raises(Exception) as err: + self.module.get_role(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "Unable to complete the operation because the entered " \ + "role name 'Administrator' does not exist." 
+ + f_module = self.get_module_mock(params={"state": "present", "group_name": "Administrator"}) + ome_response_mock.json_data = {"value": [{"Name": "ADMIN", "Id": 10}]} + with pytest.raises(Exception) as err: + self.module.get_role(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "missing required arguments: role" + + def test_search_directory(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"state": "present", "group_name": "Administrator", + "domain_username": "admin@dev0", "domain_password": "password"}) + ome_response_mock.json_data = [{"CommonName": "Administrator", "ObjectGuid": "object_id"}] + obj_id, name = self.module.search_directory(f_module, ome_conn_mock_ad, 16011) + assert obj_id == "object_id" + + f_module = self.get_module_mock(params={"state": "present", "group_name": "Admin", + "domain_username": "admin@dev0", "domain_password": "password"}) + with pytest.raises(Exception) as err: + self.module.search_directory(f_module, ome_conn_mock_ad, 16011) + assert err.value.args[0] == "Unable to complete the operation because the entered " \ + "group name 'Admin' does not exist." 
+ + def test_get_directory(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock() + with pytest.raises(Exception) as err: + self.module.get_directory(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "missing required arguments: directory_name or directory_id" + + f_module = self.get_module_mock(params={"directory_name": "test_directory"}) + ome_response_mock.json_data = {'value': [{"Name": "test_directory", "Id": 1}]} + result = self.module.get_directory(f_module, ome_conn_mock_ad) + assert result == 1 + + f_module = self.get_module_mock(params={"directory_id": 2}) + ome_response_mock.json_data = {'value': [{"Name": "test_directory", "Id": 2}]} + result = self.module.get_directory(f_module, ome_conn_mock_ad) + assert result == 2 + + f_module = self.get_module_mock(params={"directory_id": 3}) + with pytest.raises(Exception) as err: + self.module.get_directory(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "Unable to complete the operation because the entered " \ + "directory id '3' does not exist." 
+ + def test_directory_user(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"group_name": "Administrator", "role": "administrator"}) + mocker.patch(MODULE_PATH + "get_directory_user", return_value={"UserName": "Administrator", "Id": 15011, + "RoleId": "10", "Enabled": True}) + mocker.patch(MODULE_PATH + "get_role", return_value=16) + mocker.patch(MODULE_PATH + "get_directory", return_value=10612) + mocker.patch(MODULE_PATH + "search_directory", return_value=("obj_gui_id", "administrator")) + ome_response_mock.json_data = [{"Name": "Account Operators", "Id": "16617", "ObjectGuid": "a491859c"}] + resp, msg = self.module.directory_user(f_module, ome_conn_mock_ad) + assert msg == 'updated' + + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "Changes found to be applied." + + mocker.patch(MODULE_PATH + "get_directory_user", return_value={"UserName": "Administrator", "Id": 15011, + "RoleId": "16", "Enabled": True}) + with pytest.raises(Exception) as err: + self.module.directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "No changes found to be applied." + + f_module.check_mode = False + mocker.patch(MODULE_PATH + "get_directory_user", return_value={"UserName": "Administrator", "Id": 15011, + "RoleId": "16", "Enabled": True}) + with pytest.raises(Exception) as err: + self.module.directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == NO_CHANGES_MSG + + mocker.patch(MODULE_PATH + "get_directory_user", return_value=None) + f_module.check_mode = True + with pytest.raises(Exception) as err: + self.module.directory_user(f_module, ome_conn_mock_ad) + assert err.value.args[0] == "Changes found to be applied." 
+ + f_module.check_mode = False + resp, msg = self.module.directory_user(f_module, ome_conn_mock_ad) + assert msg == "imported" + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_domain_exception(self, exc_type, mocker, ome_default_args, + ome_conn_mock_ad, ome_response_mock): + ome_default_args.update({"state": "absent"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_directory_user', side_effect=exc_type("url open error")) + result = self._run_module_with_fail_json(ome_default_args) + assert result["failed"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_directory_user', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_directory_user', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py new file mode 100644 index 00000000..082b8293 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py @@ -0,0 +1,554 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from mock import patch, mock_open + +import pytest +import json +import sys +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' +NO_CHANGES_MSG = "No changes found to be applied. Either there are no updates present or components specified are not" \ + " found in the baseline." +COMPLIANCE_READ_FAIL = "Failed to read compliance report." +APPLICABLE_DUP = "Unable to get applicable components DUP." 
device_resource = {"device_path": "DeviceService/Devices"}


@pytest.fixture
def ome_connection_firmware_mock(mocker, ome_response_mock):
    """Patch RestOME in ome_firmware and yield a connection mock whose
    invoke_request always returns the shared ome_response_mock."""
    connection_class_mock = mocker.patch(MODULE_PATH + 'ome_firmware.RestOME')
    # The module uses RestOME as a context manager, so the object the code
    # actually talks to is the __enter__ return value.
    ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
    ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
    return ome_connection_mock_obj


class TestOmeFirmware(FakeAnsibleModule):
    """Unit tests for the ome_firmware module."""
    module = ome_firmware

    @pytest.fixture
    def get_dup_file_mock(self):
        # Builds a mock DUP file handle. NOTE(review): the fixture has no
        # return statement, so consumers receive None and only the mock's
        # construction side effect — confirm this is intentional.
        m = mock_open()
        m.return_value.readlines.return_value = ['this is line 1\n']

    # Representative firmware-update job payload; used both as the request
    # sent to spawn_update_job and as the expected echoed response below.
    payload = {
        "Builtin": False,
        "CreatedBy": "admin",
        "Editable": True,
        "EndTime": None,
        "Id": 29099,
        "JobDescription": "Firmware Update Task",
        "JobName": "Firmware Update Task",
        "JobStatus": {
            "Id": 2080,
            "Name": "New"
        },
        "JobType": {
            "Id": 5,
            "Internal": False,
            "Name": "Update_Task"
        },
        "LastRun": None,
        "LastRunStatus": {
            "Id": 2200,
            "Name": "NotRun"
        },
        "NextRun": None,
        "Params": [
            {
                "JobId": 29099,
                "Key": "operationName",
                "Value": "INSTALL_FIRMWARE"
            },
            {
                "JobId": 29099,
                "Key": "complianceUpdate",
                "Value": "false"
            },
            {
                "JobId": 29099,
                "Key": "stagingValue",
                "Value": "false"
            },
            {
                "JobId": 29099,
                "Key": "signVerify",
                "Value": "true"
            }
        ],
        "Schedule": "startnow",
        "StartTime": None,
        "State": "Enabled",
        "Targets": [
            {
                "Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577776981156",
                "Id": 28628,
                "JobId": 29099,
                "TargetType": {
                    "Id": 1000,
                    "Name": "DEVICE"
                }
            }
        ],
        "UpdatedBy": None,
        "Visible": True
    }

    @pytest.mark.parametrize("param", [payload])
    def test_spawn_update_job_case(self, param, ome_response_mock,
                                   ome_connection_firmware_mock):
        """spawn_update_job returns the created job payload on HTTP 201."""
        ome_response_mock.status_code = 201
        ome_response_mock.success = True
        # The mocked OME response echoes back the job that was submitted.
        ome_response_mock.json_data = {"Builtin": False,
                                       "CreatedBy": "admin",
                                       "Editable": True,
                                       "EndTime": None,
                                       "Id": 29099,
                                       "JobDescription": "Firmware Update Task",
                                       "JobName": "Firmware Update Task",
                                       "JobStatus": {"Id": 2080,
                                                     "Name": "New"},
                                       "JobType": {"Id": 5,
                                                   "Internal": False,
                                                   "Name": "Update_Task"},
                                       "LastRun": None,
                                       "LastRunStatus": {"Id": 2200,
                                                         "Name": "NotRun"},
                                       "NextRun": None,
                                       "Params": [{"JobId": 29099,
                                                   "Key": "operationName",
                                                   "Value": "INSTALL_FIRMWARE"},
                                                  {"JobId": 29099,
                                                   "Key": "complianceUpdate",
                                                   "Value": "false"},
                                                  {"JobId": 29099,
                                                   "Key": "stagingValue",
                                                   "Value": "false"},
                                                  {"JobId": 29099,
                                                   "Key": "signVerify",
                                                   "Value": "true"}],
                                       "Schedule": "startnow",
                                       "StartTime": None,
                                       "State": "Enabled",
                                       "Targets": [{"Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577776981156",
                                                    "Id": 28628,
                                                    "JobId": 29099,
                                                    "TargetType": {"Id": 1000,
                                                                   "Name": "DEVICE"}}],
                                       "UpdatedBy": None,
                                       "Visible": True}
        result = self.module.spawn_update_job(ome_connection_firmware_mock, param)
        assert result == param

    # Expected job payload built by job_payload_for_update for target_data.
    payload1 = {
        "Id": 0, "JobName": "Firmware Update Task",
        "JobDescription": "Firmware Update Task", "Schedule": "startnow",
        "State": "Enabled", "CreatedBy": "admin",
        "JobType": {"Id": 5, "Name": "Update_Task"},
        "Targets": [{
            "Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577786112600",
            "Id": 28628,
            "TargetType": {
                "Id": 1000,
                "Name": "SERVER"
            }
        }],
        "Params": [{"JobId": 0, "Key": "operationName", "Value": "INSTALL_FIRMWARE"},
                   {"JobId": 0, "Key": "complianceUpdate", "Value": "false"},
                   {"JobId": 0, "Key": "stagingValue", "Value": "false"},
                   {"JobId": 0, "Key": "signVerify", "Value": "true"}]
    }
    target_data = [
        {
            "Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577786112600",
            "Id": 28628,
            "TargetType": {
                "Id": 1000,
                "Name": "SERVER"
            }
        }
    ]

    @pytest.mark.parametrize("param", [{"inp": target_data, "out": payload1}])
    def _test_job_payload_for_update_success_case(self,
                                                  ome_connection_firmware_mock, param):
        # NOTE(review): disabled test — the leading underscore keeps pytest
        # from collecting it. Its argument order (f_module before the
        # connection) also differs from the other job_payload_for_update
        # tests below; confirm the correct order before re-enabling.
        f_module = self.get_module_mock()
        payload = self.module.job_payload_for_update(f_module,
                                                     ome_connection_firmware_mock, param["inp"])
        assert payload == param["out"]

    # Sample applicability-report rows (currently unused by the tests below).
    dupdata = [{"DeviceId": 1674, "DeviceReport": {"DeviceTypeId": "1000", "DeviceTypeName": "SERVER"}},
               {"DeviceId": 1662, "DeviceReport": {"DeviceTypeId": "1000", "DeviceTypeName": "SERVER"}}]

    filepayload1 = {'SingleUpdateReportBaseline': [],
                    'SingleUpdateReportGroup': [],
                    'SingleUpdateReportFileToken': 1577786112600,
                    'SingleUpdateReportTargets': [1674, 2222, 3333]}

    @pytest.mark.parametrize("param", [{"inp": filepayload1, "outp": target_data}])
    def test_get_applicable_components_success_case(self, param, ome_default_args, ome_response_mock,
                                                    ome_connection_firmware_mock):
        """get_applicable_components maps the OME report to target entries."""
        ome_response_mock.json_data = [
            {
                "DeviceId": 28628,
                "DeviceReport": {
                    "Components": [
                        {
                            "ComponentCriticality": "Recommended",
                            "ComponentCurrentVersion": "2.4.7",
                            "ComponentName": "PowerEdge BIOS",
                            "ComponentRebootRequired": "true",
                            "ComponentSourceName": "DCIM:INSTALLED#741__BIOS.Setup.1-1",
                            "ComponentTargetIdentifier": "159",
                            "ComponentUniqueIdentifier": "72400448-3a22-4da9-bd19-27a0e2082962",
                            "ComponentUpdateAction": "EQUAL",
                            "ComponentUriInformation": None,
                            "ComponentVersion": "2.4.7",
                            "ImpactAssessment": "",
                            "IsCompliant": "OK",
                            "PrerequisiteInfo": ""
                        }
                    ],
                    "DeviceIPAddress": "192.168.0.3",
                    "DeviceId": "28628",
                    "DeviceModel": "PowerEdge R940",
                    "DeviceName": "192.168.0.3",
                    "DeviceServiceTag": "HC2XFL2",
                    "DeviceTypeId": "1000",
                    "DeviceTypeName": "SERVER"
                }
            }
        ]
        ome_response_mock.success = True
        ome_response_mock.status_code = 200
        f_module = self.get_module_mock()
        result = self.module.get_applicable_components(ome_connection_firmware_mock, param["inp"], f_module)
        assert result == param["outp"]

    @pytest.mark.parametrize("param", [payload])
    def test_get_applicable_components_failed_case(self, param, ome_default_args, ome_response_mock):
        """A failed (HTTP 500) applicability request raises APPLICABLE_DUP."""
        ome_response_mock.json_data = {
            "value":
                [{"DeviceReport": {"DeviceTypeId": "1000", "DeviceTypeName": "SERVER"}, "DeviceId": "Id"}]}
        ome_response_mock.status_code = 500
        ome_response_mock.success = False
        f_module = self.get_module_mock()
        with pytest.raises(Exception) as exc:
            # NOTE(review): the response mock is passed where the connection
            # is expected — the call still exercises the failure path, but
            # confirm this is deliberate rather than a fixture mix-up.
            self.module.get_applicable_components(ome_response_mock, param, f_module)
        assert exc.value.args[0] == APPLICABLE_DUP

    filepayload = {'SingleUpdateReportBaseline': [],
                   'SingleUpdateReportGroup': [],
                   'SingleUpdateReportTargets': [],
                   'SingleUpdateReportFileToken': '1577786112600'}

    outpayload = {'SingleUpdateReportBaseline': [],
                  'SingleUpdateReportGroup': [],
                  'SingleUpdateReportTargets': [],
                  'SingleUpdateReportFileToken': '1577786112600'}

    # Each case supplies exactly one of device/group/baseline IDs and
    # expects it reflected into the matching report field.
    @pytest.mark.parametrize(
        "duppayload",
        [
            {'file_token': '1577786112600', 'device_ids': None, 'group_ids': None, 'baseline_ids': None,
             "out": outpayload},
            {'file_token': '1577786112600', 'device_ids': [123], 'group_ids': None, 'baseline_ids': None,
             "out": {'SingleUpdateReportBaseline': [],
                     'SingleUpdateReportGroup': [],
                     'SingleUpdateReportTargets': [123],
                     'SingleUpdateReportFileToken': '1577786112600'}},
            {'file_token': '1577786112600', 'device_ids': None, 'group_ids': [123], 'baseline_ids': None,
             "out": {'SingleUpdateReportBaseline': [],
                     'SingleUpdateReportGroup': [123],
                     'SingleUpdateReportTargets': [],
                     'SingleUpdateReportFileToken': '1577786112600'}},
            {'file_token': '1577786112600', 'device_ids': None, 'group_ids': None, 'baseline_ids': [123],
             "out": {'SingleUpdateReportBaseline': [123],
                     'SingleUpdateReportGroup': [],
                     'SingleUpdateReportTargets': [],
                     'SingleUpdateReportFileToken': '1577786112600'}}])
    def test_get_dup_applicability_payload_success_case(self, duppayload):
        """get_dup_applicability_payload builds the report payload per input kind."""
        data = self.module.get_dup_applicability_payload(
            duppayload.get('file_token'),
            duppayload.get('device_ids'), duppayload.get('group_ids'), duppayload.get('baseline_ids'))
        assert data == duppayload["out"]

    def test_upload_dup_file_success_case01(self,
                                            ome_connection_firmware_mock, ome_response_mock):
        """Successful DUP upload returns (True, <file token>)."""
        ome_response_mock.json_data = "1577786112600"
        ome_response_mock.success = True
        ome_response_mock.status_code = 200
        f_module = self.get_module_mock(params={'dup_file': "/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE"})
        # Patch the builtin open() so no real file is read (py2/py3 name differs).
        if sys.version_info.major == 3:
            builtin_module_name = 'builtins'
        else:
            builtin_module_name = '__builtin__'
        with patch("{0}.open".format(builtin_module_name), mock_open(read_data="data")) as mock_file:
            result = self.module.upload_dup_file(ome_connection_firmware_mock, f_module)
            assert result == (True, "1577786112600")

    def test_upload_dup_file_failure_case02(self, ome_default_args,
                                            ome_connection_firmware_mock, ome_response_mock):
        """A non-200 upload response fails with the 'Unable to upload' message."""
        ome_response_mock.json_data = {"value": [{"Id": [1111, 2222, 3333], "DeviceServiceTag": "KLBR222",
                                                  "dup_file": "/root/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE"}]}
        ome_response_mock.status_code = 500

        if sys.version_info.major == 3:
            builtin_module_name = 'builtins'
        else:
            builtin_module_name = '__builtin__'
        f_module = self.get_module_mock(
            params={'dup_file': "/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE", 'hostname': '192.168.0.1'})
        with patch("{0}.open".format(builtin_module_name), mock_open(read_data="data")) as mock_file:
            with pytest.raises(Exception) as exc:
                self.module.upload_dup_file(ome_connection_firmware_mock, f_module)
            assert exc.value.args[0] == "Unable to upload {0} to {1}".format('/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE',
                                                                            '192.168.0.1')

    def test_get_device_ids_success_case(self, ome_connection_firmware_mock, ome_response_mock, ome_default_args):
        """get_device_ids resolves mixed IDs/service tags to string device IDs."""
        ome_default_args.update()
        f_module = self.get_module_mock()
        ome_connection_firmware_mock.get_all_report_details.return_value = {
            "report_list": [{'Id': 1111, 'DeviceServiceTag': "ABC1111"},
                            {'Id': 2222, 'DeviceServiceTag': "ABC2222"},
                            {'Id': 3333, 'DeviceServiceTag': "ABC3333"},
                            {'Id': 4444, 'DeviceServiceTag': "ABC4444"}]}
        # "ABC4444" is resolved via the tag->id mapping; ids come back as strings.
        data, id_tag_map = self.module.get_device_ids(ome_connection_firmware_mock, f_module, [1111, 2222, 3333, "ABC4444"])
        assert data == ['1111', '2222', '3333', '4444']

    def test_get_device_ids_failure_case01(self, ome_connection_firmware_mock, ome_response_mock):
        """Unknown device IDs raise the 'invalid service tag/id' message."""
        ome_response_mock.json_data = {'value': [{'Id': 'DeviceServiceTag'}]}
        ome_response_mock.success = False
        f_module = self.get_module_mock()
        with pytest.raises(Exception) as exc:
            self.module.get_device_ids(ome_connection_firmware_mock, f_module, [2222])
        assert exc.value.args[0] == "Unable to complete the operation because the entered target device service" \
                                    " tag(s) or device id(s) '{0}' are invalid.".format("2222")

    def test__validate_device_attributes_success_case(self, ome_connection_firmware_mock, ome_response_mock,
                                                      ome_default_args):
        """_validate_device_attributes merges tags, ids and 'devices' entries."""
        ome_default_args.update({'device_service_tag': ['R9515PT'], 'device_id': [2222]})
        ome_response_mock.status_code = 200
        ome_response_mock.json_data = {'value': [{'device_service_tag': ['R9515PT'], 'device_id': [2222]}]}
        ome_response_mock.success = True
        f_module = self.get_module_mock(params={'device_service_tag': ['R9515PT'], 'device_id': [2222],
                                                'devices': [{'id': 1234}, {'service_tag': "ABCD123"}]})
        data = self.module._validate_device_attributes(f_module)
        assert "R9515PT" in data

    def test__validate_device_attributes_failed_case(self, ome_connection_firmware_mock, ome_response_mock):
        """With no targets supplied, _validate_device_attributes returns []."""
        ome_response_mock.json_data = {'value': [{'device_service_tag': None, 'device_id': None}]}
        ome_response_mock.success = False
        f_module = self.get_module_mock()
        # NOTE(review): the module no longer raises for empty targets; the
        # old expectation is kept below for reference.
        # with pytest.raises(Exception) as exc:
        devlist = self.module._validate_device_attributes(f_module)
        assert devlist == []
        # assert exc.value.args[0] == "Either device_id or device_service_tag or device_group_names" \
        #                             " or baseline_names should be specified."
    def test_get_group_ids_fail_case(self, ome_default_args, ome_response_mock, ome_connection_firmware_mock):
        """An unresolvable device group name fails the whole module run."""
        ome_default_args.update({'device_group_names': ["Servers"], "dup_file": ""})
        ome_response_mock.json_data = [{"Id": 1024,
                                        "Name": "Servers"}]
        ome_response_mock.success = False
        data = self._run_module_with_fail_json(ome_default_args)
        assert data["msg"] == "Unable to complete the operation because the entered target device group name(s)" \
                              " '{0}' are invalid.".format(",".join(set(["Servers"])))

    def test_get_device_component_map(self, ome_connection_firmware_mock, ome_response_mock,
                                      ome_default_args, mocker):
        """get_device_component_map keys components by resolved device id/tag."""
        mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes',
                     return_value=['R9515PT', 2222, 1234, 'ABCD123'])
        mocker.patch(MODULE_PATH + 'ome_firmware.get_device_ids',
                     return_value=([1234, 2222], {'1111': 'R9515PT', '1235': 'ABCD123'}))
        # Expected full mapping — currently unreferenced; the assertion below
        # only checks membership of 2222.
        output = {'1111': [], '1235': [], '2222': [], 1234: []}
        f_module = self.get_module_mock(params={'device_service_tag': ['R9515PT'], 'device_id': [2222],
                                                'components': [],
                                                'devices': [{'id': 1234, 'components': []},
                                                            {'service_tag': "ABCD123", 'components': []}]})
        data = self.module.get_device_component_map(ome_connection_firmware_mock, f_module)
        assert 2222 in data

    def test_main_firmware_success_case01(self, ome_default_args, mocker, ome_connection_firmware_mock):
        """End-to-end DUP-file flow: all helpers mocked, module reports success."""
        ome_default_args.update({"device_id": Constants.device_id1, "device_service_tag": Constants.service_tag1,
                                 "dup_file": ""})
        mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes',
                     return_value=[Constants.device_id1, Constants.service_tag1])
        mocker.patch(MODULE_PATH + 'ome_firmware.get_device_ids',
                     return_value=[Constants.device_id1, Constants.device_id2])
        mocker.patch(MODULE_PATH + 'ome_firmware.upload_dup_file',
                     return_value=["SUCCESS", "token_id"])
        mocker.patch(MODULE_PATH + 'ome_firmware.get_dup_applicability_payload',
                     return_value={"report_payload": "values"})
        mocker.patch(MODULE_PATH + 'ome_firmware.get_applicable_components',
                     return_value="target_data")
        mocker.patch(MODULE_PATH + 'ome_firmware.job_payload_for_update',
                     return_value={"job_payload": "values"})
        mocker.patch(MODULE_PATH + 'ome_firmware.spawn_update_job',
                     return_value="Success")
        data = self._run_module(ome_default_args)
        assert data['changed'] is True
        assert data['msg'] == "Successfully submitted the firmware update job."
        assert data['update_status'] == "Success"

    def test_main_firmware_success_case02(self, ome_default_args, mocker, ome_connection_firmware_mock):
        """End-to-end baseline flow: all helpers mocked, module reports success."""
        ome_default_args.update({"baseline_name": "baseline_name"})
        mocker.patch(MODULE_PATH + 'ome_firmware.validate_inputs')
        mocker.patch(MODULE_PATH + 'ome_firmware.get_baseline_ids',
                     return_value=[1, 2])
        mocker.patch(MODULE_PATH + 'ome_firmware.job_payload_for_update',
                     return_value={"job_payload": "values"})
        mocker.patch(MODULE_PATH + 'ome_firmware.spawn_update_job',
                     return_value="Success")
        mocker.patch(MODULE_PATH + 'ome_firmware.baseline_based_update',
                     return_value="target_data")
        data = self._run_module(ome_default_args)
        assert data['changed'] is True
        assert data['msg'] == "Successfully submitted the firmware update job."
        assert data['update_status'] == "Success"

    def test_job_payload_for_update_case_01(self, ome_connection_firmware_mock):
        """A None job-type lookup raises 'Unable to fetch the job type Id.'."""
        f_module = self.get_module_mock()
        target_data = {}
        ome_connection_firmware_mock.get_job_type_id.return_value = None
        msg = "Unable to fetch the job type Id."
        with pytest.raises(Exception, match=msg) as exc:
            self.module.job_payload_for_update(ome_connection_firmware_mock, f_module, target_data)

    def test_job_payload_for_update_case_02(self, ome_connection_firmware_mock, ome_response_mock):
        """Baseline details are stringified into the job Params list."""
        f_module = self.get_module_mock(params={'schedule': 'RebootNow'})
        target_data = {}
        baseline = {"baseline_id": 1, "repo_id": 2, "catalog_id": 3}
        ome_connection_firmware_mock.get_job_type_id.return_value = ome_response_mock
        payload = self.module.job_payload_for_update(ome_connection_firmware_mock, f_module, target_data, baseline)
        for item in payload["Params"]:
            if item["Key"] == "complianceReportId":
                assert item["Value"] == str(baseline["baseline_id"])
            if item["Key"] == "repositoryId":
                assert item["Value"] == str(baseline["repo_id"])
            if item["Key"] == "catalogId":
                assert item["Value"] == str(baseline["catalog_id"])

    def test_job_payload_for_update_case_03(self, ome_connection_firmware_mock, ome_response_mock):
        """Without a baseline, Params carry JobId 0 and complianceUpdate false."""
        f_module = self.get_module_mock(params={'schedule': 'RebootNow'})
        target_data = {}
        ome_connection_firmware_mock.get_job_type_id.return_value = ome_response_mock
        payload = self.module.job_payload_for_update(ome_connection_firmware_mock, f_module, target_data)
        for item in payload["Params"]:
            if "JobId" in item:
                assert item["JobId"] == 0
                assert item["Key"] == "complianceUpdate"
                assert item["Value"] == "false"

    def test_get_baseline_ids_case01(self, ome_connection_firmware_mock, ome_response_mock):
        """get_baseline_ids returns baseline, repository and catalog IDs."""
        response = {"report_list": [{"Name": "baseline_name", "Id": 1, "RepositoryId": 2, "CatalogId": 3}]}
        ome_response_mock.json_data = response
        ome_connection_firmware_mock.get_all_report_details.return_value = response
        f_module = self.get_module_mock(params={'baseline_name': "baseline_name"})
        baseline_detail = self.module.get_baseline_ids(ome_connection_firmware_mock, f_module)
        assert baseline_detail["baseline_id"] == response["report_list"][0]["Id"]
        assert baseline_detail["repo_id"] == response["report_list"][0]["RepositoryId"]
        assert baseline_detail["catalog_id"] == response["report_list"][0]["CatalogId"]

    def test_get_baseline_ids_case02(self, ome_connection_firmware_mock, ome_response_mock):
        """A baseline name absent from the report raises 'invalid' message."""
        response = {"report_list": [{"Name": "baseline_name", "Id": 1, "RepositoryId": 2, "CatalogId": 3}]}
        ome_response_mock.json_data = response
        ome_connection_firmware_mock.get_all_report_details.return_value = response
        f_module = self.get_module_mock(params={'baseline_name': "baseline_name2"})
        with pytest.raises(Exception) as exc:
            self.module.get_baseline_ids(ome_connection_firmware_mock, f_module)
        assert exc.value.args[0] == "Unable to complete the operation because the entered target" \
                                    " baseline name 'baseline_name2' is invalid."

    def test_get_baseline_ids_case03(self, ome_connection_firmware_mock, ome_response_mock):
        """An empty report list raises the 'does not exist' message.

        Note: the module's message format contains an error, but this test
        asserts the message exactly as the module currently emits it.
        """
        response = {"report_list": {}}
        ome_response_mock.json_data = response
        ome_connection_firmware_mock.get_all_report_details.return_value = response
        f_module = self.get_module_mock(params={'baseline_name': "baseline_name2"})
        with pytest.raises(Exception) as exc:
            self.module.get_baseline_ids(ome_connection_firmware_mock, f_module)
        assert exc.value.args[0] == "Unable to complete the operation because" \
                                    " the entered target baseline name does not exist."
    def test_baseline_based_update_exception_case_01(self, ome_connection_firmware_mock):
        """An empty compliance report raises COMPLIANCE_READ_FAIL."""
        ome_connection_firmware_mock.get_all_report_details.return_value = {"report_list": []}
        f_module = self.get_module_mock()
        dev_comp_map = {}
        with pytest.raises(Exception) as exc:
            self.module.baseline_based_update(ome_connection_firmware_mock, f_module, {"baseline_id": 1}, dev_comp_map)
        assert exc.value.args[0] == COMPLIANCE_READ_FAIL

    def test_baseline_based_update_case_02(self, ome_connection_firmware_mock):
        """UPGRADE-able components are translated into job target entries."""
        f_module = self.get_module_mock(params={'baseline_id': 1})
        response = {"report_list": [
            {"DeviceId": "1111", "DeviceTypeId": 2000, "DeviceName": "MX-111", "DeviceTypeName": "CHASSIS",
             "ComponentComplianceReports": [{"UpdateAction": "UPGRADE", "SourceName": "SAS.xx.x2"}]}]}
        ome_connection_firmware_mock.get_all_report_details.return_value = response
        dev_comp_map = {}
        compliance_report_list = self.module.baseline_based_update(ome_connection_firmware_mock, f_module,
                                                                   {"baseline_id": 1}, dev_comp_map)
        assert compliance_report_list == [
            {'Id': "1111", 'Data': 'SAS.xx.x2', 'TargetType': {'Id': 2000, 'Name': 'CHASSIS'}}]

    def test_baseline_based_update_case_03(self, ome_connection_firmware_mock):
        """No compliant components to update raises NO_CHANGES_MSG."""
        f_module = self.get_module_mock(params={'baseline_id': 1})
        response = {"report_list": [
            {"DeviceId": 1111, "DeviceTypeId": 2000, "DeviceName": "MX-111", "DeviceTypeName": "CHASSIS",
             "ComponentComplianceReports": []}]}
        ome_connection_firmware_mock.get_all_report_details.return_value = response
        dev_comp_map = {}
        with pytest.raises(Exception, match=NO_CHANGES_MSG) as exc:
            self.module.baseline_based_update(ome_connection_firmware_mock, f_module, {"baseline_id": 1}, dev_comp_map)

    def test_validate_inputs(self):
        """dup_file without any target selector raises the guidance message."""
        f_module = self.get_module_mock(params={"dup_file": "/path/file.exe"})
        msg = "Parameter 'dup_file' to be provided along with 'device_id'|'device_service_tag'|'device_group_names'"
        with pytest.raises(Exception) as exc:
            self.module.validate_inputs(f_module)
        assert exc.value.args[0] == msg

    @pytest.mark.parametrize("exc_type",
                             [IOError, ValueError, SSLValidationError, TypeError, ConnectionError, HTTPError, URLError])
    def test_firmware_main_exception_case(self, exc_type, mocker, ome_default_args,
                                          ome_connection_firmware_mock, ome_response_mock):
        """Each exception type maps to unreachable (URLError) or failed output."""
        ome_default_args.update(
            {"device_id": Constants.device_id1, "device_service_tag": Constants.service_tag1, "dup_file": "duppath"})
        ome_response_mock.status_code = 400
        ome_response_mock.success = False
        json_str = to_text(json.dumps({"info": "error_details"}))
        if exc_type == URLError:
            # Network-unreachable errors are reported via 'unreachable', not failure.
            mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes', side_effect=exc_type("url open error"))
            result = self._run_module(ome_default_args)
            assert result["unreachable"] is True
        elif exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes', side_effect=exc_type("exception message"))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        else:
            # HTTPError needs the full (url, code, msg, hdrs, fp) signature.
            mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes',
                         side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                              {"accept-type": "application/json"}, StringIO(json_str)))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        assert 'msg' in result
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 5.3.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import json
from io import StringIO
from ssl import SSLError

import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_baseline
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule

# Expected user-facing messages, duplicated verbatim from the module under
# test so assertions fail loudly if module-side wording drifts.
BASELINE_JOB_RUNNING = "Firmware baseline '{name}' with ID {id} is running. Please retry after job completion."
# NOTE(review): identifier typo ("BASLEINES" -> "BASELINES") — kept as-is
# because other references outside this view may use the misspelled name.
MULTI_BASLEINES = "Multiple baselines present. Run the module again using a specific ID."
BASELINE_DEL_SUCCESS = "Successfully deleted the firmware baseline."
NO_CHANGES_MSG = "No changes found to be applied."
INVALID_BASELINE_ID = "Invalid baseline ID provided."
BASELINE_TRIGGERED = "Successfully triggered the firmware baseline task."
NO_CATALOG_MESSAGE = "Catalog name not provided for baseline creation."
NO_TARGETS_MESSAGE = "Targets not specified for baseline creation."
CATALOG_STATUS_MESSAGE = "Unable to create the firmware baseline as the catalog is in {status} status."
BASELINE_UPDATED = "Successfully {op} the firmware baseline."
DISCOVER_JOB_COMPLETE = "Successfully completed the Discovery job."
JOB_TRACK_SUCCESS = "Discovery job has {0}."
JOB_TRACK_FAIL = "No devices discovered, job is in {0} state."
SETTLING_TIME = 3
# Dotted-path prefix used with mocker.patch() for names inside
# ome_firmware_baseline.
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline.'
+ +payload_out1 = { + "Name": "baseline1", + "Description": "baseline_description", + "CatalogId": 12, + "RepositoryId": 23, + "DowngradeEnabled": True, + "Is64Bit": True, + "Targets": [ + {"Id": 123, + "Type": { + "Id": 1000, + "Name": "DEVICE" + }}] +} +payload_out2 = { + "Name": "baseline1", + "CatalogId": 12, + "RepositoryId": 23, 'Description': None, 'DowngradeEnabled': True, 'Is64Bit': True, + "Targets": [ + {"Id": 123, + "Type": { + "Id": 1000, + "Name": "DEVICE" + }}] +} + +baseline_status1 = { + "CatalogId": 123, + "Description": "BASELINE DESCRIPTION", + "DeviceComplianceReports": [], + "DowngradeEnabled": True, + "Id": 0, + "Is64Bit": True, + "Name": "my_baseline", + "RepositoryId": 123, + "RepositoryName": "catalog123", + "RepositoryType": "HTTP", + "Targets": [ + { + "Id": 10083, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + }, + { + "Id": 10076, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "TaskId": 11235, + "TaskStatusId": 0 +} + + +@pytest.fixture +def ome_connection_mock_for_firmware_baseline(mocker, ome_response_mock): + connection_class_mock = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeFirmwareBaseline(FakeAnsibleModule): + module = ome_firmware_baseline + + @pytest.fixture + def mock__get_catalog_payload(self, mocker): + mock_payload = mocker.patch( + MODULE_PATH + '_get_baseline_payload', + return_value={ + "Name": "baseline_name", + "CatalogId": "cat_id", + "RepositoryId": "repo_id", + "Targets": {} + } + ) + return mock_payload + + catrepo_param1 = "catalog1" + catrepo_out1 = (22, 12) + catrepo_param2 = None + catrepo_out2 = (None, None) + catrepo_param3 = "catalog3" + catrepo_out3 = (None, None) + + @pytest.mark.parametrize("params", [{"inp": 
catrepo_param1, "out": catrepo_out1}, + {"inp": catrepo_param2, "out": catrepo_out2}, + {"inp": catrepo_param3, "out": catrepo_out3}]) + def test_get_catrepo_ids(self, ome_connection_mock_for_firmware_baseline, + ome_response_mock, params): + ome_connection_mock_for_firmware_baseline.get_all_items_with_pagination.return_value = { + "value": [ + { + "Id": 22, + "Repository": { + "Id": 12, + "Name": "catalog1", + }, + "Status": "Completed" + }, + { + "Id": 23, + "Repository": { + "Id": 12, + "Name": "catalog2", + }, + "Status": "Completed" + } + ] + } + f_module = self.get_module_mock(params=params["inp"]) + catrepo = self.module.get_catrepo_ids(f_module, params["inp"], ome_connection_mock_for_firmware_baseline) + assert catrepo == params["out"] + + @pytest.mark.parametrize("params", [{"mparams": {"state": "absent", "baseline_name": "my_baseline1"}, "res": [ + {"Id": 12, "Name": "my_baseline1"}], "json_data": { + "value": [{"Id": 12, "Name": "my_baseline1"}]}, "success": True}, { + "mparams": {"state": "absent", "baseline_id": 12}, + "res": [{"Id": 12, "Name": "my_baseline1"}], + "json_data": {"value": [{"Id": 11, "Name": "my_baseline2"}, + {"Id": 12, "Name": "my_baseline1"}]}, "success": True}]) + def test_check_existing_baseline(self, mocker, params, ome_connection_mock_for_firmware_baseline, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + ome_connection_mock_for_firmware_baseline.get_all_items_with_pagination.return_value = params['json_data'] + f_module = self.get_module_mock(params=params["mparams"]) + res = self.module.check_existing_baseline(f_module, ome_connection_mock_for_firmware_baseline) + assert res == params["res"] + + @pytest.mark.parametrize("params", [ + {"json_data": {"Name": 'd1'}, 'job_failed': False, 'job_message': BASELINE_UPDATED.format(op='created'), + 'mparams': {'catalog_name': 'c1', 'device_ids': 123, 'job_wait': True, 'job_wait_timeout': 1000}}, + 
{"json_data": {"Name": 'd1'}, 'job_failed': True, 'job_message': JOB_TRACK_FAIL, + 'mparams': {'catalog_name': 'c1', 'device_ids': 123, 'job_wait': True, 'job_wait_timeout': 1000}}, + {"json_data": {"Name": 'd1'}, 'job_failed': True, 'job_message': BASELINE_TRIGGERED, + 'mparams': {'catalog_name': 'c1', 'device_ids': 123, 'job_wait': False, 'schedule': 'RunLater', + 'job_wait_timeout': 1000}}]) + def test_create_baseline(self, params, mocker, ome_connection_mock_for_firmware_baseline, ome_response_mock): + mocker.patch(MODULE_PATH + '_get_baseline_payload', return_value={}) + mocker.patch(MODULE_PATH + 'check_existing_baseline', return_value=[{"Id": 123}]) + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + ome_connection_mock_for_firmware_baseline.job_tracking.return_value = \ + (params['job_failed'], params['job_message']) + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params['mparams']) + error_message = params["job_message"] + with pytest.raises(Exception) as err: + self.module.create_baseline(f_module, ome_connection_mock_for_firmware_baseline) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", [ + {"json_data": {"Name": 'd1', }, + 'job_failed': False, 'job_message': BASELINE_UPDATED.format(op='modified'), + 'mparams': {"baseline_description": "new description", "baseline_name": "c4", "catalog_name": "baseline", + "device_service_tags": ["2H7HNX2", "2HB9NX2"], "downgrade_enabled": False, "is_64_bit": False, + "job_wait": True, "job_wait_timeout": 600, "new_baseline_name": "new name"}, + "baseline_list": [{"CatalogId": 25, "Description": "", "DowngradeEnabled": True, "Id": 40, "Is64Bit": True, + "Name": "c4", "RepositoryId": 15, + "Targets": [{"Id": 13456, "Type": {"Id": 1000, "Name": "DEVICE"}}, + {"Id": 13457, "Type": {"Id": 1000, "Name": "DEVICE"}}], "TaskId": 14465, + "TaskStatusId": 2010}], + "get_catrepo_ids": 
(12, 13), "get_target_list": [{"Id": 13456, "Type": {"Id": 1000, "Name": "DEVICE"}}, + {"Id": 13457, "Type": {"Id": 1000, "Name": "DEVICE"}}] + }, + {"json_data": {"Name": 'd1'}, 'job_failed': True, 'job_message': JOB_TRACK_FAIL, + 'mparams': {'catalog_name': 'c1', 'device_ids': 123, 'job_wait': True, 'job_wait_timeout': 1000}, + "baseline_list": [{"Id": 12, "Name": "c1", "TaskStatusId": 2010, "TaskId": 12}], }, + {"json_data": {"Name": 'd1'}, 'job_failed': True, 'job_message': BASELINE_TRIGGERED, + "baseline_list": [{"Id": 12, "Name": "c1", "TaskStatusId": 2010, "TaskId": 12}], + 'mparams': {'catalog_name': 'c1', 'device_ids': 123, 'job_wait': False, 'schedule': 'RunLater', + 'job_wait_timeout': 1000}}]) + def test_modify_baseline(self, params, mocker, ome_connection_mock_for_firmware_baseline, ome_response_mock): + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + mocker.patch(MODULE_PATH + 'get_catrepo_ids', return_value=params.get('get_catrepo_ids', (12, 13))) + mocker.patch(MODULE_PATH + 'get_target_list', return_value=params.get('get_target_list', [])) + ome_connection_mock_for_firmware_baseline.job_tracking.return_value = \ + (params['job_failed'], params['job_message']) + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params['mparams']) + error_message = params["job_message"] + with pytest.raises(Exception) as err: + self.module.modify_baseline(f_module, ome_connection_mock_for_firmware_baseline, params['baseline_list']) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", + [{"mparams": {"state": "absent", "baseline_job_name": "my_baseline1"}, + "baseline_list": [{"Id": 12, "Name": "my_baseline1", "TaskStatusId": 2010}], + "job_state_dict": {12: 2010}, "res": BASELINE_DEL_SUCCESS.format(n=1), + "json_data": 1, "success": True}, + {"mparams": {"state": "absent", "baseline_job_name": "my_baseline1"}, + 
"baseline_list": [{"Id": 12, "Name": "my_baseline1", "TaskStatusId": 2050, "TaskId": 12}], + "job_state_dict": {12: 2050}, + "res": BASELINE_JOB_RUNNING.format(name='my_baseline1', id=12), "json_data": 1, + "success": True}]) + def test_delete_baseline(self, mocker, params, ome_connection_mock_for_firmware_baseline, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params["mparams"]) + error_message = params["res"] + with pytest.raises(Exception) as err: + self.module.delete_baseline(f_module, ome_connection_mock_for_firmware_baseline, params['baseline_list']) + assert err.value.args[0] == error_message + + def test_get_catrepo_ids_success(self, ome_connection_mock_for_firmware_baseline, + ome_response_mock): + ome_response_mock.success = False + f_module = self.get_module_mock() + catrepo = self.module.get_catrepo_ids(f_module, "catalog1", ome_connection_mock_for_firmware_baseline) + assert catrepo == (None, None) + + inp_param1 = {"device_service_tags": ["R840PT3", "R940PT3"]} + out1 = [ + { + "Id": 12, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + }, + { + "Id": 23, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ] + inp_param2 = {"device_service_tags": ["R840PT3"]} + out2 = [{ + "Id": 12, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + }] + + @pytest.mark.parametrize("params", [{"inp": inp_param1, "out": out1}, + {"inp": inp_param2, "out": out2}]) + def test_get_dev_ids(self, ome_connection_mock_for_firmware_baseline, + ome_response_mock, params): + f_module = self.get_module_mock(params=params["inp"]) + ome_connection_mock_for_firmware_baseline.get_all_items_with_pagination.return_value = { + "value": + [ + { + "Id": 12, + "Type": 1000, + "DeviceServiceTag": "R840PT3" + }, + { + "Id": 23, + "Type": 1000, + "DeviceServiceTag": "R940PT3" + } + ] + } + targets = self.module.get_dev_ids(f_module, 
ome_connection_mock_for_firmware_baseline, + "device_service_tags", "DeviceServiceTag") + assert targets == params["out"] + + grp_param1 = {"device_group_names": ["group1", "group2"]} + grp_out1 = [ + { + "Id": 12, + "Type": { + "Id": 6000, + "Name": "GROUP" + } + }, + { + "Id": 23, + "Type": { + "Id": 6000, + "Name": "GROUP" + } + } + ] + grp_param2 = {"device_group_names": ["group1"]} + grp_out2 = [ + { + "Id": 12, + "Type": { + "Id": 6000, + "Name": "GROUP" + } + } + ] + + @pytest.mark.parametrize("params", [{"inp": grp_param1, "out": grp_out1}, + {"inp": grp_param2, "out": grp_out2}]) + def test_get_group_ids(self, ome_connection_mock_for_firmware_baseline, + ome_response_mock, params): + f_module = self.get_module_mock(params=params["inp"]) + ome_response_mock.success = True + ome_connection_mock_for_firmware_baseline.get_all_items_with_pagination.return_value = { + "value": [ + { + "Id": 12, + "TypeId": 6000, + "Name": "group1" + }, + { + "Id": 23, + "TypeId": 6000, + "Name": "group2" + } + ] + } + targets = self.module.get_group_ids(f_module, ome_connection_mock_for_firmware_baseline) + assert targets == params["out"] + + payload_param1 = {"catalog_name": "cat1", + "baseline_name": "baseline1", + "baseline_description": "baseline_description", + "downgrade_enabled": True, + "is_64_bit": True} + payload_param2 = {"catalog_name": "cat1", + "baseline_name": "baseline1", + "baseline_description": None, + "downgrade_enabled": None, + "is_64_bit": None} + + @pytest.mark.parametrize("params", [{"inp": payload_param1, "out": payload_out1}, + {"inp": payload_param2, "out": payload_out2}]) + def test__get_baseline_payload(self, ome_connection_mock_for_firmware_baseline, params, mocker): + f_module = self.get_module_mock(params=params["inp"]) + mocker.patch( + MODULE_PATH + 'get_catrepo_ids', + return_value=(12, 23)) + mocker.patch( + MODULE_PATH + 'get_target_list', + return_value=[{"Id": 123, "Type": {"Id": 1000, "Name": "DEVICE"}}]) + payload = 
self.module._get_baseline_payload(f_module, ome_connection_mock_for_firmware_baseline) + assert payload == params["out"] + + def test__get_baseline_payload_failure01(self, ome_default_args, ome_connection_mock_for_firmware_baseline, mocker): + f_module = self.get_module_mock(params={"catalog_name": "cat1", + "baseline_name": "baseline1"}) + mocker.patch( + MODULE_PATH + 'get_catrepo_ids', + return_value=(None, None)) + mocker.patch( + MODULE_PATH + 'get_target_list', + return_value=[{"Id": 123, "Type": { + "Id": 1000, "Name": "DEVICE"}}]) + with pytest.raises(Exception) as exc: + self.module._get_baseline_payload(f_module, ome_connection_mock_for_firmware_baseline) + assert exc.value.args[0] == "No Catalog with name cat1 found" + + def test__get_baseline_payload_failure02(self, ome_default_args, ome_connection_mock_for_firmware_baseline, mocker): + f_module = self.get_module_mock(params={"catalog_name": "cat1", + "baseline_name": "baseline1"}) + mocker.patch( + MODULE_PATH + 'get_catrepo_ids', + return_value=(12, 23)) + mocker.patch( + MODULE_PATH + 'get_target_list', + return_value=None) + with pytest.raises(Exception) as exc: + self.module._get_baseline_payload(f_module, ome_connection_mock_for_firmware_baseline) + assert exc.value.args[0] == NO_TARGETS_MESSAGE + + target_param1 = {"device_ids": [12, 23]} + target_out1 = [ + { + "Id": 12, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + }, + { + "Id": 23, + "Type": { + "Id": 1000, + "Name": "DEVICE" + } + } + ] + target_param2 = {"x": 3} + target_out2 = None + + @pytest.mark.parametrize("params", [{"inp": inp_param1, "out": out1}, + {"inp": inp_param2, "out": out2}, + {"inp": grp_param1, "out": grp_out1}, + {"inp": grp_param2, "out": grp_out2}, + {"inp": target_param1, "out": target_out1}, + {"inp": target_param2, "out": target_out2}]) + def test_get_target_list(self, ome_connection_mock_for_firmware_baseline, params, mocker): + f_module = self.get_module_mock(params=params["inp"]) + mocker.patch( + MODULE_PATH 
+ 'get_dev_ids', + return_value=params["out"]) + mocker.patch( + MODULE_PATH + 'get_group_ids', + return_value=params["out"]) + targets = self.module.get_target_list(f_module, ome_connection_mock_for_firmware_baseline) + assert targets == params["out"] + + @pytest.mark.parametrize("params", [ + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [], + "mparams": {"state": "absent", "baseline_name": "b1", "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 'message': NO_CHANGES_MSG, "success": True + }, + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [{"name": "b1", "Id": 123, "TaskStatusId": 2060}], "check_mode": True, + "mparams": {"state": "absent", "baseline_id": 123, "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 'message': "Changes found to be applied.", "success": True + }, + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [], "check_mode": True, + "mparams": {"state": "present", "baseline_name": "b1", "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 'message': "Changes found to be applied.", "success": True + } + ]) + def test_main_success(self, params, ome_connection_mock_for_firmware_baseline, ome_default_args, ome_response_mock, mocker): + mocker.patch(MODULE_PATH + 'check_existing_baseline', return_value=params.get("check_existing_baseline")) + mocker.patch(MODULE_PATH + '_get_baseline_payload', return_value=params.get("_get_baseline_payload")) + ome_response_mock.success = True + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params.get('mparams')) + result = self._run_module(ome_default_args, check_mode=params.get("check_mode", False)) + assert result["msg"] == params['message'] + + @pytest.mark.parametrize("params", [ + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [], "check_mode": True, + "mparams": {"state": "present", "baseline_id": 123, "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 
'message': INVALID_BASELINE_ID, "success": True + }, + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [{"Name": "b1", "Id": 123, "TaskStatusId": 2050, "TaskId": 2050}], "check_mode": True, + "mparams": {"state": "present", "baseline_id": 123, "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 'message': "Firmware baseline 'b1' with ID 123 is running. Please retry after job completion.", "success": True + }, + {"json_data": {"JobId": 1234}, + "check_existing_baseline": [{"Name": "b1", "Id": 123, "TaskStatusId": 2060, "TaskId": 2050}], + "check_mode": True, "get_catrepo_ids": (None, None), + "mparams": {"state": "present", "baseline_id": 123, "device_ids": [12, 23], 'catalog_name': 'c1', + 'job_wait': False}, + 'message': "No Catalog with name c1 found", "success": True + }, + ]) + def test_main_failure(self, params, ome_connection_mock_for_firmware_baseline, ome_default_args, ome_response_mock, mocker): + mocker.patch(MODULE_PATH + 'check_existing_baseline', return_value=params.get("check_existing_baseline")) + mocker.patch(MODULE_PATH + '_get_baseline_payload', return_value=params.get("_get_baseline_payload")) + mocker.patch(MODULE_PATH + 'get_catrepo_ids', return_value=params.get("get_catrepo_ids")) + ome_response_mock.success = True + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params.get('mparams')) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == params['message'] + + def test_main_failure01(self, ome_connection_mock_for_firmware_baseline, ome_default_args, ome_response_mock, + mocker): + mocker.patch( + MODULE_PATH + '_get_baseline_payload', + return_value=payload_out1) + ome_response_mock.success = False + ome_response_mock.json_data = baseline_status1 + ome_default_args.update({"baseline_name": "b1", "device_ids": [12, 23]}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["failed"] is True + assert 'msg' in result + + def 
test_main_failure02(self, ome_connection_mock_for_firmware_baseline, ome_default_args, ome_response_mock, + mocker): + mocker.patch( + MODULE_PATH + '_get_baseline_payload', + return_value=payload_out1) + ome_response_mock.success = False + ome_response_mock.json_data = baseline_status1 + ome_default_args.update({"baseline_name": "b1"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["failed"] is True + assert 'msg' in result + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_baseline_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_firmware_baseline, ome_response_mock): + ome_default_args.update({"state": "absent", "baseline_name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_existing_baseline', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_existing_baseline', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_existing_baseline', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py new file mode 100644 index 
00000000..96672f6d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py @@ -0,0 +1,537 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_baseline_compliance_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, \ + AnsibleFailJSonException, Constants + + +@pytest.fixture +def ome_connection_mock_for_firmware_baseline_compliance_info(mocker, ome_response_mock): + connection_class_mock = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeFirmwareCatalog(FakeAnsibleModule): + module = ome_firmware_baseline_compliance_info + + def test__get_device_id_from_service_tags_for_baseline_success_case(self, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": [{"DeviceServiceTag": Constants.service_tag1, "Id": Constants.device_id1}]} + f_module = self.get_module_mock() + data = 
self.module._get_device_id_from_service_tags([Constants.service_tag1], + ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert data == {Constants.device_id1: Constants.service_tag1} + + def test__get_device_id_from_service_tags_empty_case(self, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": []} + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + data = self.module._get_device_id_from_service_tags([Constants.service_tag1], + ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert exc.value.args[0] == "Unable to fetch the device information." + + def test_get_device_id_from_service_tags_for_baseline_error_case(self, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.side_effect = HTTPError( + 'http://testhost.com', 400, '', {}, None) + f_module = self.get_module_mock() + with pytest.raises(HTTPError) as ex: + self.module._get_device_id_from_service_tags(["INVALID"], + ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + + def test_get_device_id_from_service_tags_for_baseline_value_error_case(self, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": []} + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + self.module._get_device_id_from_service_tags(["#$%^&"], + ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert exc.value.args[0] == "Unable to fetch the device information." 
+ + def test_get_device_ids_from_group_ids_success_case(self, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": [{"DeviceServiceTag": Constants.service_tag1, "Id": Constants.device_id1}]} + f_module = self.get_module_mock() + device_ids = self.module.get_device_ids_from_group_ids(f_module, ["123", "345"], + ome_connection_mock_for_firmware_baseline_compliance_info) + assert device_ids == [Constants.device_id1, Constants.device_id1] + + def test_get_device_ids_from_group_ids_empty_case(self, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = {"report_list": []} + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + device_ids = self.module.get_device_ids_from_group_ids(f_module, ["123", "345"], + ome_connection_mock_for_firmware_baseline_compliance_info) + assert exc.value.args[0] == "Unable to fetch the device ids from specified device_group_names." 
+ + def test_get_device_ids_from_group_ids_error_case(self, ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = HTTPError( + 'http://testhost.com', 400, '', {}, None) + f_module = self.get_module_mock() + with pytest.raises(HTTPError) as ex: + device_ids = self.module.get_device_ids_from_group_ids(f_module, ["123456"], + ome_connection_mock_for_firmware_baseline_compliance_info) + + def test_get_device_ids_from_group_ids_value_error_case(self, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": []} + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + self.module.get_device_ids_from_group_ids(f_module, ["123456"], + ome_connection_mock_for_firmware_baseline_compliance_info) + assert exc.value.args[0] == "Unable to fetch the device ids from specified device_group_names." 
+ + def test_get_device_ids_from_group_names_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": [{"Name": "group1", "Id": 123}]} + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_device_ids_from_group_ids', + return_value=[Constants.device_id1, Constants.device_id2]) + f_module = self.get_module_mock(params={"device_group_names": ["group1", "group2"]}) + device_ids = self.module.get_device_ids_from_group_names(f_module, + ome_connection_mock_for_firmware_baseline_compliance_info) + assert device_ids == [Constants.device_id1, Constants.device_id2] + + def test_get_device_ids_from_group_names_empty_case(self, mocker, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": []} + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_device_ids_from_group_ids', + return_value=[]) + f_module = self.get_module_mock(params={"device_group_names": ["abc", "xyz"]}) + with pytest.raises(Exception) as ex: + device_ids = self.module.get_device_ids_from_group_names(f_module, + ome_connection_mock_for_firmware_baseline_compliance_info) + assert ex.value.args[0] == "Unable to fetch the specified device_group_names." 
+ + def test_get_device_ids_from_group_names_error_case(self, ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.side_effect = HTTPError( + 'http://testhost.com', 400, '', {}, None) + f_module = self.get_module_mock(params={"device_group_names": ["abc", "xyz"]}) + with pytest.raises(HTTPError) as ex: + self.module.get_device_ids_from_group_names(f_module, + ome_connection_mock_for_firmware_baseline_compliance_info) + + def test_get_device_ids_from_group_names_value_error_case(self, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.return_value = { + "report_list": []} + f_module = self.get_module_mock(params={"device_group_names": ["abc", "xyz"]}) + with pytest.raises(Exception) as exc: + self.module.get_device_ids_from_group_names(f_module, + ome_connection_mock_for_firmware_baseline_compliance_info) + assert exc.value.args[0] == "Unable to fetch the specified device_group_names." 
+ + def test_get_identifiers_with_device_ids(self, ome_connection_mock_for_firmware_baseline_compliance_info, + module_mock, default_ome_args): + """when device_ids given """ + f_module = self.get_module_mock(params={"device_ids": [Constants.device_id1, Constants.device_id2]}) + identifiers, identifiers_type = self.module.get_identifiers( + ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert identifiers == [Constants.device_id1, Constants.device_id2] + assert identifiers_type == "device_ids" + + def test_get_identifiers_with_service_tags(self, mocker, ome_connection_mock_for_firmware_baseline_compliance_info, + module_mock, default_ome_args): + """when service tags given """ + f_module = self.get_module_mock(params={"device_service_tags": [Constants.service_tag1]}) + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info._get_device_id_from_service_tags', + return_value={Constants.device_id1: Constants.service_tag1}) + identifiers, identifiers_type = self.module.get_identifiers( + ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert identifiers == [Constants.device_id1] + assert identifiers_type == "device_service_tags" + + def test_get_identifiers_with_group_names(self, mocker, ome_connection_mock_for_firmware_baseline_compliance_info, + module_mock, default_ome_args): + """when device_group_names given """ + f_module = self.get_module_mock(params={"device_group_names": [Constants.service_tag1]}) + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_device_ids_from_group_names', + return_value=[123, 456]) + identifiers, identifiers_type = self.module.get_identifiers( + ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert identifiers == [123, 456] + assert identifiers_type == "device_group_names" + + def test_get_identifiers_with_service_tags_empty_case(self, mocker, +
ome_connection_mock_for_firmware_baseline_compliance_info, + module_mock, default_ome_args): + """when service tags given """ + f_module = self.get_module_mock(params={"device_service_tags": [Constants.service_tag1]}) + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info._get_device_id_from_service_tags', + return_value={}) + identifiers, identifiers_type = self.module.get_identifiers( + ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert identifiers == [] + assert identifiers_type == "device_service_tags" + + def test_get_baseline_id_from_name_success_case(self, default_ome_args, + ome_connection_mock_for_firmware_baseline_compliance_info, + module_mock, ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": [{"Name": "baseline_name1", "Id": 111}, {"Name": "baseline_name2", + "Id": 222}]} + f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"}) + baseline_id = self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert baseline_id == 111 + + def test_get_baseline_id_from_name_when_name_not_exists(self, default_ome_args, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": [{"Name": "baseline_name1", "Id": 111}]} + f_module = self.get_module_mock(params={"baseline_name": "not_exits"}) + with pytest.raises(AnsibleFailJSonException) as exc: + self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert exc.value.args[0] == "Specified baseline_name does not exist in the system." 
+ + def test_get_baseline_id_from_name_when_baseline_is_empty(self, default_ome_args, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": []} + f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"}) + with pytest.raises(AnsibleFailJSonException) as exc: + self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert exc.value.args[0] == "No baseline exists in the system." + + def test_get_baseline_id_from_name_when_baselinename_is_none(self, default_ome_args, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": []} + f_module = self.get_module_mock(params={"baseline_notexist": "data"}) + with pytest.raises(AnsibleFailJSonException) as exc: + self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + assert exc.value.args[0] == "baseline_name is a mandatory option." 
+ + def test_get_baseline_id_from_name_with_http_error_handlin_case(self, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = HTTPError( + 'http://testhost.com', 400, '', {}, None) + f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"}) + with pytest.raises(HTTPError) as ex: + self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + + @pytest.mark.parametrize("exc_type", + [URLError, SSLValidationError, ConnectionError, TypeError, ValueError, HTTPError]) + def test_get_baseline_id_from_name_failure_case_01(self, exc_type, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + if exc_type not in [HTTPError, SSLValidationError]: + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = exc_type( + 'test') + else: + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = exc_type( + 'http://testhost.com', 400, '', {}, None) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"}) + with pytest.raises(exc_type) as ex: + self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module) + + def test_get_baselines_report_by_device_ids_success_case(self, mocker, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_identifiers', + return_value=([Constants.device_id1], "device_ids")) + ome_response_mock.json_data = {"value": []} + ome_response_mock.success = True + f_module = self.get_module_mock() + 
self.module.get_baselines_report_by_device_ids(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + + def test_get_baselines_report_by_device_service_tag_not_exits_case(self, mocker, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_identifiers', + return_value=([], "device_service_tags")) + ome_response_mock.json_data = {"value": []} + ome_response_mock.success = True + f_module = self.get_module_mock() + with pytest.raises(AnsibleFailJSonException) as exc: + self.module.get_baselines_report_by_device_ids(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert exc.value.args[0] == "Device details not available as the service tag(s) provided are invalid." + + def test_get_baselines_report_by_group_names_not_exits_case(self, mocker, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_identifiers', + return_value=([], "device_group_names")) + ome_response_mock.json_data = {"value": []} + ome_response_mock.success = True + f_module = self.get_module_mock() + with pytest.raises(AnsibleFailJSonException) as exc: + self.module.get_baselines_report_by_device_ids(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert exc.value.args[0] == "Device details not available as the group name(s) provided are invalid." 
+ + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def _test_get_baselines_report_by_device_ids_exception_handling(self, mocker, exc_type, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + """when invalid value for expose_durationis given """ + err_dict = {"file": { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": [ + { + "MessageId": "CUPD3090", + "RelatedProperties": [], + "Message": "Unable to retrieve baseline list either because the device " + "ID(s) entered are invalid, the ID(s) provided are not " + "associated with a baseline or a group is used as a target for " + "a baseline.", + "MessageArgs": [], + "Severity": "Critical", + "Resolution": "Make sure the entered device ID(s) are valid and retry the operation." + } + ] + } + } + } + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_identifiers', + return_value=([], "device_ids")) + if exc_type not in [HTTPError, SSLValidationError]: + ome_connection_mock_for_firmware_baseline_compliance_info.invoke_request.side_effect = exc_type('test') + else: + ome_connection_mock_for_firmware_baseline_compliance_info.invoke_request.side_effect = exc_type( + 'http://testhost.com', 400, '', err_dict, None) + f_module = self.get_module_mock() + with pytest.raises(exc_type): + self.module.get_baselines_report_by_device_ids( + ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + + def test_get_baseline_compliance_reports_success_case_for_baseline_device(self, mocker, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baseline_id_from_name', + return_value=123) + f_module = 
self.get_module_mock(params={"baseline_name": "baseline1"}) + ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.return_value = { + "value": [{"baseline_device_report1": "data"}]} + data = self.module.get_baseline_compliance_reports(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + assert data == [{"baseline_device_report1": "data"}] + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_get_baseline_compliance_reports_exception_handling_case(self, exc_type, mocker, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baseline_id_from_name', + side_effect=exc_type('exception message')) + else: + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baseline_id_from_name', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + f_module = self.get_module_mock(params={"baseline_name": "baseline1"}) + with pytest.raises(exc_type): + self.module.get_baseline_compliance_reports(ome_connection_mock_for_firmware_baseline_compliance_info, + f_module) + + param_list1 = [{"baseline_name": ""}, + {"baseline_name": None}, + {"device_ids": []}, + {"device_ids": None}, + {"device_ids": [], "baseline_name": ""}, + {"device_service_tags": []}, + {"device_service_tags": [], "baseline_name": ""}, + {"device_service_tags": None}, + {"device_group_names": [], "baseline_name": ""}, + {"device_group_names": []}, + {"device_group_names": None}, + {"device_ids": [], "device_service_tags": []}, + {"device_ids": None, "device_service_tags": None}, + {"device_ids": [], 
"device_service_tags": [], "device_group_names": []}, + {"device_ids": None, "device_service_tags": None, "device_group_names": None}, + {"device_ids": None, "device_service_tags": [], "device_group_names": None}, + {"device_ids": [], "device_service_tags": [], "device_group_names": [], "baseline_name": ""}, + + ] + + @pytest.mark.parametrize("param", param_list1) + def test_validate_input_error_handling_case(self, param): + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as exc: + self.module.validate_inputs(f_module) + assert exc.value.args[0] == "one of the following is required: device_ids, " \ + "device_service_tags, device_group_names, baseline_name " \ + "to generate device based compliance report." + + params_list2 = [{ + "device_ids": [Constants.device_id1], + "device_service_tags": [Constants.service_tag1]}, + {"device_ids": [Constants.device_id1]}, + {"device_group_names": ["group1"]}, + {"device_service_tags": [Constants.service_tag1]}, + {"baseline_name": "baseline1", "device_ids": [Constants.device_id1]}, + {"baseline_name": "baseline1", "device_group_names": ["group1"]} + ] + + @pytest.mark.parametrize("param", params_list2) + def test_validate_input_params_without_error_handling_case(self, param): + f_module = self.get_module_mock(params=param) + self.module.validate_inputs(f_module) + + def test_baseline_complaince_main_success_case_01(self, mocker, ome_default_args, module_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.validate_inputs') + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baselines_report_by_device_ids', + return_value=[{"device": "device_report"}]) + ome_default_args.update({"device_ids": [Constants.device_id1]}) + result = self._run_module(ome_default_args) + assert result["changed"] is False + assert 
'baseline_compliance_info' in result + assert 'msg' not in result + + def test_baseline_complaince_main_success_case_02(self, mocker, ome_default_args, module_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.validate_inputs') + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baseline_compliance_reports', + return_value=[{"baseline_device": "baseline_device_report"}]) + ome_default_args.update({"baseline_name": "baseline_name"}) + result = self._run_module(ome_default_args) + assert result["changed"] is False + assert 'baseline_compliance_info' in result + assert 'msg' not in result + + def test_baseline_complaince_main_failure_case_01(self, ome_default_args, module_mock): + """required parameter is not passed along with specified report_type""" + # ome_default_args.update({}) + result = self._run_module_with_fail_json(ome_default_args) + assert 'baseline_compliance_info' not in result + assert 'msg' in result + assert result['msg'] == "one of the following is required: device_ids, " \ + "device_service_tags, device_group_names, baseline_name" + assert result['failed'] is True + + param_list4 = [ + {"device_ids": [Constants.device_id1], "device_service_tags": [Constants.service_tag1]}, + {"device_service_tags": [Constants.device_id1], "device_group_names": ["group_name1"]}, + {"device_ids": [Constants.device_id1], "device_group_names": ["group_name1"]}, + {"device_ids": [Constants.device_id1], "device_service_tags": ["group_name1"]}, + {"device_ids": [Constants.device_id1], "device_service_tags": [Constants.service_tag1], + "device_group_names": ["group_name1"]}, + {"device_ids": [Constants.device_id1], "device_service_tags": [Constants.service_tag1], + "device_group_names": ["group_name1"], "baseline_name": "baseline1" + }, + {"device_ids": [Constants.device_id1], 
"baseline_name": "baseline1"}, + {"device_service_tags": [Constants.service_tag1], "baseline_name": "baseline1"}, + {"device_group_names": ["group_name1"], "baseline_name": "baseline1"}, + {"device_ids": [], "device_service_tags": [], + "device_group_names": [], "baseline_name": "" + }, + ] + + @pytest.mark.parametrize("param", param_list4) + def test_baseline_complaince_main_failure_case_02(self, param, ome_default_args, module_mock): + """required parameter is not passed along with specified report_type""" + ome_default_args.update(param) + result = self._run_module_with_fail_json(ome_default_args) + assert 'baseline_compliance_info' not in result + assert 'msg' in result + assert result["msg"] == "parameters are mutually exclusive: " \ + "baseline_name|device_service_tags|device_ids|device_group_names" + assert result['failed'] is True + + def test_baseline_complaince_main_failure_case_03(self, mocker, ome_default_args, module_mock, ome_response_mock, + ome_connection_mock_for_firmware_baseline_compliance_info): + """when ome response return value is None""" + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.validate_inputs') + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baselines_report_by_device_ids', + return_value=None) + ome_default_args.update({"device_ids": [Constants.device_id1]}) + result = self._run_module(ome_default_args) + assert 'baseline_compliance_info' not in result + assert result['msg'] == "Unable to fetch the compliance baseline information." 
+ + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_baseline_complaince_main_exception_handling_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_firmware_baseline_compliance_info, + ome_response_mock): + ome_default_args.update({"device_service_tags": [Constants.service_tag1]}) + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.validate_inputs') + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baselines_report_by_device_ids', + side_effect=exc_type('test')) + else: + mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baselines_report_by_device_ids', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert 'baseline_compliance_info' not in result + assert 'msg' in result + assert result['failed'] is True + if exc_type == HTTPError: + assert 'error_info' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py new file mode 100644 index 00000000..6d394a1a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_baseline_info +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestOmeFirmwareBaselineInfo(FakeAnsibleModule): + module = ome_firmware_baseline_info + + @pytest.fixture + def ome_connection_ome_firmware_baseline_info_mock(self, mocker, ome_response_mock): + connection_class_mock = mocker.patch( + MODULE_PATH + 'ome_firmware_baseline_info.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + def test_ome_firmware_baseline_info_main_success_case_01(self, mocker, ome_response_mock, ome_default_args, + module_mock, + ome_connection_ome_firmware_baseline_info_mock): + ome_response_mock.json_data = {"value": [{"baseline1": "data"}]} + result = self.execute_module(ome_default_args) + assert result["changed"] is False + assert 'baseline_info' in result + assert result['msg'] == "Successfully fetched firmware baseline information." 
+ assert result['baseline_info'] == {"value": [{"baseline1": "data"}]} + + def test_ome_firmware_baseline_info_main_success_case_02(self, mocker, ome_response_mock, ome_default_args, + module_mock, + ome_connection_ome_firmware_baseline_info_mock): + ome_response_mock.json_data = {"value": []} + result = self.execute_module(ome_default_args) + assert 'baseline_info' in result + assert result['baseline_info'] == [] + + def test_ome_firmware_baseline_info_main_success_case_03(self, mocker, ome_response_mock, ome_default_args, + module_mock, + ome_connection_ome_firmware_baseline_info_mock): + ome_default_args.update({"baseline_name": "baseline1"}) + ome_response_mock.json_data = {"value": [{"Name": "baseline1", "data": "fake_data"}]} + mocker.patch( + MODULE_PATH + 'ome_firmware_baseline_info.get_specific_baseline', + return_value={"Name": "baseline1", "data": "fake_data"}) + result = self.execute_module(ome_default_args) + assert result["changed"] is False + assert 'baseline_info' in result + assert result["baseline_info"] == {"Name": "baseline1", "data": "fake_data"} + assert result['msg'] == "Successfully fetched firmware baseline information." + + def test_ome_firmware_baseline_info_main_success_case_04(self, mocker, ome_response_mock, ome_default_args, + module_mock, + ome_connection_ome_firmware_baseline_info_mock): + ome_default_args.update({"baseline_name": None}) + ome_response_mock.json_data = {"value": []} + mocker.patch( + MODULE_PATH + 'ome_firmware_baseline_info.get_specific_baseline', + return_value={"baseline1": "fake_data"}) + result = self.execute_module(ome_default_args) + assert result['baseline_info'] == [] + assert result['msg'] == "No baselines present." 
+ + def test_ome_firmware_get_specific_baseline_case_01(self): + f_module = self.get_module_mock() + data = {"value": [{"Name": "baseline1", "data": "fakedata1"}, {"Name": "baseline2", "data": "fakedata2"}]} + val = self.module.get_specific_baseline(f_module, "baseline1", data) + assert val == {"Name": "baseline1", "data": "fakedata1"} + + def test_ome_firmware_get_specific_baseline_case_02(self): + f_module = self.get_module_mock() + baseline_name = "baseline3" + msg = "Unable to complete the operation because the requested baseline with" \ + " name '{0}' does not exist.".format(baseline_name) + data = {"value": [{"Name": "baseline1", "data": "fakedata1"}, {"Name": "baseline2", "data": "fakedata2"}]} + with pytest.raises(Exception) as exc: + self.module.get_specific_baseline(f_module, baseline_name, data) + assert exc.value.args[0] == msg + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_main_ome_firmware_baseline_info_failure_case1(self, exc_type, mocker, ome_default_args, + ome_connection_ome_firmware_baseline_info_mock, + ome_response_mock): + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type("TESTS") + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type("exception message") + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', + 400, + 'http error message', + { + "accept-type": "application/json"}, + StringIO(json_str)) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 
"error_info" in result + assert result['msg'] == 'HTTP Error 400: http error message' + + ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', + 404, + '<404 not found>', + { + "accept-type": "application/json"}, + StringIO(json_str)) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert "error_info" not in result + assert result["msg"] == "404 Not Found.The requested resource is not available." + assert 'baseline_info' not in result + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py new file mode 100644 index 00000000..c0f0a514 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py @@ -0,0 +1,864 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ssl import SSLError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_catalog +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from io import StringIO +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import ConnectionError, SSLValidationError +import json +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_catalog.' + +NO_CHANGES_MSG = "No changes found to be applied." +INVALID_CATALOG_ID = "Invalid catalog ID provided." 
+CATALOG_DEL_SUCCESS = "Successfully deleted the firmware catalog." +CATALOG_BASELINE_ATTACHED = "Unable to delete as catalog is associated with baseline(s)." +CATALOG_JOB_RUNNING = "Catalog job '{name}' with ID {id} is running.Retry after job completion." +CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied." +CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No changes found to be applied." +INVALID_CATALOG_ID = "Invalid catalog ID provided." +CATALOG_DEL_SUCCESS = "Successfully deleted the firmware catalog(s)." +CATALOG_BASELINE_ATTACHED = "Unable to delete the catalog as it is with baseline(s)." +CATALOG_EXISTS = "The catalog with the name '{new_name}' already exists in the system." +DELL_ONLINE_EXISTS = "Catalog with 'DELL_ONLINE' repository already exists with the name '{catalog_name}'." +NAMES_ERROR = "Only delete operations accept multiple catalog names or IDs." +CATALOG_ID_NOT_FOUND = "Catalog with ID '{catalog_id}' not found." +CATALOG_NAME_NOT_FOUND = "Catalog '{catalog_name}' not found." +CATALOG_UPDATED = "Successfully {operation} the firmware catalog." 
+ +catalog_info = { + "@odata.context": "/api/$metadata#Collection(UpdateService.Catalogs)", + "@odata.count": 3, + "value": [ + { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(29)", + "Id": 29, + "Filename": "catalog.gz", + "SourcePath": "catalog/catalog.gz", + "Status": "Failed", + "TaskId": 21448, + "BaseLocation": None, + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "AssociatedBaselines": ["abc"], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 19, + "Name": "catalog_http3", + "Description": "catalog desc3", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "HTTP" + } + }, + { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(30)", + "Id": 30, + "Filename": "catalog.gz", + "SourcePath": "catalog/catalog.gz", + "Status": "Failed", + "BaseLocation": None, + "TaskId": 21449, + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 20, + "Name": "catalog_http4", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "HTTP" + } + }, + { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog 
desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + ] +} + +catalog_resp = { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } +} + + +@pytest.fixture +def ome_connection_catalog_mock(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeFirmwareCatalog(FakeAnsibleModule): + module = ome_firmware_catalog + + @pytest.fixture + def mock__get_catalog_payload(self, mocker): + mock_payload = mocker.patch( + MODULE_PATH + '_get_catalog_payload', + return_value={"Repistory": "Dummy val"}) + return mock_payload + + def test_ome_catalog_firmware_main_ome_firmware_catalog_no_mandatory_arg_passed_failuer_case(self, ome_default_args, + module_mock, + mock__get_catalog_payload, + ome_connection_catalog_mock): + result = self._run_module_with_fail_json(ome_default_args) + assert 'catalog_status' not in result + + inp_param1 = {"hostname": "host ip", "username": "username", + "password": "password", "port": 443, "catalog_name": ["catalog_name"]} 
+ inp_param2 = {"hostname": "host ip", "username": "username", + "password": "password", "port": 443, "catalog_name": ["catalog_name"], "catalog_description": "desc", + "source": "10.255.2.128:2607", "source_path": "source_path", "file_name": "file_name", + "repository_type": "HTTPS", + "repository_username": "repository_username", + "repository_password": "repository_password", + "repository_domain": "repository_domain", + "check_certificate": True} + inp_param3 = {"hostname": "host ip", "username": "username", + "password": "password", "port": 443, "catalog_name": " ", "catalog_description": None} + inp_param4 = {"hostname": "host ip", "username": "username", + "password": "password", "port": 443, "catalog_name": ["catalog_name"], "catalog_description": "desc", + "source": "10.255.2.128:2607", "source_path": "source_path", "file_name": "file_name", + "repository_type": "DELL_ONLINE", + "repository_username": "repository_username", + "repository_password": "repository_password", + "repository_domain": "repository_domain", + "check_certificate": True} + inp_param5 = {"hostname": "host ip", "username": "username", + "password": "password", "port": 443, "catalog_name": ["catalog_name"], "catalog_description": "desc", + "source_path": "source_path", "file_name": "file_name", + "repository_type": "DELL_ONLINE", + "repository_username": "repository_username", + "repository_password": "repository_password", + "repository_domain": "repository_domain", + "check_certificate": True} + out1 = {"Repository": {"Name": "catalog_name"}} + out2 = {'Filename': 'file_name', 'SourcePath': 'source_path', + 'Repository': {'Name': 'catalog_name', 'Description': 'desc', + 'Source': '10.255.2.128:2607', 'RepositoryType': 'HTTPS', 'Username': 'repository_username', + 'Password': 'repository_password', 'DomainName': 'repository_domain', + 'CheckCertificate': True}} + + out3 = {"Repository": {"Name": " "}} + out4 = {'Filename': 'file_name', 'SourcePath': 'source_path', + 'Repository': 
{'Name': 'catalog_name', 'Description': 'desc', + 'Source': '10.255.2.128:2607', 'RepositoryType': 'DELL_ONLINE', + 'CheckCertificate': True}} + out5 = {'Filename': 'file_name', 'SourcePath': 'source_path', + 'Repository': {'Name': 'catalog_name', 'Description': 'desc', + 'Source': 'downloads.dell.com', 'RepositoryType': 'DELL_ONLINE', + 'CheckCertificate': True}} + + @pytest.mark.parametrize("params", [{"inp": inp_param1, "out": out1}, + {"inp": inp_param2, "out": out2}, + {"inp": inp_param3, "out": out3} + ]) + def test_ome_catalog_firmware__get_catalog_payload_success_case(self, params): + payload = self.module._get_catalog_payload(params["inp"], params["inp"]["catalog_name"][0]) + assert payload == params["out"] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_catalog_firmware_ome_catalog_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_catalog_mock, + ome_response_mock): + ome_default_args.update({"state": "absent", "catalog_name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_existing_catalog', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_existing_catalog', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_existing_catalog', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + 
@pytest.mark.parametrize("params", [{"state": "present", "catalog_name": ["catalog_online2"]}, + {"state": "present", "catalog_id": [34]}]) + def test_ome_catalog_firmware_check_existing_catalog_case01(self, params, ome_connection_catalog_mock): + ome_connection_catalog_mock.get_all_items_with_pagination.return_value = {"value": catalog_info["value"]} + f_module = self.get_module_mock(params=params) + catalog, all_catalog = self.module.check_existing_catalog(f_module, ome_connection_catalog_mock, + params["state"]) + assert catalog[0] == { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + assert all_catalog == {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"} + + @pytest.mark.parametrize("params", + [{"state": "absent", "catalog_name": ["catalog_online2", "catalog_http4"]}, + {"state": "absent", "catalog_id": [34, 30]}]) + def test_ome_catalog_firmware_check_existing_catalog_case02(self, params, ome_connection_catalog_mock): + ome_connection_catalog_mock.get_all_items_with_pagination.return_value = {"value": catalog_info["value"]} + f_module = self.get_module_mock(params=params) + catalog, all_catalog = self.module.check_existing_catalog(f_module, ome_connection_catalog_mock, + params["state"]) + assert catalog == [ + { + "@odata.type": 
"#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(30)", + "Id": 30, + "Filename": "catalog.gz", + "SourcePath": "catalog/catalog.gz", + "Status": "Failed", + "BaseLocation": None, + "TaskId": 21449, + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 20, + "Name": "catalog_http4", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "HTTP" + } + }, + { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + ] + assert all_catalog == {} + + @pytest.mark.parametrize("params", [{"state": "present", "catalog_name": ["catalog_online2"]}]) + def test_ome_catalog_firmware_check_existing_catalog_case03(self, params, ome_connection_catalog_mock): + ome_connection_catalog_mock.get_all_items_with_pagination.return_value = {"value": catalog_info["value"]} + f_module = self.get_module_mock(params=params) + catalog, all_catalog = self.module.check_existing_catalog(f_module, ome_connection_catalog_mock, + params["state"], + "catalog_online2") + assert catalog[0] == { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": 
"/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + assert all_catalog == {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"} + + def test_ome_catalog_firmware_get_updated_catalog_info(self, ome_connection_catalog_mock): + resp = { + "@odata.type": "#UpdateService.Catalogs", + "@odata.id": "/api/UpdateService/Catalogs(34)", + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Status": "Completed", + "TaskId": 21453, + "BaseLocation": "downloads.dell.com", + "Schedule": { + "StartTime": None, + "EndTime": None, + "Cron": "startnow" + }, + "BundlesCount": 173, + "PredecessorIdentifier": "aaaaaa", + "AssociatedBaselines": [], + "Repository": { + "@odata.type": "#UpdateService.Repository", + "Id": 24, + "Name": "catalog_online2", + "Description": "catalog desc4", + "Source": "downloads.dell.com", + "DomainName": None, + "Username": None, + "Password": None, + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + f_module = self.get_module_mock(params={"state": "present", "catalog_name": "catalog_online2"}) + ome_connection_catalog_mock.get_all_items_with_pagination.return_value = {"value": catalog_info["value"]} + catalog = self.module.get_updated_catalog_info(f_module, ome_connection_catalog_mock, resp) + assert catalog == resp + + @pytest.mark.parametrize("params", + 
[{"mparams": {"state": "present", "job_wait_timeout": 10, "job_wait": True, + "catalog_name": ["catalog_online2"]}}]) + @pytest.mark.parametrize("action", + ["created", "modified"]) + def test_ome_catalog_firmware_exit_catalog(self, mocker, ome_connection_catalog_mock, params, action): + ome_connection_catalog_mock.job_tracking.return_value = False, "message" + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + f_module = self.get_module_mock(params=params["mparams"]) + mocker.patch(MODULE_PATH + 'get_updated_catalog_info', return_value=catalog_resp) + msg = CATALOG_UPDATED.format(operation=action) + with pytest.raises(Exception) as err: + self.module.exit_catalog(f_module, ome_connection_catalog_mock, catalog_resp, action, msg) + assert err.value.args[0] == msg + + @pytest.mark.parametrize("params", + [{"mparams": {"state": "present", "job_wait_timeout": 10, "job_wait": False, + "catalog_name": ["catalog_online2"]}}]) + @pytest.mark.parametrize("action", + ["created", "modified"]) + def test_ome_catalog_firmware_exit_catalog2(self, mocker, ome_connection_catalog_mock, params, action): + mocker.patch(MODULE_PATH + 'time.sleep', return_value=None) + f_module = self.get_module_mock(params=params["mparams"]) + mocker.patch(MODULE_PATH + 'get_updated_catalog_info', return_value=catalog_resp) + msg = CATALOG_UPDATED.format(operation=action) + with pytest.raises(Exception) as err: + self.module.exit_catalog(f_module, ome_connection_catalog_mock, catalog_resp, action, msg) + assert err.value.args[0] == msg + + def test_ome_catalog_firmware_validate_dell_online_case01(self): + all_catalog = {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"} + f_module = self.get_module_mock(params={"catalog_name": ["catalog_online2"]}) + self.module.validate_dell_online(all_catalog, f_module) + + def test_ome_catalog_firmware_validate_dell_online_case02(self): + all_catalog = {"catalog_http4": "HTTP", + "catalog_http3": "HTTP"} + f_module = 
self.get_module_mock(params={"catalog_name": ["catalog_online2"]}) + self.module.validate_dell_online(all_catalog, f_module) + + def test_ome_catalog_firmware_validate_dell_online_case03(self): + all_catalog = {"catalog_online3": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"} + f_module = self.get_module_mock(params={"catalog_name": ["catalog_online2"]}) + with pytest.raises(Exception) as err: + self.module.validate_dell_online(all_catalog, f_module) + assert err.value.args[0] == DELL_ONLINE_EXISTS.format(catalog_name="catalog_online3") + + def test_ome_catalog_firmware_create_catalog(self, mocker, ome_response_mock, ome_connection_catalog_mock): + f_module = self.get_module_mock(params={"catalog_name": ["catalog_name"]}) + ome_response_mock.json_data = catalog_resp + mocker.patch(MODULE_PATH + 'exit_catalog', return_value=catalog_resp) + self.module.create_catalog(f_module, ome_connection_catalog_mock) + + def test_ome_catalog_firmware_get_current_catalog_settings(self): + payload = self.module.get_current_catalog_settings(catalog_resp) + assert payload == {'Filename': 'catalog.xml', 'SourcePath': 'catalog/catalog.gz', + 'Repository': {'Name': 'catalog_online2', 'Id': 24, 'Description': 'catalog desc4', + 'RepositoryType': 'DELL_ONLINE', 'Source': 'downloads.dell.com', + 'CheckCertificate': False}} + + def test_ome_catalog_firmware_modify_catalog_case01(self, mocker, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_online2"], "new_catalog_name": "catalog_http3"}) + modify_payload = { + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + } + } + mocker.patch(MODULE_PATH + '_get_catalog_payload', return_value=modify_payload) + with pytest.raises(Exception) as err: + self.module.modify_catalog(f_module, ome_connection_catalog_mock, [catalog_resp], + 
{"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"}) + assert err.value.args[0] == CATALOG_EXISTS.format(new_name="catalog_http3") + + def test_ome_catalog_firmware_modify_catalog_case02(self, mocker, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_online2"], "new_catalog_name": "catalog_http10"}) + modify_payload = { + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + "RepositoryType": "NFS" + } + } + current_payload = { + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Id": 11, + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + mocker.patch(MODULE_PATH + '_get_catalog_payload', return_value=modify_payload) + mocker.patch(MODULE_PATH + 'get_current_catalog_settings', return_value=current_payload) + with pytest.raises(Exception) as err: + self.module.modify_catalog(f_module, ome_connection_catalog_mock, [catalog_resp], + {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"}) + assert err.value.args[0] == "Repository type cannot be changed to another repository type." 
+ + def test_ome_catalog_firmware_modify_catalog_case03(self, mocker, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_online2"], "new_catalog_name": "catalog_http10"}, check_mode=True) + modify_payload = { + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": True, + "RepositoryType": "DELL_ONLINE" + } + } + # current_payload = { + # "Id": 34, + # "Filename": "catalog.xml", + # "SourcePath": "catalog/catalog.gz", + # "Repository": { + # "Id": 11, + # "Name": "catalog_online2", + # "Description": "catalog desc4", + # "CheckCertificate": True, + # "RepositoryType": "DELL_ONLINE" + # } + # } + mocker.patch(MODULE_PATH + '_get_catalog_payload', return_value=modify_payload) + with pytest.raises(Exception) as err: + self.module.modify_catalog(f_module, ome_connection_catalog_mock, [catalog_resp], + {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"}) + assert err.value.args[0] == CHECK_MODE_CHANGE_FOUND_MSG + + @pytest.mark.parametrize("check_mode", [True, False]) + def test_ome_catalog_firmware_modify_catalog_case04(self, check_mode, mocker, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_online2"], "new_catalog_name": "catalog_online2"}, check_mode=check_mode) + modify_payload = { + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + current_payload = { + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Id": 11, + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + mocker.patch(MODULE_PATH + '_get_catalog_payload', 
return_value=modify_payload) + mocker.patch(MODULE_PATH + 'get_current_catalog_settings', return_value=current_payload) + with pytest.raises(Exception) as err: + self.module.modify_catalog(f_module, ome_connection_catalog_mock, [catalog_resp], + {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"}) + assert err.value.args[0] == CHECK_MODE_CHANGE_NOT_FOUND_MSG + + def test_ome_catalog_firmware_modify_catalog_case05(self, mocker, ome_connection_catalog_mock, ome_response_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_online2"], "new_catalog_name": "catalog_http10"}, check_mode=False) + modify_payload = { + "Id": 34, + "Filename": "catalog.xml", + "SourcePath": "catalog/catalog.gz", + "Repository": { + "Name": "catalog_online2", + "Description": "catalog desc4", + "CheckCertificate": False, + "RepositoryType": "DELL_ONLINE" + } + } + mocker.patch(MODULE_PATH + '_get_catalog_payload', return_value=modify_payload) + ome_response_mock.json_data = catalog_resp + mocker.patch(MODULE_PATH + 'exit_catalog', return_value=None) + self.module.modify_catalog(f_module, ome_connection_catalog_mock, [catalog_resp], + {"catalog_online2": "DELL_ONLINE", "catalog_http4": "HTTP", + "catalog_http3": "HTTP"}) + + def test_ome_catalog_firmware_validate_delete_operation_case1(self, ome_response_mock, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_http3", "catalog_online2"]}, check_mode=False) + ome_response_mock.json_data = { + "@odata.context": "/api/$metadata#JobService.Job", + "@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10025)", + "Id": 10025, + "JobName": "Default Console Update Execution Task", + "JobDescription": "Default Console Update Execution Task", + "State": "Enabled", + "CreatedBy": "system", + "Targets": [], + "Params": [], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2051, + "Name": "NotRun" + }, + 
"JobType": { + "@odata.type": "#JobService.JobType", + "Id": 124, + "Name": "ConsoleUpdateExecution_Task", + "Internal": False + }, + "JobStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2080, + "Name": "New" + }, + } + with pytest.raises(Exception) as err: + self.module.validate_delete_operation(ome_connection_catalog_mock, f_module, catalog_info["value"], [1, 2]) + assert err.value.args[0] == CATALOG_BASELINE_ATTACHED + + def test_ome_catalog_firmware_validate_delete_operation_case2(self, ome_response_mock, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_http3", "catalog_online2"]}, check_mode=True) + ome_response_mock.json_data = { + "@odata.context": "/api/$metadata#JobService.Job", + "@odata.type": "#JobService.Job", + "@odata.id": "/api/JobService/Jobs(10025)", + "Id": 10025, + "JobName": "Default Console Update Execution Task", + "JobDescription": "Default Console Update Execution Task", + "State": "Enabled", + "CreatedBy": "system", + "Targets": [], + "Params": [], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2051, + "Name": "NotRun" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 124, + "Name": "ConsoleUpdateExecution_Task", + "Internal": False + }, + "JobStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2080, + "Name": "New" + }, + } + catalog_info1 = [catalog_resp] + with pytest.raises(Exception) as err: + self.module.validate_delete_operation(ome_connection_catalog_mock, f_module, catalog_info1, [34]) + assert err.value.args[0] == CHECK_MODE_CHANGE_FOUND_MSG + + def test_ome_catalog_firmware_validate_delete_operation_case3(self, ome_response_mock, ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_http3", "catalog_online2"]}, check_mode=False) + ome_response_mock.json_data = { + "@odata.context": "/api/$metadata#JobService.Job", + "@odata.type": "#JobService.Job", + "@odata.id": 
"/api/JobService/Jobs(10025)", + "Id": 10025, + "JobName": "Default Console Update Execution Task", + "JobDescription": "Default Console Update Execution Task", + "State": "Enabled", + "CreatedBy": "system", + "Targets": [], + "Params": [], + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2051, + "Name": "NotRun" + }, + "JobType": { + "@odata.type": "#JobService.JobType", + "Id": 124, + "Name": "ConsoleUpdateExecution_Task", + "Internal": False + }, + "JobStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2080, + "Name": "New" + }, + } + catalog_info1 = [catalog_resp] + self.module.validate_delete_operation(ome_connection_catalog_mock, f_module, catalog_info1, [34]) + + @pytest.mark.parametrize("params", [ + {"fail_json": True, "json_data": {"JobId": 1234}, + "check_existing_catalog": ([], []), + "mparams": {"state": "present", "job_wait_timeout": 10, "job_wait": False, + "catalog_id": 12, "repository_type": "DELL_ONLINE"}, + 'message': INVALID_CATALOG_ID, "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "check_existing_catalog": ([], []), "check_mode": True, + "mparams": {"state": "present", "job_wait_timeout": 10, "job_wait": False, + "catalog_name": "c1", "repository_type": "HTTPS"}, + 'message': CHECK_MODE_CHANGE_FOUND_MSG, "success": True + } + ]) + def test_main(self, params, ome_connection_catalog_mock, ome_default_args, ome_response_mock, mocker): + mocker.patch(MODULE_PATH + 'check_existing_catalog', return_value=params.get("check_existing_catalog")) + # mocker.patch(MODULE_PATH + '_get_baseline_payload', return_value=params.get("_get_baseline_payload")) + ome_response_mock.success = True + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params.get('mparams')) + if params.get("fail_json", False): + result = self._run_module_with_fail_json(ome_default_args) + else: + result = self._run_module(ome_default_args, check_mode=params.get("check_mode", False)) + assert 
result["msg"] == params['message'] + + @pytest.mark.parametrize("check_mode", [True, False]) + def test_ome_catalog_firmware_validate_delete_operation_case4(self, check_mode, ome_response_mock, + ome_connection_catalog_mock): + f_module = self.get_module_mock( + params={"catalog_name": ["catalog_http3", "catalog_online2"]}, check_mode=check_mode) + with pytest.raises(Exception) as err: + self.module.validate_delete_operation(ome_connection_catalog_mock, f_module, [], []) + assert err.value.args[0] == CHECK_MODE_CHANGE_NOT_FOUND_MSG + + def test_ome_catalog_firmware_delete_catalog(self, mocker, ome_connection_catalog_mock, ome_response_mock): + mocker.patch(MODULE_PATH + 'validate_delete_operation', return_value=None) + ome_response_mock.json_data = [1, 2] + f_module = self.get_module_mock(params={"state": "absent", "catalog_id": [1, 2]}) + with pytest.raises(Exception) as err: + self.module.delete_catalog(f_module, ome_connection_catalog_mock, catalog_info["value"]) + assert err.value.args[0] == CATALOG_DEL_SUCCESS + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_catalog_firmware_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_catalog_mock, ome_response_mock): + ome_default_args.update({"catalog_name": "catalog1", "repository_type": "HTTPS"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'validate_names', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'validate_names', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + 
mocker.patch(MODULE_PATH + 'validate_names', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + @pytest.mark.parametrize("param", [{"state": "absent", "catalog_id": [1, 2]}, + {"state": "absent", "catalog_name": ["abc", "xyz"]}]) + def test_ome_catalog_firmware_validate_names(self, param): + f_module = self.get_module_mock(params=param) + self.module.validate_names("absent", f_module) + + @pytest.mark.parametrize("param", [{"state": "present", "catalog_id": [1, 2]}, + {"state": "present", "catalog_name": ["abc", "xyz"]}]) + def test_ome_catalog_firmware_validate_names_exception_case(self, param): + f_module = self.get_module_mock(params=param) + with pytest.raises(Exception) as err: + self.module.validate_names("present", f_module) + assert err.value.args[0] == NAMES_ERROR + + def test_ome_catalog_firmware_argument_exception_case1(self, ome_default_args): + ome_default_args.update({"catalog_name": "t1"}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "state is present but all of the following are missing: repository_type" + + def test_ome_catalog_firmware_argument_exception_case2(self, ome_default_args): + ome_default_args.update({"catalog_id": 1}) + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "state is present but all of the following are missing: repository_type" + + def test_ome_catalog_firmware_argument_exception_case3(self, ome_default_args): + result = self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "one of the following is required: catalog_name, catalog_id" + + def test_ome_catalog_firmware_argument_exception_case4(self, ome_default_args): + ome_default_args.update({"repository_type": "HTTPS", "catalog_name": "t1", "catalog_id": 1}) + result = 
self._run_module_with_fail_json(ome_default_args) + assert result["msg"] == "parameters are mutually exclusive: catalog_name|catalog_id" diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py new file mode 100644 index 00000000..6aede932 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.5.0 +# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_groups +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MULTIPLE_GROUPS_MSG = "Provide only one unique device group when state is present." +NONEXIST_GROUP_ID = "A device group with the provided ID does not exist." +NONEXIST_PARENT_ID = "A parent device group with the provided ID does not exist." +INVALID_PARENT = "The provided parent device group is not a valid user-defined static device group." +INVALID_GROUPS_DELETE = "Provide valid static device group(s) for deletion." +INVALID_GROUPS_MODIFY = "Provide valid static device group for modification." +PARENT_CREATION_FAILED = "Unable to create a parent device group with the name {pname}." +PARENT_IN_SUBTREE = "The parent group is already under the provided group." 
+CREATE_SUCCESS = "Successfully {op}d the device group." +GROUP_PARENT_SAME = "Provided parent and the device group cannot be the same." +DELETE_SUCCESS = "Successfully deleted the device group(s)." +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +STATIC_ROOT = 'Static Groups' +SETTLING_TIME = 2 + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_groups.' + + +@pytest.fixture +def ome_connection_mock_for_groups(mocker, ome_response_mock): + connection_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.ome_groups.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeGroups(FakeAnsibleModule): + module = ome_groups + + @pytest.mark.parametrize("params", [ + {"json_data": {"value": [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}]}, + 'message': DELETE_SUCCESS, "success": True, 'mparams': {'name': 'g1', 'state': 'absent'}}, + {"json_data": {"value": [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}]}, + 'message': DELETE_SUCCESS, "success": True, 'mparams': {'name': 'g1', 'state': 'absent'}}, + {"json_data": {"value": [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}]}, + 'message': CHANGES_FOUND, "success": True, 'mparams': {'group_id': 24, 'state': 'absent'}, 'check_mode': True}, + {"json_data": {"value": [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}]}, + 'message': NO_CHANGES_MSG, "success": True, 'mparams': {'name': 'g2', 'state': 'absent'}}, + {"json_data": {"value": [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}]}, + 'message': NO_CHANGES_MSG, "success": True, 'mparams': {'name': 'g2', 'state': 'absent'}, 'check_mode': True}]) + def test_ome_groups_delete(self, params, ome_connection_mock_for_groups, 
ome_response_mock, ome_default_args, + module_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + ome_connection_mock_for_groups.get_all_items_with_pagination.return_value = params['json_data'] + ome_default_args.update(params['mparams']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [{"json_data": { + "value": [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'gp1', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, + {"json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'gp21', 'description': 'My group described'}, 'return_data': 22, + 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, + {"json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'parent_group_id': 25, 'description': 'My group described'}, 'return_data': 22, + 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, + {"json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 
3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'Static Groups', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 1, 'MembershipTypeId': 12}}, + {"json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'parent_group_id': 1, 'description': 'My group described'}, 'return_data': 22, + 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 1, 'MembershipTypeId': 12}}, + {"json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CHANGES_FOUND, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'gp21', 'description': 'My group described'}, 'return_data': 22, + 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}, 'check_mode': True}]) + def test_ome_groups_create(self, params, ome_connection_mock_for_groups, ome_response_mock, ome_default_args, + module_mock, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['return_data'] + ome_connection_mock_for_groups.get_all_items_with_pagination.return_value = params['json_data'] + ome_connection_mock_for_groups.strip_substr_dict.return_value = params.get('created_group', {}) + mocker.patch(MODULE_PATH + 'get_ome_group_by_id', return_value=params.get('created_group', {})) + mocker.patch(MODULE_PATH + 
'create_parent', return_value=params['created_group'].get('ParentId')) + ome_default_args.update(params['mparams']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == (params['message']).format(op='create') + + @pytest.mark.parametrize("params", [{"json_data": { + 'value': [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12, 'description': 'My group described'}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CREATE_SUCCESS, "success": True, + 'mparams': {'name': 'g1', 'new_name': 'j1', 'parent_group_name': 'gp1', 'description': 'description modified'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'ParentId': 25, 'MembershipTypeId': 12, + 'description': 'My group described'}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': CHANGES_FOUND, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'gp1', 'description': 'description modified'}, 'return_data': 22, + 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}, 'check_mode': True}, { + "json_data": {'value': [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'ParentId': 25, 'MembershipTypeId': 12, + 'Description': 'My group described'}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': NO_CHANGES_MSG, "success": True, + 'mparams': {'name': 'g1', 'new_name': 'g1', 'parent_group_name': 'gp1', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 24, 'ParentId': 25, 'MembershipTypeId': 12}, + 'check_mode': 
True}, ]) + def test_ome_groups_modify(self, params, ome_connection_mock_for_groups, ome_response_mock, ome_default_args, + module_mock, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['return_data'] + ome_connection_mock_for_groups.get_all_items_with_pagination.return_value = params['json_data'] + ome_connection_mock_for_groups.strip_substr_dict.return_value = params.get('created_group', {}) + mocker.patch(MODULE_PATH + 'get_ome_group_by_id', return_value=params.get('created_group', {})) + mocker.patch(MODULE_PATH + 'create_parent', return_value=params['created_group'].get('ParentId')) + # mocker.patch(MODULE_PATH + 'is_parent_in_subtree', return_value=False) + ome_default_args.update(params['mparams']) + result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False)) + assert result['msg'] == (params['message']).format(op='update') + + @pytest.mark.parametrize("params", [{"json_data": { + 'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'g3', 'Id': 12, 'TypeId': 2000, 'MembershipTypeId': 24}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': MULTIPLE_GROUPS_MSG, "success": True, 'mparams': {'name': ['g1', 'g3'], 'parent_group_name': 'gp1', + 'description': 'State present and multiple groups'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'g3', 'Id': 12, 'TypeId': 2000, 'MembershipTypeId': 24}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': NONEXIST_GROUP_ID, "success": True, + 'mparams': {'group_id': 13, 'parent_group_name': 'gp1', 'description': 'State present 
and no group_id'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'g3', 'Id': 12, 'TypeId': 2000, 'MembershipTypeId': 24}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': INVALID_PARENT, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'g3', 'description': 'State present and invalid parent'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'g3', 'Id': 12, 'TypeId': 2000, 'MembershipTypeId': 24}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': INVALID_GROUPS_DELETE, "success": True, + 'mparams': {'name': ['g1', 'g3'], 'state': 'absent', 'description': 'State absent and invalid group'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': NONEXIST_PARENT_ID, "success": True, + 'mparams': {'name': 'g1', 'parent_group_id': 26, 'description': 'create with non exist parent id'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g2', 'Id': 24, 'TypeId': 2000, 'MembershipTypeId': 24}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 
'MembershipTypeId': 12}]}, + 'message': INVALID_PARENT, "success": True, + 'mparams': {'name': 'g1', 'parent_group_id': 24, 'description': 'create with non exist parent id'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 26, 'ParentId': 25, 'MembershipTypeId': 12}}, { + "json_data": {'value': [{'Name': 'g1', 'Id': 24, 'TypeId': 2000, 'ParentId': 25, 'MembershipTypeId': 24, + 'Description': 'My group described'}, + {'Name': 'gp1', 'Id': 25, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': INVALID_GROUPS_MODIFY, "success": True, + 'mparams': {'name': 'g1', 'new_name': 'g1', 'parent_group_name': 'gp1', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 24, 'ParentId': 25, 'MembershipTypeId': 12}, + 'check_mode': True}, + {"json_data": {'value': [{'Name': 'g1', 'Id': 24, 'TypeId': 3000, 'ParentId': 25, 'MembershipTypeId': 12, + 'Description': 'My group described'}, {'Name': 'gp1', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': GROUP_PARENT_SAME, "success": True, + 'mparams': {'name': 'g1', 'new_name': 'g1', 'parent_group_name': 'gp1', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 24, 'ParentId': 25, 'MembershipTypeId': 12}, + 'check_mode': True}, + {"json_data": {'value': [{'Name': 'x1', 'Id': 24, 'TypeId': 3000, 'ParentId': 25, 'MembershipTypeId': 12, + 'Description': 'My group described'}, + {'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}, + {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12}]}, + 'message': GROUP_PARENT_SAME, "success": True, + 'mparams': {'name': 'g1', 'parent_group_name': 'g1', 'description': 'My group described'}, + 'return_data': 22, 'created_group': {'Name': 'g1', 'Id': 24, 'ParentId': 25, 'MembershipTypeId': 12}, + 
'check_mode': True}]) + def test_ome_groups_fail_jsons(self, params, ome_connection_mock_for_groups, ome_response_mock, ome_default_args, + module_mock, mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['return_data'] + ome_connection_mock_for_groups.get_all_items_with_pagination.return_value = params['json_data'] + ome_connection_mock_for_groups.strip_substr_dict.return_value = params.get('created_group', {}) + mocker.patch(MODULE_PATH + 'get_ome_group_by_id', return_value=params.get('created_group', {})) + mocker.patch(MODULE_PATH + 'create_parent', return_value=params['created_group'].get('ParentId')) + ome_default_args.update(params['mparams']) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", [{"json_data": 12, "mparams": {'name': 'g1', 'parent_group_name': 'gp21'}}]) + def test_create_parent(self, params, ome_connection_mock_for_groups, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params['mparams']) + static_root = {'Name': 'Static Groups', 'Id': 1, 'TypeId': 2000, 'MembershipTypeId': 12} + group_id = self.module.create_parent(ome_connection_mock_for_groups, f_module, static_root) + assert group_id == params['json_data'] + + @pytest.mark.parametrize("params", + [{"json_data": {'Name': 'g2', 'Id': 24, 'TypeId': 3000, 'MembershipTypeId': 12}}]) + def test_get_ome_group_by_id(self, params, ome_connection_mock_for_groups, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + group = self.module.get_ome_group_by_id(ome_connection_mock_for_groups, 24) + assert group == params['json_data'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def 
test_ome_groups_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_groups, ome_response_mock): + ome_default_args.update({"state": "absent", "name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_valid_groups', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_valid_groups', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_valid_groups', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py new file mode 100644 index 00000000..93c18d22 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py @@ -0,0 +1,1346 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import ome_identity_pool +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ssl import SSLError +from io import StringIO +from ansible.module_utils._text import to_text +import json + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +@pytest.fixture +def ome_connection_mock_for_identity_pool(mocker, ome_response_mock): + connection_class_mock = mocker.patch( + MODULE_PATH + 'ome_identity_pool.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMeIdentityPool(FakeAnsibleModule): + module = ome_identity_pool + + def test_main_ome_identity_pool_success_case1(self, mocker, ome_default_args, + ome_connection_mock_for_identity_pool, ome_response_mock): + sub_param = {"pool_name": "pool1", + "pool_description": "Identity pool with ethernet and fcoe settings", + "ethernet_settings": { + "starting_mac_address": "50-50-50-50-50-00", + "identity_count": 60}, + "fcoe_settings": { + "starting_mac_address": "70-70-70-70-70-00", + "identity_count": 75 + }, + "iscsi_settings": { + "identity_count": 30, + "initiator_config": { + "iqn_prefix": "iqn.myprefix." 
+ }, + "initiator_ip_pool_settings": { + "gateway": "192.168.4.1", + "ip_range": "10.33.0.1-10.33.0.255", + "primary_dns_server": "10.8.8.8", + "secondary_dns_server": "8.8.8.8", + "subnet_mask": "255.255.255.0" + }, + "starting_mac_address": "60:60:60:60:60:00" + }, + "fc_settings": { + "identity_count": 45, + "starting_address": "10-10-10-10-10-10" + } + } + message_return = {"msg": "Successfully created an identity pool.", + "result": {"Id": 36, "IsSuccessful": True, "Issues": []}} + mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_create_modify', + return_value=message_return) + ome_default_args.update(sub_param) + result = self.execute_module(ome_default_args) + assert result['changed'] is True + assert 'pool_status' in result and "msg" in result + assert result["msg"] == "Successfully created an identity pool." + assert result['pool_status'] == { + "Id": 36, + "IsSuccessful": True, + "Issues": [] + } + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_main_ome_identity_pool_failure_case1(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_identity_pool, ome_response_mock): + ome_default_args.update({"pool_name": "pool1"}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_create_modify', + side_effect=exc_type("ansible.module_utils.urls.open_url error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_create_modify', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_create_modify', + side_effect=exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": 
"application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'pool_status' not in result + assert 'msg' in result + + def test_main_ome_identity_pool_no_mandatory_arg_passed_failure_case(self, ome_default_args, + ome_connection_mock_for_identity_pool): + result = self._run_module_with_fail_json(ome_default_args) + assert 'pool_status' not in result + + @pytest.mark.parametrize("param", [{"ethernet_settings": {"invalid_key": "value"}}, + {"fcoe_settings": {"invalid_key": "value"}}, + {"iscsi_settings": {"invalid_key": "value"}}, + {"iscsi_settings": {"initiator_config": {"invalid_key": "value"}}}, + {"iscsi_settings": {"initiator_ip_pool_settings": {"gateway1": "192.168.4.1"}}}, + {"iscsi_settings": { + "initiator_ip_pool_settings": {"primary_dns_server": "192.168.4.1", + "ip_range1": "value"}}}, + {"fc_settings": {"invalid_key": "value"}}, + {"name": "name1"}]) + def test_main_ome_identity_pool_invalid_settings(self, param, ome_default_args, + ome_connection_mock_for_identity_pool): + ome_default_args.update(param) + result = self._run_module_with_fail_json(ome_default_args) + assert 'pool_status' not in result + + @pytest.mark.parametrize("action", ["create", "modify"]) + def test_get_success_message(self, action): + json_data = { + "Id": 36, + "IsSuccessful": True, + "Issues": [] + } + message = self.module.get_success_message(action, json_data) + if action == "create": + assert message["msg"] == "Successfully created an identity pool." + else: + assert message["msg"] == "Successfully modified the identity pool." 
+ assert message["result"] == { + "Id": 36, + "IsSuccessful": True, + "Issues": [] + } + + def test_pool_create_modify_success_case_01(self, mocker, ome_connection_mock_for_identity_pool, ome_response_mock): + params = {"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.validate_modify_create_payload') + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(10, {"paylaod": "value"})) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_payload', + return_value={"Name": "name"}) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_success_message', + return_value={"msg": "Successfully modified the identity pool"}) + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_updated_modify_payload') + mocker.patch(MODULE_PATH + 'ome_identity_pool.compare_nested_dict', + return_value=False) + f_module = self.get_module_mock(params=params) + message = self.module.pool_create_modify(f_module, ome_connection_mock_for_identity_pool) + assert message == {"msg": "Successfully modified the identity pool"} + + def test_pool_create_modify_success_case_02(self, mocker, ome_connection_mock_for_identity_pool, ome_response_mock): + params = {"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.validate_modify_create_payload') + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(0, None)) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_payload', + return_value={"Name": "name"}) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_success_message', + return_value={"msg": "Successfully created an identity pool"}) + f_module = self.get_module_mock(params=params) + message = self.module.pool_create_modify(f_module, ome_connection_mock_for_identity_pool) + assert message == {"msg": "Successfully created an identity pool"} + + def test_pool_create_modify_success_case_03(self, mocker, ome_connection_mock_for_identity_pool, ome_response_mock): + params = 
{"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(10, {"payload": "value"})) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_payload', + return_value={"Name": "pool1"}) + mocker.patch(MODULE_PATH + 'ome_identity_pool.get_success_message', + return_value={"msg": "Successfully modified the identity pool"}) + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_updated_modify_payload') + mocker.patch(MODULE_PATH + 'ome_identity_pool.compare_nested_dict', + return_value=True) + f_module = self.get_module_mock(params=params) + with pytest.raises(Exception) as exc: + self.module.pool_create_modify(f_module, ome_connection_mock_for_identity_pool) + return exc.value.args[0] == "No changes are made to the specified pool name: pool1, as" \ + " as the entered values are the same as the current configuration." + + def test_get_payload_create_case01(self): + params = {"pool_name": "pool1", + "pool_description": "Identity pool with ethernet and fcoe settings", + "ethernet_settings": { + "starting_mac_address": "50-50-50-50-50-00", + "identity_count": 60}, + "fcoe_settings": { + "starting_mac_address": "70-70-70-70-70-00", + "identity_count": 75 + } + } + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module) + assert payload == { + "Name": "pool1", + "Description": "Identity pool with ethernet and fcoe settings", + "EthernetSettings": {"Mac": { + "StartingMacAddress": "UFBQUFAA", + "IdentityCount": 60}}, + "FcoeSettings": {"Mac": { + "StartingMacAddress": "cHBwcHAA", + "IdentityCount": 75}}, + } + + def test_get_payload_create_case02(self): + """new_pool_name should be ignored for create action""" + params = {"pool_name": "pool1", + "new_pool_name": "pool2", + "pool_description": "Identity pool with ethernet and fcoe settings", + "ethernet_settings": { + "starting_mac_address": "50-50-50-50-50-00", + "identity_count": 60}, + "fcoe_settings": { + 
"starting_mac_address": "70-70-70-70-70-00", + "identity_count": 75 + } + } + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module) + assert payload == { + "Name": "pool1", + "Description": "Identity pool with ethernet and fcoe settings", + "EthernetSettings": {"Mac": { + "StartingMacAddress": "UFBQUFAA", + "IdentityCount": 60}}, + "FcoeSettings": {"Mac": { + "StartingMacAddress": "cHBwcHAA", + "IdentityCount": 75}}, + } + assert payload["Name"] == "pool1" + + def test_get_payload_create_case03(self): + """new_pool_name should be ignored for create action""" + params = { + "ethernet_settings": { + "identity_count": 60, + "starting_mac_address": "50:50:50:50:50:00" + }, + "fc_settings": { + "identity_count": 45, + "starting_address": "10-10-10-10-10-10" + }, + "fcoe_settings": { + "identity_count": 75, + "starting_mac_address": "aabb.ccdd.7070" + }, + "hostname": "192.168.0.1", + "iscsi_settings": { + "identity_count": 30, + "initiator_config": { + "iqn_prefix": "iqn.myprefix." 
+ }, + "initiator_ip_pool_settings": { + "gateway": "192.168.4.1", + "ip_range": "10.33.0.1-10.33.0.255", + "primary_dns_server": "10.8.8.8", + "secondary_dns_server": "8.8.8.8", + "subnet_mask": "255.255.255.0" + }, + "starting_mac_address": "60:60:60:60:60:00" + }, + "new_pool_name": None, + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "pool_description": "Identity pool with Ethernet, FCoE, ISCSI and FC settings", + "pool_name": "pool1", + "port": 443, + "state": "present", + "username": "admin" + } + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module) + assert payload == { + "Name": "pool1", + "Description": "Identity pool with Ethernet, FCoE, ISCSI and FC settings", + "EthernetSettings": { + "Mac": { + "IdentityCount": 60, + "StartingMacAddress": "UFBQUFAA" + } + }, + "IscsiSettings": { + "Mac": { + "IdentityCount": 30, + "StartingMacAddress": "YGBgYGAA" + }, + "InitiatorConfig": { + "IqnPrefix": "iqn.myprefix." + }, + "InitiatorIpPoolSettings": { + "IpRange": "10.33.0.1-10.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + }, + "FcoeSettings": { + "Mac": { + "IdentityCount": 75, + "StartingMacAddress": "qrvM3XBw" + } + }, + "FcSettings": { + "Wwnn": { + "IdentityCount": 45, + "StartingAddress": "IAAQEBAQEBA=" + }, + "Wwpn": { + "IdentityCount": 45, + "StartingAddress": "IAEQEBAQEBA=" + } + } + } + assert payload["FcSettings"]["Wwnn"] == {"IdentityCount": 45, "StartingAddress": "IAAQEBAQEBA="} + assert payload["FcSettings"]["Wwpn"] == {"IdentityCount": 45, "StartingAddress": "IAEQEBAQEBA="} + assert payload["IscsiSettings"]["Mac"] == {"IdentityCount": 30, "StartingMacAddress": "YGBgYGAA"} + assert payload["IscsiSettings"]["InitiatorIpPoolSettings"] == { + "IpRange": "10.33.0.1-10.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + 
} + assert payload["IscsiSettings"]["InitiatorConfig"] == { + "IqnPrefix": "iqn.myprefix." + } + + @pytest.mark.parametrize("state", ["create", "modify"]) + def test_get_payload_create_modify_case04(self, state): + """new_pool_name should be ignored for create action""" + params = {"pool_name": "pool3", + "new_pool_name": "pool4", + "pool_description": "Identity pool with iscsi", + "iscsi_settings": { + "identity_count": 30, + "initiator_config": { + "iqn_prefix": "iqn.myprefix." + }, + "initiator_ip_pool_settings": { + "gateway": "192.168.4.1", + "ip_range": "20.33.0.1-20.33.0.255", + "primary_dns_server": "10.8.8.8", + "secondary_dns_server": "8.8.8.8", + "subnet_mask": "255.255.255.0" + }, + "starting_mac_address": "10:10:10:10:10:00" + } + } + f_module = self.get_module_mock(params=params) + if state == "create": + payload = self.module.get_payload(f_module) + else: + payload = self.module.get_payload(f_module, 11) + assert "FcSettings" not in payload + assert "FcoeSettings" not in payload + assert payload["IscsiSettings"]["Mac"] == {"IdentityCount": 30, "StartingMacAddress": "EBAQEBAA"} + assert payload["IscsiSettings"]["InitiatorIpPoolSettings"] == { + "IpRange": "20.33.0.1-20.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + assert payload["IscsiSettings"]["InitiatorConfig"] == { + "IqnPrefix": "iqn.myprefix." 
+ } + if state == "create": + assert payload["Name"] == "pool3" + assert "Id" not in payload + else: + assert payload["Name"] == "pool4" + assert payload["Id"] == 11 + + @pytest.mark.parametrize("state", ["create", "modify"]) + def test_get_payload_create_case05(self, state): + """new_pool_name should be ignored for create action and considered in modify""" + params = {"pool_name": "pool3", + "new_pool_name": "pool4", + "pool_description": "Identity pool with iscsi", + "fc_settings": { + "identity_count": 48, + "starting_address": "40:40:40:40:40:22" + } + } + f_module = self.get_module_mock(params=params) + if state == "create": + payload = self.module.get_payload(f_module) + else: + payload = self.module.get_payload(f_module, 11) + return_setting = { + "Name": "pool2", + "Description": "Identity pool with fc_settings", + "EthernetSettings": None, + "IscsiSettings": None, + "FcoeSettings": None, + "FcSettings": { + "Wwnn": { + "IdentityCount": 48, + "StartingAddress": "IABAQEBAQCI=" + }, + "Wwpn": { + "IdentityCount": 48, + "StartingAddress": "IAFAQEBAQCI=" + } + } + } + + assert payload["FcSettings"]["Wwnn"]["StartingAddress"] == "IABAQEBAQCI=" + assert payload["FcSettings"]["Wwpn"]["StartingAddress"] == "IAFAQEBAQCI=" + assert payload["FcSettings"]["Wwnn"]["IdentityCount"] == 48 + assert payload["FcSettings"]["Wwpn"]["IdentityCount"] == 48 + if state == "create": + assert payload["Name"] == "pool3" + assert "Id" not in payload + else: + assert payload["Name"] == "pool4" + assert payload["Id"] == 11 + + def test_get_payload_create_case06(self): + params = {"pool_name": "pool1", + "pool_description": "Identity pool with ethernet and fcoe settings", + "fcoe_settings": {"starting_mac_address": "70-70-70-70-70-00", + "identity_count": 75 + }} + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module) + assert payload["Name"] == "pool1" + assert "Id" not in payload + assert "FcoeSettings" in payload + assert "Ethernet_Settings" not 
in payload + + @pytest.mark.parametrize("state", ["create", "modify"]) + def test_get_payload_create_case07(self, state): + # case when new_pool_name not passed + params = {"pool_name": "pool1", + "pool_description": "Identity pool with ethernet and fcoe settings"} + f_module = self.get_module_mock(params=params) + if state == "create": + payload = self.module.get_payload(f_module, None) + else: + payload = self.module.get_payload(f_module, 11) + assert payload["Name"] == "pool1" + if state == "modify": + assert "Id" in payload + else: + assert "Id" not in payload + assert "FcoeSettings" not in payload + assert "Ethernet_Settings" not in payload + assert "Ethernet_Settings" not in payload + assert "Ethernet_Settings" not in payload + + def test_get_payload_modify_case01(self): + """moify action Name should be updated with ne_pool_name and Id has to be updated""" + params = {"pool_name": "pool1", + "new_pool_name": "pool2", + "pool_description": "Identity pool with ethernet and fcoe settings", + "ethernet_settings": {"starting_mac_address": "50-50-50-50-50-00", + "identity_count": 60}, + "fcoe_settings": { + "starting_mac_address": "70-70-70-70-70-00", + "identity_count": 75 + } + } + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module, 10) + assert payload == { + "Id": 10, + "Name": "pool2", + "Description": "Identity pool with ethernet and fcoe settings", + "EthernetSettings": {"Mac": { + "StartingMacAddress": "UFBQUFAA", + "IdentityCount": 60}}, + "FcoeSettings": {"Mac": { + "StartingMacAddress": "cHBwcHAA", + "IdentityCount": 75}}, + } + assert payload["Name"] == "pool2" + assert payload["Id"] == 10 + + def test_get_payload_modify_case02(self): + """payload for only ethernet setting + if ne_ppol_name not passed payload Name should be updated with I(pool_name) + """ + params = {"pool_name": "pool1", + "pool_description": "Identity pool with ethernet and fcoe settings", + "ethernet_settings": {"starting_mac_address": 
"50-50-50-50-50-00", + "identity_count": 60 + } + } + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module, 10) + assert payload["Name"] == "pool1" + assert payload["Id"] == 10 + assert "FcoeSettings" not in payload + assert "EthernetSettings" in payload + assert payload == {'Description': 'Identity pool with ethernet and fcoe settings', + 'Name': 'pool1', + 'Id': 10, + 'EthernetSettings': { + 'Mac': + {'StartingMacAddress': 'UFBQUFAA', 'IdentityCount': 60 + } + } + } + + def test_get_payload_modify_case03(self): + params = {"pool_name": "pool1", "new_pool_name": "pool2"} + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module, 11) + assert payload["Name"] == "pool2" + assert payload["Id"] == 11 + assert "Description" not in payload + assert "FcoeSettings" not in payload + assert "Ethernet_Settings" not in payload + + def test_get_payload_modify_case04(self): + """check case when I(new_pool_name) is empty string + ome is accepting it""" + params = {"pool_name": "pool1", "new_pool_name": ""} + f_module = self.get_module_mock(params=params) + payload = self.module.get_payload(f_module, 11) + assert payload["Name"] == "" + assert payload["Id"] == 11 + assert "Description" not in payload + assert "FcoeSettings" not in payload + assert "Ethernet_Settings" not in payload + + def test_update_mac_settings_case_01(self): + f_module = self.get_module_mock() + settings_params = {"starting_mac_address": "70-70-70-70-70-00", "identity_count": 10} + payload = {"Name": "pool1"} + self.module.update_mac_settings(payload, settings_params, "Ethernet_Settings", f_module) + assert payload == { + "Name": "pool1", + "Ethernet_Settings": {"Mac": {"StartingMacAddress": "cHBwcHAA", "IdentityCount": 10}} + } + + def test_update_mac_settings_case_02(self): + f_module = self.get_module_mock() + settings_params = {"starting_mac_address": "70-70-70-70-70-xx", "identity_count": 10} + payload = {"Name": "pool1"} + with 
pytest.raises(Exception) as exc: + self.module.update_mac_settings(payload, settings_params, "EthernetSettings", f_module) + assert exc.value.args[0] == "Please provide the valid MAC address format for Ethernet settings." + + def test_update_mac_settings_case_03(self): + """case when no sub settting exists""" + settings_params = {} + payload = {"Name": "pool1"} + f_module = self.get_module_mock() + self.module.update_mac_settings(payload, settings_params, "Ethernet_Settings", f_module) + assert payload == { + "Name": "pool1" + } + + def test_get_identity_pool_id_by_name_exist_case(self, mocker, ome_connection_mock_for_identity_pool, + ome_response_mock): + pool_list = {"resp_obj": ome_response_mock, "report_list": [{"Name": "pool1", "Id": 10}, + {"Name": "pool11", "Id": 11}]} + ome_connection_mock_for_identity_pool.get_all_report_details.return_value = pool_list + pool_id, attributes = self.module.get_identity_pool_id_by_name("pool1", ome_connection_mock_for_identity_pool) + assert pool_id == 10 + + def test_get_identity_pool_id_by_name_non_exist_case(self, mocker, ome_connection_mock_for_identity_pool, + ome_response_mock): + pool_list = {"resp_obj": ome_response_mock, "report_list": [{"Name": "pool2", "Id": 10}]} + ome_connection_mock_for_identity_pool.get_all_report_details.return_value = pool_list + pool_id, attributes = self.module.get_identity_pool_id_by_name("pool1", ome_connection_mock_for_identity_pool) + assert pool_id == 0 and attributes is None + + def test_compare_payload_attributes_false_case_for_dummy_pool_setting(self): + """this put opeartion always gives success result without applying + changes because identity count is not passed as pat of it""" + modify_setting_payload = {'Name': 'pool4', 'EthernetSettings': {'Mac': {'StartingMacAddress': 'qrvM3e6q'}}, + 'Id': 33} + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": 
"/api/IdentityPoolService/IdentityPools(33)", + "Id": 33, + "Name": "pool4", + "Description": None, + "CreatedBy": "admin", + "CreationTime": "2020-01-31 14:53:18.59163", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 15:22:08.34596", + "EthernetSettings": None, + "IscsiSettings": None, + "FcoeSettings": None, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(33)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(33)/UsageIdentitySets" + } + val = self.module.compare_nested_dict(modify_setting_payload, existing_setting_payload) + assert val is False + + @pytest.mark.parametrize("modify_payload", + [{"Description": "Identity pool with ethernet and fcoe settings2"}, {"Name": "pool2"}, + {"EthernetSettings": {"Mac": {"IdentityCount": 61, "StartingMacAddress": "UFBQUFAA"}}}, + {"EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "qrvM3e6q"}}}, + {"FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "abcdfe"}}}, + {"FcoeSettings": {"Mac": {"IdentityCount": 71, "StartingMacAddress": "cHBwcHAA"}}}, + {"EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "cHBwcHAA"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}}}, + {"Description": "Identity pool with ethernet and fcoe settings2", + "EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "UFBQUFAA"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}]) + def test_compare_payload_attributes_case_false(self, modify_payload): + """case when chages are exists and payload can be used for modify opeartion""" + modify_setting_payload = modify_payload + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(23)", + "Id": 23, + 
"Name": "pool1", + "Description": "Identity pool with ethernet and fcoe settings1", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 09:28:16.491424", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 09:49:59.012549", + "EthernetSettings": { + "Mac": { + "IdentityCount": 60, + "StartingMacAddress": "UFBQUFAA" + } + }, + "IscsiSettings": None, + "FcoeSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "cHBwcHAA" + } + }, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(23)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(23)/UsageIdentitySets" + } + val = self.module.compare_nested_dict(modify_setting_payload, existing_setting_payload) + assert val is False + + @pytest.mark.parametrize("modify_payload", [ + {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "qrvM3e6q"}}}, + {"Name": "pool1", "EthernetSettings": {"Mac": {"IdentityCount": 70}}}, + {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "qrvM3e6q"}}}, + {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "qrvM3e6q"}}, + "FcoeSettings": {"Mac": {"StartingMacAddress": "cHBwcHAA"}}}, + {"EthernetSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}}}, + {"Description": "Identity pool with ethernet setting"}, + {"Name": "pool1"}, + {"FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}, + {"EthernetSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}, + {"Description": "Identity pool with ethernet setting", + "EthernetSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}]) + def test_compare_payload_attributes_case_true(self, modify_payload): + """setting values are 
same as existing payload and no need to apply the changes again""" + modify_setting_payload = modify_payload + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", + "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "qrvM3e6q" + } + }, + "IscsiSettings": None, + "FcoeSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "cHBwcHAA" + } + }, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets" + } + val = self.module.compare_nested_dict(modify_setting_payload, existing_setting_payload) + assert val is True + + def test_get_updated_modify_payload_case_01(self): + """when setting not exists in current requested payload, update payload from existing setting value if exists""" + payload = {"Name": "pool1"} + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", + "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "qrvM3e6q" + } + }, + "IscsiSettings": None, + "FcoeSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "cHBwcHAA" + } 
+ }, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets" + } + payload = self.module.get_updated_modify_payload(payload, existing_setting_payload) + assert payload["Description"] == "Identity pool with ethernet setting" + assert payload["EthernetSettings"]["Mac"]["IdentityCount"] == 70 + assert payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "qrvM3e6q" + assert payload["FcoeSettings"]["Mac"]["IdentityCount"] == 70 + assert payload["FcoeSettings"]["Mac"]["StartingMacAddress"] == "cHBwcHAA" + + def test_get_updated_modify_payload_case_02(self): + """when setting exists in current requested payload, do not + update payload from existing setting value if exists""" + payload = {"Name": "pool1", "EthernetSettings": {"Mac": {"IdentityCount": 55, "StartingMacAddress": "abcd"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 65, "StartingMacAddress": "xyz"}}} + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", + "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "qrvM3e6q" + } + }, + "IscsiSettings": None, + "FcoeSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "cHBwcHAA" + } + }, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets" + } + payload = 
self.module.get_updated_modify_payload(payload, existing_setting_payload) + assert payload["Description"] == "Identity pool with ethernet setting" + assert payload["EthernetSettings"]["Mac"]["IdentityCount"] == 55 + assert payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "abcd" + assert payload["FcoeSettings"]["Mac"]["IdentityCount"] == 65 + assert payload["FcoeSettings"]["Mac"]["StartingMacAddress"] == "xyz" + + def test_get_updated_modify_payload_case_03(self): + """update new description""" + payload = {"Name": "pool1", "Description": "new description"} + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", + "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": None, + "IscsiSettings": None, + "FcoeSettings": None, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets" + } + payload = self.module.get_updated_modify_payload(payload, existing_setting_payload) + assert payload["Description"] == "new description" + assert "EthernetSettings" not in payload + assert "FcoeSettings" not in payload + + def test_get_updated_modify_payload_case_04(self): + """update remaining parameter of ethernet and fcoe setting + if not exists in payload but exists in existing setting payload""" + payload = {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "abcd"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 65}}} + existing_setting_payload = { + "@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": 
"#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", + "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", + "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", + "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "qrvM3e6q" + } + }, + "IscsiSettings": None, + "FcoeSettings": { + "Mac": { + "IdentityCount": 70, + "StartingMacAddress": "cHBwcHAA" + } + }, + "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts" + }, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets" + } + payload = self.module.get_updated_modify_payload(payload, existing_setting_payload) + assert payload["Description"] == "Identity pool with ethernet setting" + assert payload["EthernetSettings"]["Mac"]["IdentityCount"] == 70 + assert payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "abcd" + assert payload["FcoeSettings"]["Mac"]["IdentityCount"] == 65 + assert payload["FcoeSettings"]["Mac"]["StartingMacAddress"] == "cHBwcHAA" + + def test_get_updated_modify_payload_case_05(self): + """update remaining parameter of ethernet and fcoe setting will be null if not exists in existing payload""" + payload = {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "abcd"}}, } + existing_setting_payload = {"@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", "Id": 30, + "Name": "pool1", + "Description": "Identity pool with ethernet setting", "CreatedBy": "admin", + "CreationTime": "2020-01-31 11:31:13.621182", + "LastUpdatedBy": "admin", "LastUpdateTime": "2020-01-31 11:34:28.00876", + "EthernetSettings": {"Mac": {"StartingMacAddress": "qrvM3e6q"}}, 
+ "IscsiSettings": None, + "FcoeSettings": {"Mac": {"StartingMacAddress": "cHBwcHAA"}}, "FcSettings": None, + "UsageCounts": { + "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts"}, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets"} + payload = self.module.get_updated_modify_payload(payload, existing_setting_payload) + assert payload["Description"] == "Identity pool with ethernet setting" + assert payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "abcd" + assert "IdentityCount" not in payload["EthernetSettings"]["Mac"] + + @pytest.mark.parametrize("setting", ["EthernetSettings", "FcoeSettings"]) + def test_get_updated_modify_payload_case_06(self, setting): + modify_payload = {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "abcd"}}, } + existing_payload = {"@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(35)", + "Id": 35, "Name": "pool1", + "Description": "Identity pool with ethernet and fcoe settings1", + "CreatedBy": "admin", "CreationTime": "2020-02-01 07:55:59.923838", + "LastUpdatedBy": "admin", "LastUpdateTime": "2020-02-01 07:55:59.923838", + "EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "UFBQUFAA"}}, + "IscsiSettings": None, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}, + "FcSettings": None, + "UsageCounts": {"@odata.id": "/api/IdentityPoolService/IdentityPools(35)/UsageCounts"}, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(35)/UsageIdentitySets"} + modify_payload = self.module.get_updated_modify_payload(modify_payload, existing_payload) + assert modify_payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "abcd" + assert modify_payload["EthernetSettings"]["Mac"]["IdentityCount"] == 60 + assert 
modify_payload["FcoeSettings"]["Mac"]["StartingMacAddress"] == "cHBwcHAA" + assert modify_payload["FcoeSettings"]["Mac"]["IdentityCount"] == 70 + + @pytest.mark.parametrize("setting", ["EthernetSettings", "FcoeSettings"]) + def test_update_modify_setting_case_success(self, setting): + modify_payload = {"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "abcd"}}, + "FcoeSettings": {"Mac": {"IdentityCount": 55}}} + existing_payload = {"@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool", + "@odata.type": "#IdentityPoolService.IdentityPool", + "@odata.id": "/api/IdentityPoolService/IdentityPools(35)", + "Id": 35, "Name": "pool1", + "Description": "Identity pool with ethernet and fcoe settings1", + "CreatedBy": "admin", "CreationTime": "2020-02-01 07:55:59.923838", + "LastUpdatedBy": "admin", "LastUpdateTime": "2020-02-01 07:55:59.923838", + "EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "UFBQUFAA"}}, + "IscsiSettings": None, + "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}, + "FcSettings": None, + "UsageCounts": {"@odata.id": "/api/IdentityPoolService/IdentityPools(35)/UsageCounts"}, + "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(35)/UsageIdentitySets"} + if setting == "EthernetSettings": + self.module.update_modify_setting(modify_payload, existing_payload, setting, ["Mac"]) + assert modify_payload["EthernetSettings"]["Mac"]["StartingMacAddress"] == "abcd" + assert modify_payload["EthernetSettings"]["Mac"]["IdentityCount"] == 60 + else: + self.module.update_modify_setting(modify_payload, existing_payload, setting, ["Mac"]) + assert modify_payload["FcoeSettings"]["Mac"]["StartingMacAddress"] == "cHBwcHAA" + assert modify_payload["FcoeSettings"]["Mac"]["IdentityCount"] == 55 + + @pytest.mark.parametrize("mac_address", + ['50-50-50-50-50-50', '50:50:50:50:50:50', '5050.5050.5050', 'ab:cd:ef:70:80:70', + 'aabb.ccdd.7070']) + def 
test_mac_validation_match_case(self, mac_address): + """valid MAC address formats""" + match = self.module.mac_validation(mac_address) + assert match is not None + + @pytest.mark.parametrize("mac_address", ['50--50--50--50--50-50', + '50::50::50::50::50::50', + '5050..5050..5050', + 'ab/cd/ef/70/80/70', + '50-50:50.50-50-50', + 'xy:gh:yk:lm:30:10', + '50-50-50-50-50', + '50-50-50-50-50-50-50-50']) + def test_mac_validation_match_case(self, mac_address): + match = self.module.mac_validation(mac_address) + assert match is None + + @pytest.mark.parametrize("mac_address_base64_map", [{'50-50-50-50-50-50': 'UFBQUFBQ'}, + {'50:50:50:50:50:50': 'UFBQUFBQ'}, + {'5050.5050.5050': 'UFBQUFBQ'}, + {'ab:cd:ef:70:80:70': 'q83vcIBw'}, + {'20-00-50-50-50-50-50-50': 'IABQUFBQUFA='}, + {'20-01-50-50-50-50-50-50': 'IAFQUFBQUFA='}, + {'20:00:50:50:50:50:50:50': 'IABQUFBQUFA='}, + {'20:01:50:50:50:50:50:50': 'IAFQUFBQUFA='}, + {'2000.5050.5050.5050': 'IABQUFBQUFA='}, + {'2001.5050.5050.5050': 'IAFQUFBQUFA='}, + {'20:00:ab:cd:ef:70:80:70': 'IACrze9wgHA='}, + {'20:01:ab:cd:ef:70:80:70': 'IAGrze9wgHA='}, + ]) + def test_mac_to_base64_conversion(self, mac_address_base64_map): + f_module = self.get_module_mock() + mac_address = list(mac_address_base64_map.keys())[0] + base_64_val_expected = list(mac_address_base64_map.values())[0] + base_64_val = self.module.mac_to_base64_conversion(mac_address, f_module) + assert base_64_val == base_64_val_expected + + def test_pool_delete_case_01(self, ome_connection_mock_for_identity_pool, mocker): + params = {"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(1, {"value": "data"})) + f_module = self.get_module_mock(params=params) + message = self.module.pool_delete(f_module, ome_connection_mock_for_identity_pool) + assert message["msg"] == "Successfully deleted the identity pool." 
+ + def test_pool_delete_case_02(self, ome_connection_mock_for_identity_pool, mocker): + params = {"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(0, {})) + f_module = self.get_module_mock(params=params) + with pytest.raises(Exception) as exc: + self.module.pool_delete(f_module, ome_connection_mock_for_identity_pool) + assert exc.value.args[0] == "The identity pool '{0}' is not present in the system.".format(params["pool_name"]) + + def test_pool_delete_error_case_02(self, mocker, ome_connection_mock_for_identity_pool, ome_response_mock): + msg = "exception message" + params = {"pool_name": "pool_name"} + mocker.patch( + MODULE_PATH + 'ome_identity_pool.get_identity_pool_id_by_name', + return_value=(1, "data")) + f_module = self.get_module_mock(params=params) + ome_connection_mock_for_identity_pool.invoke_request.side_effect = Exception(msg) + with pytest.raises(Exception, match=msg) as exc: + self.module.pool_delete(f_module, ome_connection_mock_for_identity_pool) + + def test_main_ome_identity_pool_delete_success_case1(self, mocker, ome_default_args, + ome_connection_mock_for_identity_pool, ome_response_mock): + sub_param = {"pool_name": "pool1", + "state": "absent", } + message_return = {"msg": "Successfully deleted the identity pool."} + mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_delete', + return_value=message_return) + ome_default_args.update(sub_param) + result = self.execute_module(ome_default_args) + assert 'pool_status' not in result + assert result["msg"] == "Successfully deleted the identity pool." + + def test_validate_modify_create_payload_no_exception_case(self): + modify_payload = { + "Id": 59, + "Name": "pool_new", + "EthernetSettings": { + "Mac": { + "IdentityCount": 61, + "StartingMacAddress": "kJCQkJCQ" + } + }, + "IscsiSettings": { + "Mac": { + "IdentityCount": 30, + "StartingMacAddress": "YGBgYGAA" + }, + "InitiatorConfig": { + "IqnPrefix": "iqn.myprefix." 
+ }, + "InitiatorIpPoolSettings": { + "IpRange": "10.33.0.1-10.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + }, + "FcoeSettings": { + "Mac": { + "IdentityCount": 77, + "StartingMacAddress": "qrvM3VBQ" + } + }, + "FcSettings": { + "Wwnn": { + "IdentityCount": 45, + "StartingAddress": "IAAQEBAQEBA=" + }, + "Wwpn": { + "IdentityCount": 45, + "StartingAddress": "IAEQEBAQEBA=" + } + } + } + f_module = self.get_module_mock() + self.module.validate_modify_create_payload(modify_payload, f_module, "create") + + modify_payload1 = { + "Mac": { + "IdentityCount": 61, + } + } + modify_payload2 = { + "Mac": { + "StartingMacAddress": "kJCQkJCQ" + } + } + + modify_payload3 = { + "Mac": { + } + } + + modify_payload4 = { + "Mac": None + } + + @pytest.mark.parametrize("setting", ["EthernetSettings", "FcoeSettings"]) + @pytest.mark.parametrize("action", ["create", "modify"]) + @pytest.mark.parametrize("payload", [modify_payload1, modify_payload2, modify_payload3, modify_payload4]) + def test_validate_modify_create_payload_failure_case1(self, payload, action, setting): + modify_payload = {"Id": 59, "Name": "pool_new"} + modify_payload[setting] = payload + f_module = self.get_module_mock() + msg = "Both starting MAC address and identity count is required to {0} an identity pool using {1} settings.".format( + action, ''.join(setting.split('Settings'))) + with pytest.raises(Exception, match=msg) as exc: + self.module.validate_modify_create_payload(modify_payload, f_module, action) + + modify_fc_setting1 = {"FcSettings": { + "Wwnn": { + "IdentityCount": 45, + }, + "Wwpn": { + "IdentityCount": 45, + } + }} + modify_fc_setting2 = {"FcSettings": { + "Wwnn": { + "StartingAddress": "IAAQEBAQEBA=" + }, + "Wwpn": { + "IdentityCount": 45, + "StartingAddress": "IAEQEBAQEBA=" + } + }} + modify_fc_setting3 = {"FcSettings": { + "Wwnn": { + "StartingAddress": "IAAQEBAQEBA=" + }, + "Wwpn": { + 
"StartingAddress": "IAEQEBAQEBA=" + } + }} + modify_fc_setting4 = {"FcSettings": { + "Wwnn": { + }, + "Wwpn": { + } + }} + modify_fc_setting5 = {"FcSettings": { + "Wwnn": None, + "Wwpn": None}} + + @pytest.mark.parametrize("action", ["create", "modify"]) + @pytest.mark.parametrize("modify_payload", + [modify_fc_setting1, modify_fc_setting2, modify_fc_setting3, modify_fc_setting4, + modify_fc_setting5]) + def test_validate_modify_create_payload_failure_fc_setting_case(self, modify_payload, action): + payload = {"Id": 59, "Name": "pool_new"} + modify_payload.update(payload) + f_module = self.get_module_mock() + msg = "Both starting MAC address and identity count is required to {0} an identity pool using Fc settings.".format( + action) + with pytest.raises(Exception, match=msg) as exc: + self.module.validate_modify_create_payload(modify_payload, f_module, action) + + @pytest.mark.parametrize("action", ["create", "modify"]) + @pytest.mark.parametrize("modify_payload", + [modify_fc_setting1, modify_fc_setting2, modify_fc_setting3, modify_fc_setting4, + modify_fc_setting5]) + # @pytest.mark.parametrize("modify_payload", [modify_fc_setting1]) + def test_validate_modify_create_payload_failure_fc_setting_case(self, modify_payload, action): + payload = {"Id": 59, "Name": "pool_new"} + modify_payload.update(payload) + f_module = self.get_module_mock() + msg = "Both starting MAC address and identity count is required to {0} an identity pool using Fc settings.".format( + action) + with pytest.raises(Exception, match=msg) as exc: + self.module.validate_modify_create_payload(modify_payload, f_module, action) + + payload_iscsi1 = {"IscsiSettings": { + "Mac": { + "IdentityCount": 30 + }}} + + payload_iscsi2 = {"IscsiSettings": { + "Mac": { + "StartingMacAddress": "kJCQkJCQ" + }}} + payload_iscsi3 = {"IscsiSettings": { + "Mac": { + }}} + + @pytest.mark.parametrize("action", ["create", "modify"]) + @pytest.mark.parametrize("modify_payload", [payload_iscsi1, payload_iscsi2, 
payload_iscsi3]) + def test_validate_modify_create_payload_failure_iscsi_setting_case1(self, modify_payload, action): + payload = {"Id": 59, "Name": "pool_new"} + modify_payload.update(payload) + f_module = self.get_module_mock() + msg = "Both starting MAC address and identity count is required to {0} an identity pool using Iscsi settings.".format( + action) + with pytest.raises(Exception, match=msg) as exc: + self.module.validate_modify_create_payload(modify_payload, f_module, action) + + payload_iscsi3 = { + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + + payload_iscsi4 = { + "IpRange": "10.33.0.1-10.33.0.255", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + payload_iscsi5 = { + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + + @pytest.mark.parametrize("action", ["create", "modify"]) + @pytest.mark.parametrize("initiatorip_payload", + [payload_iscsi3, payload_iscsi4, payload_iscsi5]) + def test_validate_modify_create_payload_failure_iscsi_setting_case2(self, initiatorip_payload, action): + modify_payload = {"Id": 59, "Name": "pool_new", + "IscsiSettings": {"Mac": { + "IdentityCount": 30, + "StartingMacAddress": "kJCQkJCQ" + }, + "InitiatorConfig": {"IqnPrefix": "abc"}}, + } + modify_payload["IscsiSettings"]["InitiatorIpPoolSettings"] = initiatorip_payload + f_module = self.get_module_mock() + msg = "Both ip range and subnet mask in required to {0} an identity pool using iSCSI settings.".format(action) + with pytest.raises(Exception, match=msg): + self.module.validate_modify_create_payload(modify_payload, f_module, action) + + def test_update_fc_settings_success_case1(self): + setting_params = { + "identity_count": 45, + "starting_address": "10-10-10-10-10-10" + } + payload = {"Name": "pool_name"} + f_module = self.get_module_mock() + self.module.update_fc_settings(payload, setting_params, 
"FcSettings", f_module) + assert payload == { + "Name": "pool_name", + 'FcSettings': {'Wwnn': {'IdentityCount': 45, 'StartingAddress': 'IAAQEBAQEBA='}, + 'Wwpn': {'IdentityCount': 45, 'StartingAddress': 'IAEQEBAQEBA='} + } + } + + def test_update_fc_settings_success_case2(self): + setting_params = { + "identity_count": 45 + } + payload = {"Name": "pool_name"} + f_module = self.get_module_mock() + self.module.update_fc_settings(payload, setting_params, "FcSettings", f_module) + assert payload == { + "Name": "pool_name", + 'FcSettings': {'Wwnn': {'IdentityCount': 45}, + 'Wwpn': {'IdentityCount': 45}} + } + + def test_update_fc_settings_success_case3(self): + setting_params = { + "starting_address": "10-10-10-10-10-10" + } + payload = {"Name": "pool_name"} + f_module = self.get_module_mock() + self.module.update_fc_settings(payload, setting_params, "FcSettings", f_module) + assert payload == { + "Name": "pool_name", + 'FcSettings': {'Wwnn': {'StartingAddress': 'IAAQEBAQEBA='}, + 'Wwpn': {'StartingAddress': 'IAEQEBAQEBA='} + } + } + + def test_update_fc_settings_mac_failure_case1(self): + setting_params = { + "identity_count": 45, + "starting_address": "abcd.1010:1010" + } + payload = {"Name": "pool_name"} + setting_type = "FcSettings" + f_module = self.get_module_mock() + msg = "Please provide the valid starting address format for FC settings." 
+ with pytest.raises(Exception, match=msg) as exc: + self.module.update_fc_settings(payload, setting_params, setting_type, f_module) + + @pytest.mark.parametrize("mac", [{'50-50-50-50-50-50': ['20-00-', '20-01-']}, + {'50:50:50:50:50:50': ['20:00:', '20:01:']}, + {'5050.5050.5050': ['2000.', '2001.']}, + {'ab:cd:ef:70:80:70': ['20:00:', '20:01:']}, + {'aabb.ccdd.7070': ['2000.', '2001.']}]) + def test_get_wwn_address(self, mac): + mac_address = list(mac.keys())[0] + expected_values = list(mac.values())[0] + wwnn_address_expected = expected_values[0] + wwpn_address_expected = expected_values[1] + wwnn_address, wwpn_address = self.module.get_wwn_address_prefix(mac_address) + assert wwnn_address == wwnn_address_expected + assert wwpn_address == wwpn_address_expected + + def test_update_iscsi_specific_settings_case1(self): + setting_type = "IscsiSettings" + payload = {"Name": "pool_new", setting_type: {"Mac": {"IdentityCount": 30, "StartingMacAddress": "YGBgYGAA"}}} + settings_params = { + "identity_count": 30, + "initiator_config": { + "iqn_prefix": "iqn.myprefix." + }, + "initiator_ip_pool_settings": { + "gateway": "192.168.4.1", + "ip_range": "10.33.0.1-10.33.0.255", + "primary_dns_server": "10.8.8.8", + "secondary_dns_server": "8.8.8.8", + "subnet_mask": "255.255.255.0" + }, + "starting_mac_address": "60:60:60:60:60:00" + } + self.module.update_iscsi_specific_settings(payload, settings_params, setting_type) + assert payload == { + "Name": "pool_new", + "IscsiSettings": { + "Mac": { + "IdentityCount": 30, + "StartingMacAddress": "YGBgYGAA" + }, + "InitiatorConfig": { + "IqnPrefix": "iqn.myprefix." 
+ }, + "InitiatorIpPoolSettings": { + "IpRange": "10.33.0.1-10.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1", + "PrimaryDnsServer": "10.8.8.8", + "SecondaryDnsServer": "8.8.8.8" + } + }} + + def test_update_iscsi_specific_settings_case2(self): + setting_type = "IscsiSettings" + payload = {"Name": "pool_new", "Description": "description"} + settings_params = { + "initiator_ip_pool_settings": { + "gateway": "192.168.4.1", + "ip_range": "10.33.0.1-10.33.0.255", + "subnet_mask": "255.255.255.0" + } + } + self.module.update_iscsi_specific_settings(payload, settings_params, setting_type) + assert payload == { + "Name": "pool_new", "Description": "description", + "IscsiSettings": { + "InitiatorIpPoolSettings": { + "IpRange": "10.33.0.1-10.33.0.255", + "SubnetMask": "255.255.255.0", + "Gateway": "192.168.4.1" + } + }} diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py new file mode 100644 index 00000000..34de35d1 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_job_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text + + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + + +class TestOmeJobInfo(FakeAnsibleModule): + """Pyest class for ome_job_info module.""" + module = ome_job_info + + @pytest.fixture + def ome_connection_job_info_mock(self, mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_job_info.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + @pytest.mark.parametrize("module_params,data", [({"system_query_options": {"filter": "abc"}}, "$filter")]) + def test_get_query_parameters(self, module_params, data): + res = self.module._get_query_parameters(module_params) + if data is not None: + assert data in res + else: + assert res is None + + def test_job_info_success_case(self, ome_default_args, ome_connection_job_info_mock, + ome_response_mock): + ome_response_mock.json_data = {"@odata.context": "/api/$metadata#Collection(JobService.Job)", + "@odata.count": 1} + ome_response_mock.success = True + job_details = {"resp_obj": ome_response_mock, + "report_list": [{"Name": "job1", "Id": 123}, {"Name": "job2", "Id": 124}]} + ome_connection_job_info_mock.get_all_report_details.return_value = job_details + result = 
self._run_module(ome_default_args) + assert 'job_info' in result + assert result['msg'] == "Successfully fetched the job info" + + def test_job_info_main_success_case_job_id(self, ome_default_args, ome_connection_job_info_mock, + ome_response_mock): + ome_default_args.update({"job_id": 1}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"job_id": 1}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'job_info' in result + + def test_job_info_success_case03(self, ome_default_args, ome_connection_job_info_mock, + ome_response_mock): + ome_default_args.update({"system_query_options": {"filter": "abc"}}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"filter": "abc"}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'job_info' in result + + def test_job_info_failure_case(self, ome_default_args, ome_connection_job_info_mock, + ome_response_mock): + ome_response_mock.status_code = 500 + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == "Failed to fetch the job info" + + @pytest.mark.parametrize("exc_type", [URLError, HTTPError, SSLValidationError, ConnectionError, + TypeError, ValueError]) + def test_job_info_main_exception_case(self, exc_type, mocker, ome_default_args, ome_connection_job_info_mock, + ome_response_mock): + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'ome_job_info._get_query_parameters', + side_effect=exc_type('test')) + else: + mocker.patch( + MODULE_PATH + 'ome_job_info._get_query_parameters', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + if not 
exc_type == URLError: + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + result = self._run_module(ome_default_args) + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py new file mode 100644 index 00000000..44ceef4d --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.0.0 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import json +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_port_breakout +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
@pytest.fixture
def ome_connection_breakout_mock(mocker, ome_response_mock):
    """Patch RestOME so the module under test talks to a mocked connection.

    The context-manager entry value is returned so tests can inspect or
    reprogram ``invoke_request`` directly.
    """
    rest_ome_class = mocker.patch(MODULE_PATH + "ome_network_port_breakout.RestOME")
    connection = rest_ome_class.return_value.__enter__.return_value
    connection.invoke_request.return_value = ome_response_mock
    return connection


class TestOMEPortBreakout(FakeAnsibleModule):
    """Unit tests for the ome_network_port_breakout module."""
    module = ome_network_port_breakout

    def test_get_payload(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args):
        """get_breakout_payload builds a job payload named 'Breakout Port'."""
        payload = self.module.get_breakout_payload("25017", "HardwareDefault", "2HB7NX2:phy-port1/1/11")
        assert payload["JobName"] == "Breakout Port"

    def test_check_mode(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args):
        """In check mode, pending changes must surface as an exit exception."""
        f_module = self.get_module_mock(check_mode=True)
        with pytest.raises(Exception) as exc:
            self.module.check_mode(f_module, changes=True)
        assert exc.value.args[0] == "Changes found to commit!"

    def test_get_device_id(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args):
        """A service tag present in the inventory resolves to its device id."""
        f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X40GE"})
        ome_response_mock.status_code = 200
        ome_response_mock.json_data = {"value": [{"Id": 25017, "DeviceServiceTag": "2HB7NX2"}]}
        device_id = self.module.get_device_id(f_module, ome_connection_breakout_mock)
        assert device_id == 25017

    def test_get_device_id_regex_failed(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args):
        """A malformed target port (stray ':') is rejected before any lookup."""
        f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-:port1/1/11", "breakout_type": "1X40GE"})
        with pytest.raises(Exception) as exc:
            self.module.get_device_id(f_module, ome_connection_breakout_mock)
        assert exc.value.args[0] == "Invalid target port 2HB7NX2:phy-:port1/1/11."
+ + def test_get_device_id_invalid_status(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X40GE"}) + ome_response_mock.status_code = 200 + ome_response_mock.json_data = {"value": []} + with pytest.raises(Exception) as exc: + self.module.get_device_id(f_module, ome_connection_breakout_mock) + assert exc.value.args[0] == "Unable to retrieve the device information because the" \ + " device with the entered service tag 2HB7NX2 is not present." + + def test_get_port_information(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X40GE"}) + ome_response_mock.json_data = {"InventoryInfo": [{"Configuration": "HardwareDefault", + "Id": "2HB7NX2:phy-port1/1/11", + "PortBreakoutCapabilities": [{"Type": "1X40GE"}, + {"Type": "1X10GE"}, + {"Type": "HardwareDefault"}]}]} + config, capability, interface = self.module.get_port_information(f_module, ome_connection_breakout_mock, 25017) + assert config == "HardwareDefault" + + def test_get_port_information_failed(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X40GE"}) + ome_response_mock.json_data = {"InventoryInfo": [{"Configuration": "NoBreakout", + "Id": "2HB7NX2:phy-port1/1/11", + "PortBreakoutCapabilities": [{"Type": "1X40GE"}, + {"Type": "1X10GE"}, + {"Type": "HardwareDefault"}]}]} + with pytest.raises(Exception) as exc: + self.module.get_port_information(f_module, ome_connection_breakout_mock, 25017) + assert exc.value.args[0] == "2HB7NX2:phy-port1/1/11 does not support port breakout" \ + " or invalid port number entered." 
+ + def test_set_breakout_port(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X40GE"}) + capability = [{"Type": "1X40GE"}, {"Type": "1X10GE"}, {"Type": "HardwareDefault"}] + payload = { + "Id": 0, "JobName": "Breakout Port", "JobDescription": "", + "Schedule": "startnow", "State": "Enabled", + "JobType": {"Id": 3, "Name": "DeviceAction_Task"}, + "Params": [ + {"Key": "breakoutType", "Value": "1X40GE"}, + {"Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}], + "Targets": [ + {"JobId": 0, "Id": 25017, "Data": "", "TargetType": {"Id": 4000, "Name": "DEVICE"}} + ]} + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value=payload) + ome_response_mock.status_code = 200 + result = self.module.set_breakout(f_module, ome_connection_breakout_mock, "HardwareDefault", + capability, "2HB7NX2:phy-port1/1/11", 25017) + assert result.status_code == 200 + + def test_set_breakout_port_invalid(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X100GE"}) + capability = [{"Type": "1X40GE"}, {"Type": "1X10GE"}, {"Type": "HardwareDefault"}] + payload = { + "Id": 0, "JobName": "Breakout Port", "JobDescription": "", + "Schedule": "startnow", "State": "Enabled", + "JobType": {"Id": 3, "Name": "DeviceAction_Task"}, + "Params": [ + {"Key": "breakoutType", "Value": "1X40GE"}, + {"Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}], + "Targets": [ + {"JobId": 0, "Id": 25017, "Data": "", "TargetType": {"Id": 4000, "Name": "DEVICE"}} + ]} + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + 
return_value=payload) + with pytest.raises(Exception) as exc: + self.module.set_breakout(f_module, ome_connection_breakout_mock, "HardwareDefault", + capability, "2HB7NX2:phy-port1/1/11", 25017) + assert exc.value.args[0] == "Invalid breakout type: 1X100GE, supported values are 1X40GE, " \ + "1X10GE, HardwareDefault." + + def test_set_breakout_port_reset(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", + "breakout_type": "HardwareDefault"}) + capability = [{"Type": "1X40GE"}, {"Type": "1X10GE"}, {"Type": "HardwareDefault"}] + payload = { + "Id": 0, "JobName": "Breakout Port", "JobDescription": "", + "Schedule": "startnow", "State": "Enabled", + "JobType": {"Id": 3, "Name": "DeviceAction_Task"}, + "Params": [ + {"Key": "breakoutType", "Value": "1X40GE"}, + {"Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}], + "Targets": [ + {"JobId": 0, "Id": 25017, "Data": "", "TargetType": {"Id": 4000, "Name": "DEVICE"}} + ]} + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value=payload) + ome_response_mock.status_code = 200 + result = self.module.set_breakout(f_module, ome_connection_breakout_mock, "1X40GE", + capability, "2HB7NX2:phy-port1/1/11", 25017) + assert result.status_code == 200 + + def test_set_breakout_port_symmetry(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", + "breakout_type": "1X40GE"}) + capability = [{"Type": "1X40GE"}, {"Type": "1X10GE"}, {"Type": "HardwareDefault"}] + payload = { + "Id": 0, "JobName": "Breakout Port", "JobDescription": "", + "Schedule": "startnow", "State": "Enabled", + "JobType": {"Id": 3, "Name": "DeviceAction_Task"}, + "Params": [ + {"Key": "breakoutType", "Value": "1X40GE"}, + 
{"Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}], + "Targets": [ + {"JobId": 0, "Id": 25017, "Data": "", "TargetType": {"Id": 4000, "Name": "DEVICE"}} + ]} + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value=payload) + with pytest.raises(Exception) as exc: + self.module.set_breakout(f_module, ome_connection_breakout_mock, "1X40GE", + capability, "2HB7NX2:phy-port1/1/11", 25017) + assert exc.value.args[0] == "The port is already configured with the selected breakout configuration." + + def test_set_breakout_port_asymmetry(self, ome_connection_breakout_mock, ome_response_mock, ome_default_args, mocker): + f_module = self.get_module_mock(params={"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X20GE"}) + capability = [{"Type": "1X40GE"}, {"Type": "1X10GE"}, {"Type": "HardwareDefault"}] + payload = { + "Id": 0, "JobName": "Breakout Port", "JobDescription": "", + "Schedule": "startnow", "State": "Enabled", + "JobType": {"Id": 3, "Name": "DeviceAction_Task"}, + "Params": [ + {"Key": "breakoutType", "Value": "1X40GE"}, + {"Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"}, + {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"}], + "Targets": [ + {"JobId": 0, "Id": 25017, "Data": "", "TargetType": {"Id": 4000, "Name": "DEVICE"}} + ]} + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value=payload) + with pytest.raises(Exception) as exc: + self.module.set_breakout(f_module, ome_connection_breakout_mock, "1X40GE", + capability, "2HB7NX2:phy-port1/1/11", 25017) + assert exc.value.args[0] == "Device does not support changing a port breakout" \ + " configuration to different breakout type. Configure the port to" \ + " HardwareDefault and retry the operation." 
+ + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_main_exception(self, exc_type, mocker, ome_default_args, ome_connection_breakout_mock, ome_response_mock): + ome_default_args.update({"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X20GE"}) + json_str = to_text(json.dumps({"info": "error_details"})) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + if exc_type not in [HTTPError, SSLValidationError]: + ome_connection_breakout_mock.invoke_request.side_effect = exc_type('test') + else: + ome_connection_breakout_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str)) + if not exc_type == URLError: + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value={}) + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_port_information"), + return_value=(None, None, None)) + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.set_breakout"), + return_value={}) + result = self._run_module(ome_default_args) + assert 'msg' in result + + def test_main(self, mocker, ome_default_args, ome_connection_breakout_mock, ome_response_mock): + ome_default_args.update({"target_port": "2HB7NX2:phy-port1/1/11", "breakout_type": "1X20GE"}) + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_device_id"), + return_value=25017) + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_breakout_payload"), + return_value={}) + mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.get_port_information"), + return_value=("HardwareDefault", [{"Type": "1X40GE"}, {"Type": "1X20GE"}], + "2HB7NX2:phy-port1/1/11")) + 
mocker.patch("{0}{1}".format(MODULE_PATH, "ome_network_port_breakout.set_breakout"), + return_value=ome_response_mock) + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result["msg"] == "Port breakout configuration job submitted successfully." diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py new file mode 100644 index 00000000..e7b7a05c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_vlan +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_network_vlan.' 
+ + +@pytest.fixture +def ome_connection_mock_for_network_vlan(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeNetworkVlan(FakeAnsibleModule): + module = ome_network_vlan + + @pytest.mark.parametrize("params", + [{"success": True, "json_data": {"value": [{"Name": "vlan_name", "Id": 123}]}, "id": 123}, + {"success": True, "json_data": {"value": []}, "id": 0}, + {"success": False, "json_data": {"value": [{"Name": "vlan_name", "Id": 123}]}, "id": 0}, + {"success": True, "json_data": {"value": [{"Name": "vlan_name1", "Id": 123}]}, "id": 0}]) + def test_get_item_id(self, params, ome_connection_mock_for_network_vlan, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + id, vlans = self.module.get_item_id(ome_connection_mock_for_network_vlan, "vlan_name", "uri") + assert id == params["id"] + + @pytest.mark.parametrize("vlan_param", + [{"in": {"name": "vlan1", "type": 1, "vlan_maximum": 40, "vlan_minimum": 35}, + "out": {"Name": "vlan1", "Type": 1, "VlanMaximum": 40, "VlanMinimum": 35}}, + {"in": None, "out": None}]) + def test_format_payload(self, vlan_param): + result = self.module.format_payload(vlan_param["in"]) + assert result == vlan_param["out"] + + def test_delete_vlan(self, ome_connection_mock_for_network_vlan, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.json_data = {} + f_module = self.get_module_mock(params={"name": "vlan1"}) + with pytest.raises(Exception, match="Successfully deleted the VLAN.") as err: + self.module.delete_vlan(f_module, ome_connection_mock_for_network_vlan, 12) + + @pytest.mark.parametrize("params", + [{"format_payload": {"VlanMaximum": None, "VlanMinimum": 35}, + "error_msg": "The 
vlan_minimum, vlan_maximum and type values are required for creating" + " a VLAN.", "overlap": {}}, + {"format_payload": {"VlanMaximum": 40, "VlanMinimum": 45}, "overlap": {}, + "error_msg": "VLAN-minimum value is greater than VLAN-maximum value."}, + {"format_payload": {"VlanMaximum": 40, "VlanMinimum": 35}, + "overlap": {"Name": "vlan1", "Type": 1, "VlanMaximum": 40, "VlanMinimum": 35}, + "error_msg": "Unable to create or update the VLAN because the entered range" + " overlaps with vlan1 with the range 35-40."}, + {"format_payload": {"VlanMaximum": 40, "VlanMinimum": 35}, + "error_msg": "Network type 'General Purpose (Silver)' not found.", + "overlap": {}}, + {"format_payload": {"VlanMaximum": 40, "VlanMinimum": 35}, "item": 1, "overlap": {}, + "check_mode": True, "error_msg": "Changes found to be applied."}, + ]) + def test_create_vlan(self, mocker, params, ome_connection_mock_for_network_vlan, ome_response_mock): + f_module = self.get_module_mock(params={"name": "vlan1", "vlan_maximum": 40, "vlan_minimum": 35, + "type": "General Purpose (Silver)"}, check_mode=params.get("check_mode", False)) + mocker.patch(MODULE_PATH + "format_payload", return_value=(params["format_payload"])) + mocker.patch(MODULE_PATH + "check_overlapping_vlan_range", return_value=(params["overlap"])) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(0, [])) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.create_vlan(f_module, ome_connection_mock_for_network_vlan, []) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", + [{"format_payload": {"VlanMaximum": 40, "VlanMinimum": 35}, + "error_msg": "Network type 'General Purpose (Silver)' not found.", + "overlap": {}}, + {"format_payload": {"Name": "vlan11", "Type": 1, "VlanMaximum": 40, "VlanMinimum": 45}, + "overlap": {}, "item": 1, + "error_msg": "VLAN-minimum value is greater than VLAN-maximum value."}, + {"format_payload": {"VlanMaximum": 40, "VlanMinimum": 
35}, "item": 1, + "overlap": {"Name": "vlan1", "Type": 1, "VlanMaximum": 40, "VlanMinimum": 35}, + "error_msg": "Unable to create or update the VLAN because the entered range" + " overlaps with vlan1 with the range 35-40."}, + {"format_payload": {"Name": "vlan11", "Type": 1, "VlanMaximum": 45, "VlanMinimum": 40}, + "item": 1, "overlap": {}, + "check_mode": True, "error_msg": "Changes found to be applied."}, + ]) + def test_modify_vlan(self, mocker, params, ome_connection_mock_for_network_vlan, ome_response_mock): + f_module = self.get_module_mock(params={"name": "vlan1", "vlan_maximum": 40, "vlan_minimum": 45, + "type": "General Purpose (Silver)"}, + check_mode=params.get("check_mode", False)) + mocker.patch(MODULE_PATH + "format_payload", return_value=(params["format_payload"])) + mocker.patch(MODULE_PATH + "check_overlapping_vlan_range", return_value=(params["overlap"])) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("item", 0), [])) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.modify_vlan(f_module, ome_connection_mock_for_network_vlan, 123, + [{"Id": 13, "Name": "vlan11", "Type": 1, "VlanMaximum": 140, "VlanMinimum": 135}, + {"Id": 123, "Name": "vlan1", "Type": 1, "VlanMaximum": 40, "VlanMinimum": 35, + 'Description': None}]) + assert err.value.args[0] == error_message + + def test_main_case_create_success(self, mocker, ome_default_args, ome_connection_mock_for_network_vlan, ome_response_mock): + mocker.patch(MODULE_PATH + "check_existing_vlan", return_value=(0, [{"VlanMaximum": 40, "VlanMinimum": 35}])) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(1, [])) + mocker.patch(MODULE_PATH + "check_overlapping_vlan_range", return_value=None) + ome_default_args.update( + {"name": "vlan1", "state": "present", "type": "General Purpose (Bronze)", + "vlan_maximum": 40, "vlan_minimum": 35}) + ome_response_mock.json_data = {"Id": 14227, "Name": "vlan1", "Type": 1, + "VlanMaximum": 40, 
"VlanMinimum": 35} + result = self._run_module(ome_default_args) + # assert result['changed'] is True + assert "msg" in result + assert result['vlan_status'] == {"Id": 14227, "Name": "vlan1", "Type": 1, + "VlanMaximum": 40, "VlanMinimum": 35} + assert result["msg"] == "Successfully created the VLAN." + + def test_main_case_modify_success(self, mocker, ome_default_args, ome_connection_mock_for_network_vlan, ome_response_mock): + mocker.patch(MODULE_PATH + "check_existing_vlan", return_value=(1, [{"Id": 1, "VlanMaximum": 40, "VlanMinimum": 35}])) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(2, [])) + mocker.patch(MODULE_PATH + "check_overlapping_vlan_range", return_value=None) + ome_default_args.update( + {"name": "vlan1", "state": "present", "type": "General Purpose (Bronze)", + "vlan_maximum": 40, "vlan_minimum": 35}) + ome_response_mock.json_data = {"Id": 14227, "Name": "vlan1", "Type": 2, "VlanMaximum": 40, "VlanMinimum": 35} + result = self._run_module(ome_default_args) + # assert result['changed'] is True + assert "msg" in result + assert result['vlan_status'] == {"Id": 14227, "Name": "vlan1", "Type": 2, "VlanMaximum": 40, "VlanMinimum": 35} + assert result["msg"] == "Successfully updated the VLAN." 
+ + @pytest.mark.parametrize("params", [ + {"fail_json": False, "json_data": {"JobId": 1234}, + "check_existing_vlan": (1, []), "check_mode": True, + "mparams": {"state": "absent", "name": "v1"}, + 'message': "Changes found to be applied.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "check_existing_vlan": (None, []), "check_mode": True, + "mparams": {"state": "absent", "name": "v1"}, + 'message': "No changes found to be applied to the VLAN configuration.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "check_existing_vlan": (None, []), "check_mode": False, + "mparams": {"state": "absent", "name": "v1"}, + 'message': "VLAN v1 does not exist.", "success": True + } + ]) + def test_main(self, params, ome_connection_mock_for_network_vlan, ome_default_args, ome_response_mock, mocker): + mocker.patch(MODULE_PATH + 'check_existing_vlan', return_value=params.get("check_existing_vlan")) + ome_response_mock.success = True + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params.get('mparams')) + if params.get("fail_json", False): + result = self._run_module_with_fail_json(ome_default_args) + else: + result = self._run_module(ome_default_args, check_mode=params.get("check_mode", False)) + assert result["msg"] == params['message'] + + @pytest.mark.parametrize("params", + [{"payload": {"VlanMaximum": 40, "VlanMinimum": 35}, + "vlans": [{"VlanMaximum": 40, "VlanMinimum": 35}], + "current_vlan": {"VlanMaximum": 40, "VlanMinimum": 35}}]) + def test_check_overlapping_vlan_range(self, params, ome_connection_mock_for_network_vlan, ome_response_mock): + result = self.module.check_overlapping_vlan_range(params["payload"], params["vlans"]) + assert result == params["current_vlan"] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_application_network_vlan_main_exception_failure_case(self, exc_type, mocker, 
ome_default_args, + ome_connection_mock_for_network_vlan, + ome_response_mock): + ome_default_args.update({"name": "vlan1", "state": "present", "type": "General Purpose (Bronze)", + "vlan_maximum": 40, "vlan_minimum": 35}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_existing_vlan', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_existing_vlan', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_existing_vlan', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'vlan_status' not in result + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py new file mode 100644 index 00000000..084fcd85 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_vlan_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' + +response = { + '@odata.context': '/api/$metadata#Collection(NetworkConfigurationService.Network)', + '@odata.count': 1, + 'value': [ + { + '@odata.type': '#NetworkConfigurationService.Network', + '@odata.id': '/api/NetworkConfigurationService/Networks(20057)', + 'Id': 20057, + 'Name': 'Logical Network - 1', + 'Description': 'Description of Logical Network - 1', + 'VlanMaximum': 111, + 'VlanMinimum': 111, + "Type": 1, + 'CreatedBy': 'admin', + 'CreationTime': '2020-09-02 18:48:42.129', + 'UpdatedBy': None, + 'UpdatedTime': '2020-09-02 18:48:42.129', + 'InternalRefNWUUId': '42b9903d-93f8-4184-adcf-0772e4492f71' + } + ] +} + +network_type_qos_type_dict_reponse = {1: {'Id': 1, 'Name': 'General Purpose (Bronze)', + 'Description': + 'This is the network for general purpose traffic. QOS Priority : Bronze.', + 'VendorCode': 'GeneralPurpose', 'NetworkTrafficType': 'Ethernet', + 'QosType': {'Id': 4, 'Name': 'Bronze'}}} + +network_type_dict_response = {1: {'Id': 1, 'Name': 'General Purpose (Bronze)', + 'Description': + 'This is the network for general purpose traffic. 
QOS Priority : Bronze.', + 'VendorCode': 'GeneralPurpose', 'NetworkTrafficType': 'Ethernet', + 'QosType': 4}} + +qos_type_dict_response = {4: {'Id': 4, 'Name': 'Bronze'}} + +type_dict_ome_reponse = {'@odata.context': '/api/$metadata#Collection(NetworkConfigurationService.Network)', + '@odata.count': 1, + 'value': [ + {'@odata.type': '#NetworkConfigurationService.NetworkType', + '@odata.id': '/api/NetworkConfigurationService/NetworkTypes(1)', + 'Id': 1, + 'Name': 'General Purpose (Bronze)', + 'Description': 'This is the network for general purpose traffic. QOS Priority : Bronze.', + 'VendorCode': 'GeneralPurpose', 'NetworkTrafficType': 'Ethernet', + 'QosType': 4}]} + + +class TestOmeNetworkVlanInfo(FakeAnsibleModule): + """Pytest class for ome_network_vlan_info module.""" + module = ome_network_vlan_info + + @pytest.fixture + def ome_connection_network_vlan_info_mock(self, mocker, ome_response_mock): + connection_class_mock = mocker.patch( + 'ansible_collections.dellemc.openmanage.plugins.modules.ome_network_vlan_info.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + def test_get_network_vlan_info_success_case(self, mocker, ome_default_args, ome_connection_network_vlan_info_mock, + ome_response_mock): + ome_response_mock.json_data = response + ome_response_mock.status_code = 200 + mocker.patch( + MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information', + return_value=network_type_qos_type_dict_reponse) + result = self._run_module(ome_default_args) + print(result) + assert 'network_vlan_info' in result + assert result['msg'] == "Successfully retrieved the network VLAN information." 
+ + def test_get_network_vlan_info_by_id_success_case(self, mocker, ome_default_args, + ome_connection_network_vlan_info_mock, ome_response_mock): + ome_default_args.update({"id": 20057}) + ome_response_mock.success = True + ome_response_mock.json_data = response + ome_response_mock.status_code = 200 + mocker.patch( + MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information', + return_value=network_type_qos_type_dict_reponse) + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'network_vlan_info' in result + assert result['msg'] == "Successfully retrieved the network VLAN information." + + def test_get_network_vlan_info_by_name_success_case(self, mocker, ome_default_args, + ome_connection_network_vlan_info_mock, ome_response_mock): + ome_default_args.update({"name": "Logical Network - 1"}) + ome_response_mock.success = True + ome_response_mock.json_data = response + ome_response_mock.status_code = 200 + mocker.patch( + MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information', + return_value=network_type_qos_type_dict_reponse) + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'network_vlan_info' in result + assert result['msg'] == "Successfully retrieved the network VLAN information." 
+ + def test_get_network_type_and_qos_type_information(self, mocker, ome_connection_network_vlan_info_mock): + mocker.patch(MODULE_PATH + 'ome_network_vlan_info.get_type_information', + side_effect=[network_type_dict_response, qos_type_dict_response]) + result = self.module.get_network_type_and_qos_type_information(ome_connection_network_vlan_info_mock) + assert result[1]['QosType']['Id'] == 4 + + def test_get_type_information(self, mocker, ome_default_args, + ome_connection_network_vlan_info_mock, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.json_data = type_dict_ome_reponse + ome_response_mock.status_code = 200 + result = self.module.get_type_information(ome_connection_network_vlan_info_mock, '') + assert result[1]['QosType'] == 4 + + def test_network_vlan_info_failure_case(self, ome_default_args, ome_connection_network_vlan_info_mock, + ome_response_mock): + ome_response_mock.status_code = 500 + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == "Failed to retrieve the network VLAN information." + + def test_network_vlan_info_name_failure_case(self, ome_default_args, ome_connection_network_vlan_info_mock, + ome_response_mock): + ome_default_args.update({"name": "non-existing vlan"}) + ome_response_mock.success = True + ome_response_mock.json_data = response + ome_response_mock.status_code = 200 + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'network_vlan_info' not in result + assert result['msg'] == "Provided network VLAN with name - 'non-existing vlan' does not exist." 
+ + @pytest.mark.parametrize("exc_type", [URLError, HTTPError, SSLValidationError, ConnectionError, + TypeError, ValueError]) + def test_network_vlan_info_info_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_connection_network_vlan_info_mock, ome_response_mock): + ome_response_mock.status_code = 404 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + ome_connection_network_vlan_info_mock.invoke_request.side_effect = exc_type( + "ansible.module_utils.urls.open_url error") + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type == HTTPError: + ome_connection_network_vlan_info_mock.invoke_request.side_effect = exc_type( + 'http://testhost.com', 400, '<400 bad request>', {"accept-type": "application/json"}, + StringIO(json_str)) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + assert 'error_info' in result + + ome_connection_network_vlan_info_mock.invoke_request.side_effect = exc_type( + 'http://testhost.com', 404, '<404 not found>', {"accept-type": "application/json"}, StringIO(json_str)) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + elif exc_type != SSLValidationError: + mocker.patch(MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information', + side_effect=exc_type('test')) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + else: + mocker.patch(MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information', + side_effect=exc_type('http://testhost.com', 404, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git 
a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py new file mode 100644 index 00000000..707e495c --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py @@ -0,0 +1,436 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.3.0 +# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from io import StringIO +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_powerstate + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def ome_connection_powerstate_mock(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_powerstate.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmePowerstate(FakeAnsibleModule): + module = ome_powerstate + + payload = { + "Builtin": False, + "CreatedBy": "admin", + "Editable": True, + "EndTime": None, + "Id": 29099, + "JobDescription": "Firmware Update Task", + "JobName": "Firmware Update Task", + "JobStatus": { + "Id": 2080, + "Name": "New" + }, + "JobType": { + "Id": 5, + "Internal": False, + "Name": "Update_Task" + }, + "LastRun": None, + "LastRunStatus": { + "Id": 2200, + "Name": "NotRun" + }, + "NextRun": None, + "Params": [ + { + "JobId": 29099, + "Key": "operationName", + "Value": "INSTALL_FIRMWARE" + }, + { + "JobId": 29099, + "Key": "complianceUpdate", + "Value": "false" + }, + { + "JobId": 29099, + "Key": "stagingValue", + "Value": "false" + }, + { + "JobId": 29099, + "Key": "signVerify", + "Value": "true" + } + ], + "Schedule": "startnow", + "StartTime": None, + "State": "Enabled", + "Targets": [ + { + "Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577776981156", + "Id": 28628, + "JobId": 29099, + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + } + ], + "UpdatedBy": None, + "Visible": True + } + + @pytest.mark.parametrize("param", [payload]) + def test_spawn_update_job_case(self, param, ome_response_mock, + ome_connection_powerstate_mock): + ome_response_mock.status_code = 201 + ome_response_mock.success = True + ome_response_mock.json_data = { + "Builtin": False, + "CreatedBy": "admin", + "Editable": True, + "EndTime": None, + "Id": 29099, + "JobDescription": "Firmware Update Task", + "JobName": "Firmware Update Task", + "JobStatus": { + "Id": 2080, + "Name": "New" + }, + "JobType": { + "Id": 5, + "Internal": False, + 
"Name": "Update_Task" + }, + "LastRun": None, + "LastRunStatus": { + "Id": 2200, + "Name": "NotRun" + }, + "NextRun": None, + "Params": [ + { + "JobId": 29099, + "Key": "operationName", + "Value": "INSTALL_FIRMWARE" + }, + { + "JobId": 29099, + "Key": "complianceUpdate", + "Value": "false" + }, + { + "JobId": 29099, + "Key": "stagingValue", + "Value": "false" + }, + { + "JobId": 29099, + "Key": "signVerify", + "Value": "true" + } + ], + + "Schedule": "startnow", + "StartTime": None, + "State": "Enabled", + "Targets": [{ + "Data": "DCIM:INSTALLED#741__BIOS.Setup.1-1=1577776981156", + "Id": 28628, + "JobId": 29099, + "TargetType": { + "Id": 1000, + "Name": "DEVICE" + } + }], + "UpdatedBy": None, + "Visible": True + } + data = self.module.spawn_update_job(ome_connection_powerstate_mock, param) + assert data == param + + def test_build_power_state_payload_success_case(self, ome_connection_powerstate_mock): + + payload = self.module.build_power_state_payload(Constants.device_id1, "off", 2000) + assert payload == { + 'Id': 0, + 'JobDescription': 'DeviceAction_Task', + 'JobName': 'DeviceAction_Task_PowerState', + 'JobType': { + 'Id': 3, + 'Name': 'DeviceAction_Task' + }, + 'Params': [ + { + 'Key': 'operationName', + 'Value': 'POWER_CONTROL' + }, + { + 'Key': 'powerState', + 'Value': '2000' + } + ], + 'Schedule': 'startnow', + 'State': 'Enabled', + 'Targets': [ + { + 'Data': '', + 'Id': 1234, + 'TargetType': { + 'Id': 'off', + 'Name': 'DEVICE' + } + } + ] + } + + def test_get_device_state_success_case01(self, ome_connection_powerstate_mock, ome_response_mock): + json_data = { + "report_list": [{"Id": Constants.device_id1, "PowerState": "on", "Type": 1000}]} + ome_response_mock.status_code = 200 + ome_response_mock.success = True + f_module = self.get_module_mock() + data = self.module.get_device_state(f_module, json_data, Constants.device_id1) + assert data == ("on", 1000) + + def test_get_device_state_fail_case01(self, ome_connection_powerstate_mock, ome_response_mock): + 
json_data = { + "report_list": [{"Id": Constants.device_id1, "PowerState": "on", "Type": 4000}]} + ome_response_mock.status_code = 500 + ome_response_mock.success = False + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + self.module.get_device_state(f_module, json_data, Constants.device_id1) + assert exc.value.args[0] == "Unable to complete the operation because power" \ + " state supports device type 1000 and 2000." + + def test_get_device_state_fail_case02(self, ome_connection_powerstate_mock, ome_response_mock): + json_data = { + "report_list": [{"Id": 1224, "power_state": "on", "Type": 1000}]} + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + self.module.get_device_state(f_module, json_data, Constants.device_id1) + assert exc.value.args[0] == "Unable to complete the operation because the entered target" \ + " device id '{0}' is invalid.".format(1234) + + def test_main_powerstate_success_case01(self, ome_default_args, mocker, ome_connection_powerstate_mock, + ome_response_mock): + mocker.patch( + MODULE_PATH + 'ome_powerstate.get_device_resource', + return_value={"Repository": "payload"}) + ome_default_args.update({"device_id": "11111", "power_state": "off"}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"device_id": "11111", "power_state": "off"}]} + ome_response_mock.status_code = 200 + data = self._run_module(ome_default_args) + assert data['changed'] is True + assert data['msg'] == "Power State operation job submitted successfully." 
+ + def test_main_powerstate_success_case02(self, ome_default_args, mocker, ome_connection_powerstate_mock, + ome_response_mock): + mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_resource', + return_value={"Repository": "payload"}) + ome_default_args.update({"device_service_tag": "KLBR111", "power_state": "on"}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"device_id": "11111", "power_state": "on"}]} + ome_response_mock.status_code = 200 + data = self._run_module(ome_default_args) + assert data['changed'] is True + assert data['msg'] == "Power State operation job submitted successfully." + + def test_main_powerstate_failure_case(self, ome_default_args, mocker, ome_connection_powerstate_mock, + ome_response_mock): + mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_resource', + return_value={"Repository": "payload"}) + mocker.patch(MODULE_PATH + 'ome_powerstate.spawn_update_job', + return_value="payload") + ome_default_args.update({"device_service_tag": None, "power_state": "on"}) + ome_response_mock.json_data = {"value": [{"device_service_tag": None, "power_state": "on"}]} + ome_response_mock.status_code = 500 + data = self._run_module_with_fail_json(ome_default_args) + assert data['msg'] == "device_id and device_service_tag attributes should not be None." 
    def test_get_device_resource_success_case01(self, mocker, ome_default_args, ome_connection_powerstate_mock,
                                                ome_response_mock):
        # get_device_resource() should return the job payload produced by
        # build_power_state_payload verbatim when the target tag is found in the
        # device report and check mode is off.
        ome_default_args.update({"device_id": Constants.service_tag1, "power_state": "on", "Type": 1000,
                                 "device_service_tag": Constants.service_tag1})
        mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_state',
                     return_value=('on', 1000))
        mocker.patch(MODULE_PATH + 'ome_powerstate.build_power_state_payload',
                     return_value={'Id': 0, 'JobDescription': 'DeviceAction_Task',
                                   'JobName': 'DeviceAction_Task_PowerState',
                                   'JobType': {'Id': 3, 'Name': 'DeviceAction_Task'},
                                   'Params': [{'Key': 'operationName', 'Value': 'POWER_CONTROL'},
                                              {'Key': 'powerState', 'Value': '2000'}],
                                   'Schedule': 'startnow',
                                   'State': 'Enabled',
                                   'Targets': [{'Data': '',
                                                'Id': 1234,
                                                'TargetType': {'Id': 'off',
                                                               'Name': 'DEVICE'}}]})
        ome_connection_powerstate_mock.get_all_report_details.return_value = {
            'report_list': [{"DeviceServiceTag": Constants.service_tag1, "Id": Constants.service_tag1,
                             "power_state": "on"}]}
        f_module = self.get_module_mock(params=ome_default_args)
        f_module.check_mode = False
        data = self.module.get_device_resource(f_module, ome_connection_powerstate_mock)
        # The returned payload is passed through unchanged from the patched builder.
        assert data == {'Id': 0, 'JobDescription': 'DeviceAction_Task', 'JobName': 'DeviceAction_Task_PowerState',
                        'JobType': {'Id': 3, 'Name': 'DeviceAction_Task'},
                        'Params': [{'Key': 'operationName', 'Value': 'POWER_CONTROL'},
                                   {'Key': 'powerState', 'Value': '2000'}],
                        'Schedule': 'startnow',
                        'State': 'Enabled',
                        'Targets': [{'Data': '',
                                     'Id': 1234,
                                     'TargetType': {'Id': 'off',
                                                    'Name': 'DEVICE'}}]}

    def test_get_device_resource_success_case02(self, mocker, ome_default_args, ome_connection_powerstate_mock,
                                                ome_response_mock):
        # When the report has no entry whose DeviceServiceTag matches the requested
        # tag (here it is None), get_device_resource() must raise the
        # invalid-service-tag error.  NOTE(review): despite the "success" name this
        # is a negative-path test.
        ome_default_args.update({"device_id": Constants.service_tag1, "power_state": "on", "Type": 1000,
                                 "device_service_tag": Constants.service_tag1})
        mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_state',
                     return_value=('on', 1000))
        mocker.patch(MODULE_PATH + 'ome_powerstate.build_power_state_payload',
                     return_value={'Id': 0, 'JobDescription': 'DeviceAction_Task',
                                   'JobName': 'DeviceAction_Task_PowerState',
                                   'JobType': {'Id': 3, 'Name': 'DeviceAction_Task'},
                                   'Params': [{'Key': 'operationName', 'Value': 'POWER_CONTROL'},
                                              {'Key': 'powerState', 'Value': '2000'}],
                                   'Schedule': 'startnow',
                                   'State': 'Enabled',
                                   'Targets': [{'Data': '',
                                                'Id': 1234,
                                                'TargetType': {'Id': 'off',
                                                               'Name': 'DEVICE'}}]})
        ome_connection_powerstate_mock.get_all_report_details.return_value = {
            'report_list': [{"DeviceServiceTag": None, "Id": Constants.service_tag1,
                             "power_state": "on"}]}
        f_module = self.get_module_mock(params=ome_default_args, check_mode=False)
        with pytest.raises(Exception) as exc:
            self.module.get_device_resource(f_module, ome_connection_powerstate_mock)
        # 'MXL1234' is presumably Constants.service_tag1 — TODO confirm against common.Constants.
        assert exc.value.args[0] == "Unable to complete the operation because the entered target device " \
                                    "service tag 'MXL1234' is invalid."
+ + def test_get_device_resource_success_case03(self, mocker, ome_default_args, ome_connection_powerstate_mock, + ome_response_mock): + ome_default_args.update({"device_id": Constants.service_tag1, "power_state": "coldboot", "Type": 1000, + "device_service_tag": Constants.service_tag1}) + mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_state', + return_value=('off', 1000)) + mocker.patch(MODULE_PATH + 'ome_powerstate.build_power_state_payload', + return_value={'Id': 0, 'JobDescription': 'DeviceAction_Task', + 'JobName': 'DeviceAction_Task_PowerState', + 'JobType': {'Id': 3, 'Name': 'DeviceAction_Task'}, + 'Params': [{'Key': 'operationName', 'Value': 'POWER_CONTROL'}, + {'Key': 'powerState', 'Value': '2000'}], + 'Schedule': 'startnow', + 'State': 'Enabled', + 'Targets': [{'Data': '', + 'Id': 1234, + 'TargetType': {'Id': 'off', + 'Name': 'DEVICE'}}]}) + ome_connection_powerstate_mock.get_all_report_details.return_value = { + 'report_list': [{"DeviceServiceTag": Constants.service_tag1, "Id": Constants.service_tag1, + "power_state": "coldboot"}]} + ome_response_mock.success = True + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + with pytest.raises(Exception) as exc: + self.module.get_device_resource(f_module, ome_connection_powerstate_mock) + assert exc.value.args[0] == "No changes found to commit." 
    def test_get_device_resource_success_case04(self, mocker, ome_default_args, ome_connection_powerstate_mock,
                                                ome_response_mock):
        # Check-mode variant where get_device_state reports the numeric state 2;
        # the module should conclude the device is already in the requested state
        # and raise the no-changes message instead of queueing a job.
        ome_default_args.update({"device_id": Constants.service_tag1, "power_state": "on", "Type": 1000,
                                 "device_service_tag": Constants.service_tag1})
        mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_state',
                     return_value=(2, 1000))
        mocker.patch(MODULE_PATH + 'ome_powerstate.build_power_state_payload',
                     return_value={'Id': 0, 'JobDescription': 'DeviceAction_Task',
                                   'JobName': 'DeviceAction_Task_PowerState',
                                   'JobType': {'Id': 3, 'Name': 'DeviceAction_Task'},
                                   'Params': [{'Key': 'operationName', 'Value': 'POWER_CONTROL'},
                                              {'Key': 'powerState', 'Value': '2000'}],
                                   'Schedule': 'startnow',
                                   'State': 'Enabled',
                                   'Targets': [{'Data': '',
                                                'Id': 1234,
                                                'TargetType': {'Id': 'off',
                                                               'Name': 'DEVICE'}}]})
        ome_connection_powerstate_mock.get_all_report_details.return_value = {
            'report_list': [
                {"DeviceServiceTag": Constants.service_tag1,
                 "Id": Constants.service_tag1, "power_state": "on"
                 }
            ]
        }
        f_module = self.get_module_mock(params=ome_default_args, check_mode=True)
        with pytest.raises(Exception) as exc:
            self.module.get_device_resource(f_module, ome_connection_powerstate_mock)
        assert exc.value.args[0] == "No changes found to commit."
    def test_get_device_resource_failed_case01(self, mocker, ome_default_args, ome_connection_powerstate_mock,
                                               ome_response_mock):
        # Check mode with a state transition pending: get_device_resource() must
        # raise the "Changes found" message.  NOTE(review): the "failed_case"
        # name is misleading — this asserts the check-mode changed path.
        ome_default_args.update({"device_id": None, "power_state": "on", "Type": 1000,
                                 "device_service_tag": "@#4"})
        mocker.patch(MODULE_PATH + 'ome_powerstate.get_device_state',
                     return_value=('on', 1000))
        ome_connection_powerstate_mock.get_all_report_details.return_value = {
            'report_list': [{"DeviceServiceTag": "@#4", "Id": None,
                             "power_state": "on"}]}
        f_module = self.get_module_mock(params=ome_default_args, check_mode=True)
        with pytest.raises(Exception) as exc:
            self.module.get_device_resource(f_module, ome_connection_powerstate_mock)
        assert exc.value.args[0] == "Changes found to commit."

    @pytest.mark.parametrize("exc_type",
                             [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
    def test_powerstate_main_exception_case(self, exc_type, mocker, ome_default_args,
                                            ome_connection_powerstate_mock,
                                            ome_response_mock):
        # Exercises main()'s exception funnel: every exception type raised by the
        # helpers must surface as fail_json with a 'msg' and without a
        # 'power_state' key.  HTTPError/SSLValidationError need the full
        # urllib-style constructor arguments; the rest take a plain message.
        ome_default_args.update({"device_service_tag": Constants.service_tag1, "power_state": "on"})
        ome_response_mock.json_data = {"value": [{"device_service_tag": Constants.service_tag1, "power_state": "on",
                                                  "Id": Constants.device_id1}]}
        ome_response_mock.status_code = 400
        ome_response_mock.success = False
        json_str = to_text(json.dumps({"data": "out"}))
        if exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(
                MODULE_PATH + 'ome_powerstate.get_device_resource',
                side_effect=exc_type('test'))
            mocker.patch(
                MODULE_PATH + 'ome_powerstate.spawn_update_job',
                side_effect=exc_type('test'))
        else:
            mocker.patch(
                MODULE_PATH + 'ome_powerstate.spawn_update_job',
                side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                     {"accept-type": "application/json"}, StringIO(json_str)))
            mocker.patch(
                MODULE_PATH + 'ome_powerstate.get_device_resource',
                side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                     {"accept-type": "application/json"}, StringIO(json_str)))
        result = self._run_module_with_fail_json(ome_default_args)
        assert 'power_state' not in result
        assert 'msg' in result
        assert result['failed'] is True
        if exc_type == HTTPError:
            # NOTE(review): redundant — 'msg' in result is already asserted
            # unconditionally two lines above.
            assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py
new file mode 100644
index 00000000..91f7fc1b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py
@@ -0,0 +1,547 @@
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 5.2.0
# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import json
from io import StringIO
from ssl import SSLError

import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_profile
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule

# Dotted prefix for mocker.patch targets inside the ome_profile module.
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_profile.'
CHANGES_MSG = "Changes found to be applied."
NO_CHANGES_MSG = "No changes found to be applied."
@pytest.fixture
def ome_connection_mock_for_profile(mocker, ome_response_mock):
    # Replaces RestOME with a mock whose context-manager entry returns a
    # connection object; invoke_request always yields ome_response_mock.
    connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
    ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
    ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
    return ome_connection_mock_obj


class TestOmeProfile(FakeAnsibleModule):
    # Unit tests for the ome_profile module helpers; each helper is exercised
    # against a mocked REST connection and table-driven parameters.
    module = ome_profile

    # Template lookup by id or by name should return the matching record.
    @pytest.mark.parametrize("params",
                             [{"mparams": {"template_id": 123}, "success": True,
                               "json_data": {"value": [{"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}]},
                               "res": {"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}},
                              {"mparams": {"template_name": "temp1"}, "success": True,
                               "json_data": {"value": [{"Name": "temp1", "Id": 123, "IdentityPoolId": 23}]},
                               "res": {"Name": "temp1", "Id": 123, "IdentityPoolId": 23}}])
    def test_get_template_details(self, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        f_module = self.get_module_mock(params=params["mparams"])
        result = self.module.get_template_details(f_module, ome_connection_mock_for_profile)
        assert result == params["res"]

    # Target device lookup by device_id or by device_service_tag.
    @pytest.mark.parametrize("params",
                             [{"mparams": {"device_id": 123}, "success": True,
                               "json_data": {"value": [{"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}]},
                               "res": {"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}},
                              {"mparams": {"device_service_tag": "ABC1234"}, "success": True,
                               "json_data": {"value": [{"Identifier": "ABC1234", "Id": 123, "IdentityPoolId": 23}]},
                               "res": {"Identifier": "ABC1234", "Id": 123, "IdentityPoolId": 23}}])
    def test_get_target_details(self, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        f_module = self.get_module_mock(params=params["mparams"])
        result = self.module.get_target_details(f_module, ome_connection_mock_for_profile)
        assert result == params["res"]

    # attributes_check() should diff the requested attributes against the
    # profile's current attribute tree and return the count of differences
    # (here 2: one matched by Id, one matched by DisplayName path).
    @pytest.mark.parametrize("params",
                             [{"mparams": {
                                 "attributes": {
                                     "Attributes": [
                                         {
                                             "Id": 93812,
                                             "IsIgnored": False,
                                             "Value": "Aisle Five"
                                         },
                                         {
                                             "DisplayName": 'System, Server Topology, ServerTopology 1 Aisle Name',
                                             "IsIgnored": False,
                                             "Value": "Aisle 5"
                                         }
                                     ]
                                 }}, "success": True,
                                 "json_data": {
                                     "Id": 11,
                                     "Name": "ProfileViewEditAttributes",
                                     "AttributeGroupNames": [],
                                     "AttributeGroups": [
                                         {
                                             "GroupNameId": 5,
                                             "DisplayName": "System",
                                             "SubAttributeGroups": [
                                                 {
                                                     "GroupNameId": 33016,
                                                     "DisplayName": "Server Operating System",
                                                     "SubAttributeGroups": [],
                                                     "Attributes": [
                                                         {
                                                             "AttributeId": 93820,
                                                             "DisplayName": "ServerOS 1 Server Host Name",
                                                             "Description": None,
                                                             "Value": None,
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         }
                                                     ]
                                                 },
                                                 {
                                                     "GroupNameId": 33019,
                                                     "DisplayName": "Server Topology",
                                                     "SubAttributeGroups": [],
                                                     "Attributes": [
                                                         {
                                                             "AttributeId": 93812,
                                                             "CustomId": 0,
                                                             "AttributeEditInfoId": 2248,
                                                             "DisplayName": "ServerTopology 1 Aisle Name",
                                                             "Description": None,
                                                             "Value": "Aisle 5",
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         },
                                                         {
                                                             "AttributeId": 93811,
                                                             "DisplayName": "ServerTopology 1 Data Center Name",
                                                             "Value": "BLG 2nd Floor DS 1",
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         },
                                                         {
                                                             "AttributeId": 93813,
                                                             "DisplayName": "ServerTopology 1 Rack Name",
                                                             "Description": None,
                                                             "Value": None,
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         },
                                                         {
                                                             "AttributeId": 93814,
                                                             "DisplayName": "ServerTopology 1 Rack Slot",
                                                             "Description": None,
                                                             "Value": None,
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         }
                                                     ]
                                                 }
                                             ],
                                             "Attributes": []
                                         },
                                         {
                                             "GroupNameId": 9,
                                             "DisplayName": "iDRAC",
                                             "SubAttributeGroups": [
                                                 {
                                                     "GroupNameId": 32688,
                                                     "DisplayName": "Active Directory",
                                                     "SubAttributeGroups": [],
                                                     "Attributes": [
                                                         {
                                                             "AttributeId": 93523,
                                                             "DisplayName": "ActiveDirectory 1 Active Directory RAC Name",
                                                             "Description": None,
                                                             "Value": None,
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         }
                                                     ]
                                                 },
                                                 {
                                                     "GroupNameId": 32930,
                                                     "DisplayName": "NIC Information",
                                                     "SubAttributeGroups": [],
                                                     "Attributes": [
                                                         {
                                                             "AttributeId": 93035,
                                                             "DisplayName": "NIC 1 DNS RAC Name",
                                                             "Description": None,
                                                             "Value": None,
                                                             "IsReadOnly": False,
                                                             "IsIgnored": True,
                                                         },
                                                         {
                                                             "AttributeId": 92510,
                                                             "DisplayName": "NIC 1 Enable VLAN",
                                                             "Description": None,
                                                             "Value": "Disabled",
                                                             "IsReadOnly": False,
                                                             "IsIgnored": False,
                                                         }
                                                     ]
                                                 }
                                             ],
                                             "Attributes": []}]},
                                 "diff": 2}])
    def test_attributes_check(self, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        f_module = self.get_module_mock(params=params["mparams"])
        result = self.module.attributes_check(f_module, ome_connection_mock_for_profile,
                                              params['mparams']['attributes'], 123)
        assert result == params["diff"]

    # profile_operation() should dispatch each 'command' value to the matching
    # handler; the handler itself is patched, so only routing is verified.
    @pytest.mark.parametrize("params", [{"mparams": {"command": 'create'}, "func": "create_profile"},
                                        {"mparams": {"command": 'modify'}, "func": "modify_profile"},
                                        {"mparams": {"command": 'delete'}, "func": "delete_profile"},
                                        {"mparams": {"command": 'assign'}, "func": "assign_profile"},
                                        {"mparams": {"command": 'unassign'}, "func": "unassign_profile"},
                                        {"mparams": {"command": 'migrate'}, "func": "migrate_profile"}])
    def test_profile_operation(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        mocker.patch(MODULE_PATH + params.get('func'), return_value={"Id": 12})
        f_module = self.get_module_mock(params=params["mparams"])
        self.module.profile_operation(f_module, ome_connection_mock_for_profile)

    # get_profile() returns the profile record matching the requested name.
    @pytest.mark.parametrize("params", [{"mparams": {"name": "p1"}, "success": True, "json_data": {
        "value": [{"Id": 123, "ProfileName": "p1"}]}, "res": {"Id": 123, "ProfileName": "p1"}}])
    def test_get_profile(self, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        f_module = self.get_module_mock(params=params["mparams"])
        result = self.module.get_profile(ome_connection_mock_for_profile, f_module)
        assert result == params["res"]

    # create_profile() exits via exit_json/fail_json in every case, so each
    # scenario is asserted through pytest.raises on the exit message.
    @pytest.mark.parametrize("params", [{"mparams": {
        "command": "create", "template_name": "t1", "name_prefix": "profile",
        "number_of_profiles": 2, "description": "Created 1",
        "boot_to_network_iso": {
            "boot_to_network": True,
            "share_type": "CIFS",
            "share_ip": "100.200.300",
            "share_user": "shareuser",
            "share_pwd": "sharepwd",
            "workgroup": "workgroup",
            "iso_path": "pathofiso.iso",
            "iso_timeout": 8
        }
    },
        "success": True,
        "json_data": [1, 2],
        "res": "Successfully created 2 profile(s)."},
        {
            "mparams":
                {
                    "command": "create",
                    "template_name": "t1",
                    "name_prefix": "profile",
                    "number_of_profiles": 1
                },
            "success": True,
            "json_data": [1],
            "res": "Successfully created 1 profile(s)."},
        {
            "mparams":
                {
                    "command": "create",
                    "template_name": "t1",
                    "name_prefix": "profile",
                    "number_of_profiles": 1
                },
            "success": True, "check_mode": True, "json_data": [1], "res": CHANGES_MSG}
    ])
    def test_create_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 12})
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        with pytest.raises(Exception) as err:
            self.module.create_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message

    # modify_profile(): missing profile, real modification, no-op (same name),
    # and check-mode with attribute differences.
    @pytest.mark.parametrize("params", [
        {"mparams": {"command": "modify", "name": "profile"},
         "success": True,
         "prof": {}, "json_data": 0,
         "res": "Profile with the name 'profile' not found."},
        {"mparams": {"command": "modify", "name": "profile", "new_name": "modified profile",
                     "description": "new description",
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1",
                                                    "IsIgnored": True}]}}, "success": True,
         "prof": {"Id": 1234,
                  "ProfileName": "jrofile 00002",
                  "ProfileDescription": "from source template t1",
                  "NetworkBootToIso": {"BootToNetwork": True, "ShareType": "NFS", "IsoPath": "abcd.iso",
                                       "ShareDetail": {"IpAddress": "XX.XX.XX.XX", "ShareName": "XX.XX.XX.XX", },
                                       "IsoTimeout": 4},
                  "ProfileState": 0, },
         "json_data": 0,
         "res": "Successfully modified the profile."},
        {"mparams": {"command": "modify", "name": "myprofile", "new_name": "myprofile"},
         "success": True,
         "prof": {"Id": 1234, "ProfileName": "myprofile", "ProfileDescription": "my description"},
         "json_data": 0, "res": "No changes found to be applied."},
        {"mparams": {"command": "modify", "name": "profile", "new_name": "modified profile",
                     "description": "new description",
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso", "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1",
                                                    "IsIgnored": True}]}}, "success": True,
         "prof": {"Id": 1234, "ProfileName": "jrofile 00002",
                  "ProfileDescription": "from source template t1",
                  "NetworkBootToIso": {
                      "BootToNetwork": True, "ShareType": "NFS", "IsoPath": "abcd.iso",
                      "ShareDetail": {"IpAddress": "XX.XX.XX.XX", "ShareName": "XX.XX.XX.XX"}, "IsoTimeout": 4},
                  "ProfileState": 0, },
         "json_data": 0, "attributes_check": 2, "check_mode": True, "res": CHANGES_MSG}
    ])
    def test_modify_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_profile', return_value=params.get('prof'))
        mocker.patch(MODULE_PATH + 'attributes_check', return_value=params.get('attributes_check', 0))
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        with pytest.raises(Exception) as err:
            self.module.modify_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message

    # delete_profile(): assigned profiles cannot be deleted; name vs. filter
    # based deletion, plus check-mode variants.
    @pytest.mark.parametrize("params", [
        {"mparams": {"command": "delete", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4}, "json_data": 0,
         "res": "Profile has to be in an unassigned state for it to be deleted."},
        {"mparams": {"command": "delete", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0}, "json_data": 0,
         "res": "Successfully deleted the profile."},
        {"mparams": {"command": "delete", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0}, "json_data": 0, "check_mode": True,
         "res": CHANGES_MSG},
        {"mparams": {"command": "delete", "name": "profile"}, "success": True,
         "prof": {}, "json_data": 0,
         "res": "Profile with the name 'profile' not found."},
        {"mparams": {"command": "delete", "filters": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0}, "json_data": 0,
         "res": "Successfully completed the delete operation."},
        {"mparams": {"command": "delete", "filters": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0}, "json_data": 0, "check_mode": True,
         "res": CHANGES_MSG},
    ])
    def test_delete_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_profile', return_value=params.get('prof'))
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        with pytest.raises(Exception) as err:
            self.module.delete_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message

    # assign_profile(): covers already-assigned conflicts, invalid targets,
    # successful assignment (by id and by service tag), check mode, and the
    # job-triggering path when a DeploymentTaskId is present.
    @pytest.mark.parametrize("params", [
        {"mparams": {"command": "assign", "name": "profile"}, "success": True,
         "prof": {"Id": 123, "ProfileState": 1, "TargetName": "ABC1234"}, "json_data": 0,
         "res": "The profile is assigned to a different target. Unassign the profile and then proceed with assigning the"
                " profile to the target."},
        {"mparams": {"command": "assign", "name": "profile"}, "success": True, "prof": {},
         "json_data": 0, "res": "Profile with the name 'profile' not found."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234}, "success": True,
         "prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"},
         "json_data": [234, 123],
         "res": "The target device is invalid for the given profile."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"}, "json_data": [23, 123],
         "res": "Successfully applied the assign operation."},
        {"mparams": {"command": "assign", "name": "profile", "device_service_tag": "ABCDEFG",
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True, "prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"},
         "json_data": [23, 123], "res": "Successfully applied the assign operation."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 4, "TargetId": 234}, "target": {"Id": 234, "Name": "mytarget"},
         "json_data": [23, 123],
         "res": "The profile is assigned to the target 234."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 4, "TargetId": 235}, "target": {"Id": 234, "Name": "mytarget"},
         "json_data": [23, 123],
         "res": "The profile is assigned to a different target. Use the migrate command or unassign the profile and "
                "then proceed with assigning the profile to the target."},
        {"mparams": {"command": "assign", "name": "profile", "device_service_tag": "STG1234",
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 1, "TargetId": 235, "TargetName": "STG1234"}, "target": "Target invalid.",
         "json_data": [23, 123],
         "res": "The profile is assigned to the target STG1234."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 123,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 1, "TargetId": 235, "TargetName": "STG1234"}, "target": "Target invalid.",
         "json_data": [23, 123],
         "res": "Target invalid."},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True, "check_mode": True,
         "prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"}, "json_data": [23, 123],
         "res": CHANGES_MSG},
        {"mparams": {"command": "assign", "name": "profile", "device_id": 234,
                     "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
                                             "iso_path": "path/to/my_iso.iso",
                                             "iso_timeout": 8},
                     "attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
         "success": True,
         "prof": {"Id": 123, "ProfileState": 0, "DeploymentTaskId": 12}, "target": {"Id": 234, "Name": "mytarget"},
         "json_data": [23, 123],
         "res": "Successfully triggered the job for the assign operation."},
    ])
    def test_assign_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_profile', return_value=params.get('prof'))
        mocker.patch(MODULE_PATH + 'get_target_details', return_value=params.get('target'))
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        # time.sleep is patched out so the polling loop inside assign_profile runs instantly.
        mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
        with pytest.raises(Exception) as err:
            self.module.assign_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message

    # unassign_profile(): unassigned no-op, missing profile, filter-based
    # unassign, deployment-task-in-progress guard, and job trigger / check mode.
    @pytest.mark.parametrize("params", [
        {"mparams": {"command": "unassign", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0},
         "json_data": 0, "res": "Profile is in an unassigned state."},
        {"mparams": {"command": "unassign", "name": "profile"}, "success": True,
         "prof": {}, "json_data": 0,
         "res": "Profile with the name 'profile' not found."},
        {"mparams": {"command": "unassign", "filters": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4},
         "json_data": 0, "res": "Successfully applied the unassign operation. No job was triggered."},
        {"mparams": {"command": "unassign", "filters": "profile"}, "success": True,
         "json_data": 0, "prof": {"Id": 12, "ProfileState": 1},
         "res": "Successfully applied the unassign operation. No job was triggered."},
        {"mparams": {"command": "unassign", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "DeploymentTaskId": 123},
         "json_data": {"LastRunStatus": {"Name": "Running"}},
         "res": "Profile deployment task is in progress. Wait for the job to finish."},
        {"mparams": {"command": "unassign", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "DeploymentTaskId": 123},
         "json_data": {"LastRunStatus": {"Name": "Starting"}},
         "res": "Successfully triggered a job for the unassign operation."},
        {"mparams": {"command": "unassign", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "DeploymentTaskId": 123},
         "json_data": {"LastRunStatus": {"Name": "Starting"}}, "check_mode": True,
         "res": CHANGES_MSG}
    ])
    def test_unassign_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_profile', return_value=params.get('prof'))
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
        with pytest.raises(Exception) as err:
            self.module.unassign_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message

    # migrate_profile(): success, missing profile, wrong state, same-target
    # no-op, invalid target, invalid device for profile, and check mode.
    @pytest.mark.parametrize("params", [
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "TargetId": 14, "DeploymentTaskId": 123},
         "target": {"Id": 12},
         "json_data": [1, 2, 3], "res": "Successfully triggered the job for the migrate operation."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {},
         "target": {"Id": 12, "TargetId": 14}, "json_data": 0,
         "res": "Profile with the name 'profile' not found."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 0, "TargetId": 14},
         "target": {"Id": 13, "TargetId": 14}, "json_data": [1, 2, 3],
         "res": "Profile needs to be in a deployed state for a migrate operation."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "TargetId": 12}, "target": {"Id": 12}, "json_data": 0,
         "res": "No changes found to be applied."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "TargetId": 14, "DeploymentTaskId": 123},
         "target": "Target invalid.",
         "json_data": [1, 2, 3], "res": "Target invalid."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "TargetId": 14, "DeploymentTaskId": 123},
         "target": {"Id": 12},
         "json_data": [12, 21, 13], "res": "The target device is invalid for the given profile."},
        {"mparams": {"command": "migrate", "name": "profile"}, "success": True,
         "prof": {"Id": 12, "ProfileState": 4, "TargetId": 14, "DeploymentTaskId": 123},
         "target": {"Id": 12}, "check_mode": True,
         "json_data": [1, 2, 3], "res": CHANGES_MSG},
    ])
    def test_migrate_profile(self, mocker, params, ome_connection_mock_for_profile, ome_response_mock):
        ome_response_mock.success = params.get("success", True)
        ome_response_mock.json_data = params["json_data"]
        mocker.patch(MODULE_PATH + 'get_profile', return_value=params.get('prof'))
        mocker.patch(MODULE_PATH + 'get_target_details', return_value=params.get('target'))
        f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
        error_message = params["res"]
        mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
        with pytest.raises(Exception) as err:
            self.module.migrate_profile(f_module, ome_connection_mock_for_profile)
        assert err.value.args[0] == error_message
@pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_profile_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_profile, ome_response_mock): + ome_default_args.update({"template_name": "t1"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'profile_operation', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'profile_operation', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'profile_operation', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py new file mode 100644 index 00000000..d83725d2 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ssl import SSLError +from io import StringIO +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible.module_utils._text import to_text +from ansible_collections.dellemc.openmanage.plugins.modules import ome_server_interface_profile_info +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_server_interface_profile_info.' + + +@pytest.fixture +def ome_conn_mock_sip(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOMEMSIP(FakeAnsibleModule): + + module = ome_server_interface_profile_info + + def test_check_domain_service(self, ome_conn_mock_sip, ome_default_args): + f_module = self.get_module_mock() + result = self.module.check_domain_service(f_module, ome_conn_mock_sip) + assert result is None + + def test_get_sip_info(self, ome_conn_mock_sip, ome_response_mock): + f_module = self.get_module_mock(params={"device_id": [25011]}) + ome_conn_mock_sip.get_all_report_details.return_value = { + "resp_obj": ome_response_mock, "report_list": [{"Id": 25012, "DeviceServiceTag": "HKRF20"}] + } + with pytest.raises(Exception) as err: + self.module.get_sip_info(f_module, ome_conn_mock_sip) + assert err.value.args[0] == "Unable to complete the operation because the entered target " \ + "device id(s) '25011' are invalid." 
+ f_module = self.get_module_mock(params={"device_id": [25012]}) + ome_response_mock.json_data = {"Id": "HKRF20", "ServerServiceTag": "HKRF20", "value": [{"Network": []}]} + ome_conn_mock_sip.json_data = [{"Id": "HKRF20", "ServerServiceTag": "HKRF20"}] + ome_conn_mock_sip.strip_substr_dict.return_value = {"Id": "HKRF20", "ServerServiceTag": "HKRF20", + "Networks": [{"Id": 10001}]} + result = self.module.get_sip_info(f_module, ome_conn_mock_sip) + assert result[0]["Id"] == "HKRF20" + + def test_main_case(self, ome_conn_mock_sip, ome_response_mock, ome_default_args, mocker): + ome_default_args.update({"device_id": None, "validate_certs": False}) + with pytest.raises(Exception) as err: + self._run_module(ome_default_args) + assert err.value.args[0]['msg'] == "one of the following is required: device_id, device_service_tag." + ome_default_args.update({"device_id": [25011], "validate_certs": False}) + mocker.patch(MODULE_PATH + 'check_domain_service') + mocker.patch(MODULE_PATH + 'get_sip_info', return_value={"server_profiles": [{"Id": 25011}]}) + result = self._run_module(ome_default_args) + assert result["msg"] == "Successfully retrieved the server interface profile information." 
+ + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_sip_power_main_exception_case(self, exc_type, mocker, ome_default_args, + ome_conn_mock_sip, ome_response_mock): + ome_default_args.update({"device_id": [25011], "validate_certs": False}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'check_domain_service', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py new file mode 100644 index 00000000..dcb1688a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py @@ -0,0 +1,699 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_server_interface_profiles +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +APPLY_TRIGGERED = "Successfully initiated the apply server profiles job." +NO_STAG = "No profile found for service tag {service_tag}." +CHANGES_MSG = "Changes found to be applied." +NO_CHANGES_MSG = "No changes found to be applied." +VLAN_NOT_FOUND = "The VLAN with a name {vlan_name} not found." +DUPLICATE_NIC_IDENTIFIED = "Duplicate NIC identfiers provided." +INVALID_UNTAGGED = "The untagged VLAN {id} provided for the NIC ID {nic_id} is not valid." +NW_OVERLAP = "Network profiles of {service_tag} provided for tagged or untagged VLANs of {nic_id} overlaps." +INVALID_DEV_ST = "Unable to complete the operation because the entered target device service tag(s) '{0}' are invalid." +INVALID_DEV_ID = "Unable to complete the operation because the entered target device ids '{0}' are invalid." + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_server_interface_profiles.' 
+ + +@pytest.fixture +def ome_connection_mock_for_sips(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeSIPs(FakeAnsibleModule): + module = ome_server_interface_profiles + + @pytest.mark.parametrize("params", [ + {"json_data": {"JobId": 1234}, + 'message': APPLY_TRIGGERED, "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 2, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': INVALID_DEV_ST.format('ABC123'), "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC123"], + "nic_configuration": [], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': INVALID_DEV_ID.format('1111'), "success": True, + 
'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + 'mparams': {"job_wait": False, "device_id": [1111], + "nic_configuration": [], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': INVALID_UNTAGGED.format(id=10, nic_id="NIC.Mezzanine.1A-1-1"), "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 10}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': VLAN_NOT_FOUND.format(vlan_name='vlan_x'), "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"vlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 
12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["vlan_x"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': NO_CHANGES_MSG, "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 3, + "Networks": [ + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan", "VLAN 1"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": False, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, "check_mode": True, + 'message': CHANGES_MSG, "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + 
"NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 3, + "Networks": [ + 11569 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": {"JobId": 1234}, + 'message': DUPLICATE_NIC_IDENTIFIED, "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 2, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + 
}}, + {"json_data": + {"Id": 14808, + "JobId": 1234, + "JobName": "Server profile(s) configuration task", + "JobDescription": "Applies bonding technology to profile and networks to NICs.", + "Value": "Successfully Applied bonding technology to profile and networks to NICs.", + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + }, + 'message': "Successfully Applied bonding technology to profile and networks to NICs.", "success": True, + 'Devices': {"value": [{"Id": 123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 2, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }}, + {"json_data": { + "Id": 14808, + "JobId": 1234, + "JobName": "Server profile(s) configuration task", + "JobDescription": "Applies bonding technology to profile and networks to NICs.", + "Value": 1234, # to cause exception + "LastRunStatus": { + "@odata.type": "#JobService.JobStatus", + "Id": 2060, + "Name": "Completed" + }, + }, + 'message': "Applies bonding technology to profile and networks to NICs.", "success": True, + 'Devices': {"value": [{"Id": 
123, "Identifier": "ABC1234"}]}, + "_get_profile": { + "Id": "ABC1234", + "ServerServiceTag": "ABC1234", + "BondingTechnology": "NoTeaming"}, + "_get_interface": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": [ + 11569, + 10155 + ], + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 2, + "Networks": [ + 11569, + 10155, + 12350 + ], + "NicBonded": False + }}, + "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679, + "three": 14681}, + "natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0}, + 'mparams': {"device_service_tag": ["ABC1234"], + "nic_configuration": [{ + "nic_identifier": "NIC.Mezzanine.1A-1-1", + "tagged_networks": { + "names": ["jagvlan"], + "state": "present"}, + "team": False, + "untagged_network": 3}, + { + "nic_identifier": "NIC.Mezzanine.1A-2-1", + "tagged_networks": {"names": ["range120-125"], + "state": "present"}, + "team": True, + "untagged_network": 3}], + "nic_teaming": "NoTeaming", + }} + ]) + def test_ome_sips_success_case( + self, + params, + ome_connection_mock_for_sips, + ome_response_mock, + ome_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + ome_connection_mock_for_sips.get_all_items_with_pagination.return_value = params[ + 'Devices'] + mocker.patch( + MODULE_PATH + + '_get_profile', + return_value=params.get( + '_get_profile', + {})) + mocker.patch( + MODULE_PATH + + '_get_interface', + return_value=params.get( + '_get_interface', + {})) + mocker.patch( + MODULE_PATH + 'get_vlan_ids', + return_value=( + params.get('vlan_map'), + params.get('natives'))) + ome_default_args.update(params['mparams']) + result = self._run_module( + ome_default_args, check_mode=params.get( + 'check_mode', False)) + assert result['msg'] == params['message'] + + @pytest.mark.parametrize("params", + [{"json_data": {"Id": "ABC1234", + "ServerServiceTag": "ABC1234", + 
"BondingTechnology": "NoTeaming"}, + "service_tag": "ABC1234"}]) + def test_ome_get_profile( + self, + params, + ome_connection_mock_for_sips, + ome_response_mock, + ome_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + f_module = self.get_module_mock(ome_default_args) + result = self.module._get_profile( + f_module, + ome_connection_mock_for_sips, + params.get("service_tag")) + assert result["Id"] == params.get("service_tag") + + @pytest.mark.parametrize("params", [ + {"json_data": { + "@odata.context": "/api/$metadata#Collection(NetworkService.ServerInterfaceProfile)", + "@odata.count": 2, + "value": [ + { + "Id": "NIC.Mezzanine.1A-1-1", + "OnboardedPort": "59HW8X2:ethernet1/1/1", + "NativeVLAN": 3, + "NicBonded": False, + "FabricId": "f918826e-2515-4967-98f4-5488e810ca2e", + "Networks@odata.count": 2, + "Networks": [ + { + "Id": 10155, + "Name": "jagvlan", + "Description": None, + "VlanMaximum": 143, + "VlanMinimum": 143, + "Type": 1, + }, + { + "Id": 11569, + "Name": "VLAN 1", + "Description": "VLAN 1", + "VlanMaximum": 1, + "VlanMinimum": 1, + "Type": 2, + } + ] + }, + { + "Id": "NIC.Mezzanine.1A-2-1", + "OnboardedPort": "6H7J6Z2:ethernet1/1/1", + "NativeVLAN": 3, + "NicBonded": False, + "FabricId": "f918826e-2515-4967-98f4-5488e810ca2e", + "Networks@odata.count": 3, + "Networks": [ + { + "Id": 10155, + "Name": "jagvlan", + "Description": None, + "VlanMaximum": 143, + "VlanMinimum": 143, + "Type": 1, + }, + { + "Id": 11569, + "Name": "VLAN 1", + "Description": "VLAN 1", + "VlanMaximum": 1, + "VlanMinimum": 1, + "Type": 2, + }, + { + "Id": 12350, + "Name": "range120-125", + "Description": None, + "VlanMaximum": 125, + "VlanMinimum": 120, + "Type": 3, + } + ] + } + ] + }, + "service_tag": "ABC1234", "intrfc": { + "NIC.Mezzanine.1A-1-1": { + "NativeVLAN": 3, + "Networks": { + 11569, + 10155 + }, + "NicBonded": False + }, + "NIC.Mezzanine.1A-2-1": { + "NativeVLAN": 3, + 
"Networks": { + 11569, + 10155, + 12350 + }, + "NicBonded": False + } + }}]) + def test_ome_get_interface( + self, + params, + ome_connection_mock_for_sips, + ome_response_mock, + ome_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + f_module = self.get_module_mock(ome_default_args) + result = self.module._get_interface( + f_module, + ome_connection_mock_for_sips, + params.get("service_tag")) + assert result == params.get("intrfc") + + @pytest.mark.parametrize("params", + [{"json_data": {"@odata.context": "/api/$metadata#Collection(NetworkConfigurationService.Network)", + "@odata.count": 6, + "value": [{"Id": 10155, + "Name": "jagvlan", + "VlanMaximum": 143, + "VlanMinimum": 143, + "Type": 1, + }, + {"Id": 11569, + "Name": "VLAN 1", + "Description": "VLAN 1", + "VlanMaximum": 1, + "VlanMinimum": 1, + "Type": 2, + }, + {"Id": 12350, + "Name": "range120-125", + "VlanMaximum": 125, + "VlanMinimum": 120, + "Type": 3, + }, + {"Id": 12352, + "Name": "range130-135", + "VlanMaximum": 135, + "VlanMinimum": 130, + "Type": 4, + }, + {"Id": 14679, + "Name": "two", + "VlanMaximum": 2, + "VlanMinimum": 2, + "Type": 1, + }, + {"Id": 14681, + "Name": "three", + "VlanMaximum": 3, + "VlanMinimum": 3, + "Type": 3, + }]}, + "vlan_map": {"jagvlan": 10155, + "VLAN 1": 11569, + "range120-125": 12350, + "range130-135": 12352, + "two": 14679, + "three": 14681}, + "natives": {143: 10155, + 1: 11569, + 2: 14679, + 3: 14681, + 0: 0}}]) + def test_ome_get_vlan_ids( + self, + params, + ome_connection_mock_for_sips, + ome_response_mock, + ome_default_args, + mocker): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params['json_data'] + vlan_map, natives = self.module.get_vlan_ids( + ome_connection_mock_for_sips) + assert vlan_map == params.get("vlan_map") + assert natives == params.get("natives") + + @pytest.mark.parametrize("exc_type", + [IOError, + ValueError, + 
SSLError, + TypeError, + ConnectionError, + HTTPError, + URLError]) + def test_ome_sips_main_exception_failure_case( + self, + exc_type, + mocker, + ome_default_args, + ome_connection_mock_for_sips, + ome_response_mock): + ome_default_args.update({"device_service_tag": ["SRV1234"], + "nic_configuration": [{'nic_identifier': "NIC1"}]}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch( + MODULE_PATH + 'get_valid_service_tags', + side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'get_valid_service_tags', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_valid_service_tags', + side_effect=exc_type('http://testhost.com', + 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py new file mode 100644 index 00000000..5d275f19 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py @@ -0,0 +1,1892 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 3.6.0 +# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_smart_fabric +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text + +CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied." +CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied." +FABRIC_NOT_FOUND_ERROR_MSG = "The smart fabric '{0}' is not present in the system." +DOMAIN_SERVICE_TAG_ERROR_MSG = "Unable to retrieve the domain information because the" \ + " domain of the provided service tag {0} is not available." +LEAD_CHASSIS_ERROR_MSG = "System should be a lead chassis if the assigned fabric topology type is {0}." +SYSTEM_NOT_SUPPORTED_ERROR_MSG = "Fabric management is not supported on the specified system." +DESIGN_MODEL_ERROR_MSG = "The network type of the {0} must be {1}." +DEVICE_SERVICE_TAG_TYPE_ERROR_MSG = "The {0} type must be {1}." +DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG = "Unable to retrieve the device information because the device" \ + " with the provided service tag {0} is not available." +IDEMPOTENCY_MSG = "Specified fabric details are the same as the existing settings." +REQUIRED_FIELD = "Options 'fabric_design', 'primary_switch_service_tag' and 'secondary_switch_service_tag'" \ + " are required for fabric creation." +DUPLICATE_TAGS = "The switch details of the primary switch overlaps with the secondary switch details." +PRIMARY_SWITCH_OVERLAP_MSG = "The primary switch service tag is overlapping with existing secondary switch details." 
+SECONDARY_SWITCH_OVERLAP_MSG = "The switch details of the secondary switch overlaps with the existing primary" \ + " switch details." +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' +device_details = { + "Id": Constants.device_id1, + "Type": 4000, + "Identifier": "GTCT8T2", + "DeviceServiceTag": "GTCT8T2", + "ChassisServiceTag": "FPTN6Z2", + "Model": "MX9116n Fabric Engine", + "PowerState": 17, + "ManagedState": 3000, + "Status": 1000, + "SystemId": 2031, + "DeviceName": "IOM-A2", + "SlotConfiguration": { + "ChassisName": "MX-FPTN6Z2", + "SlotId": "13313", + "DeviceType": "4000", + "ChassisId": "13294", + "SlotNumber": "2", + "SledBlockPowerOn": "null", + "SlotName": "IOM-A2", + "ChassisServiceTag": "FPTN6Z2", + "SlotType": "4000" + }, + "DeviceManagement": [ + { + "ManagementId": 76383, + "NetworkAddress": Constants.hostname1, + "MacAddress": "00:00:00:00:00", + "ManagementType": 2, + "InstrumentationName": "MX9116n Fabric Engine", + "DnsName": "", + "ManagementProfile": [ + { + "ManagementProfileId": 76383, + "ProfileId": "FX7_BASE", + "ManagementId": 76383, + "AgentName": "", + "Version": "", + "ManagementURL": "", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-05-07 15:00:14.718" + } + ] + } + ] +} +all_fabric_details = [ + { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_1", + "Description": "create new fabric1", + "OverrideLLDPConfiguration": "NA", + "ScaleVLANProfile": "NA", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": { + "@odata.id": "/api/NetworkService/Fabrics('1312cceb-c3dd-4348-95c1-d8541a17d776')/FabricDesign" + } + }, + { + "Id": "1312cceb-c3dd-4348-95c1-123456", + "Name": "Fabric_1_2", + "Description": "create new fabric2", + "OverrideLLDPConfiguration": "Enabled", + "ScaleVLANProfile": "NA", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + 
"PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "FabricDesign": { + "@odata.id": "/api/NetworkService/Fabrics('1312cceb-c3dd-4348-95c1-123456')/FabricDesign" + } + } +] + + +@pytest.fixture +def ome_connection_mock_for_smart_fabric(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_smart_fabric.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeSmartFabric(FakeAnsibleModule): + module = ome_smart_fabric + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_main_ome_smart_fabric_exception_handling_case(self, exc_type, ome_default_args, + ome_connection_mock_for_smart_fabric, + ome_response_mock, mocker): + ome_default_args.update({"name": "name", "new_name": "new_name"}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'ome_smart_fabric.fabric_actions', + side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'ome_smart_fabric.fabric_actions', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + for status_code, msg in {501: SYSTEM_NOT_SUPPORTED_ERROR_MSG, 400: 'http error message'}.items(): + mocker.patch(MODULE_PATH + 'ome_smart_fabric.fabric_actions', + side_effect=exc_type('http://testhost.com', status_code, msg, + {"accept-type": "application/json"}, StringIO(json_str))) + result = 
self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert msg in result['msg'] + assert 'msg' in result + + def test_get_msm_device_details_success_case(self, ome_connection_mock_for_smart_fabric, ome_default_args, mocker): + """ + success case: when provided design type and role type natches return the service tag and msm details + """ + ome_default_args.update({"fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + f_module = self.get_module_mock(params=ome_default_args) + resp_data = { + "Id": Constants.device_id1, + "value": [ + { + "Id": 10086, + "DeviceId": 10061, + "PublicAddress": [ + ome_default_args["hostname"], + "1000:mock_val" + ], + "Identifier": Constants.service_tag1, + "DomainRoleTypeValue": "LEAD", + "Version": "1.20.00", + }, + { + "Id": 13341, + "DeviceId": 13294, + "PublicAddress": [ + Constants.hostname2, + "1000:mocked_val" + ], + "Identifier": Constants.service_tag2, + "DomainTypeValue": "MSM", + "DomainRoleTypeValue": "MEMBER", + "Version": "1.20.00", + } + ] + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + service_tag, msm_version = self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag == Constants.service_tag1 + assert msm_version == "1.20.00" + + def test_get_msm_device_details_fqdn_success_case1(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + when hostname provided is fqdn and + success case: when provided design type and role type matches return the service tag and msm details + """ + ome_default_args.update( + {"hostname": "XX-XXXX.yyy.lab", "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + f_module = 
self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + resp_data = { + "Id": Constants.device_id1, + "value": [ + { + "Id": 10086, + "DeviceId": 10061, + "PublicAddress": [ + ome_default_args["hostname"], + "1000:mock_val" + ], + "Identifier": Constants.service_tag1, + "DomainRoleTypeValue": "LEAD", + "Version": "1.20.00", + }, + { + "Id": 13341, + "DeviceId": 13294, + "PublicAddress": [ + Constants.hostname2, + "1000:mocked_val" + ], + "Identifier": Constants.service_tag2, + "DomainTypeValue": "MSM", + "DomainRoleTypeValue": "MEMBER", + "Version": "1.20.00", + } + ] + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value="FKMLRZ2") + service_tag, msm_version = self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag == Constants.service_tag1 + assert msm_version == "1.20.00" + + def test_get_msm_device_details_fqdn_success_case2(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + when hostname provided is fqdn and + success case: when provided design type is same and fqdn is not of lead type + """ + ome_default_args.update( + {"hostname": "XX-XXXX.yyy.lab", "fabric_design": "2xMX5108n_Ethernet_Switches_in_same_chassis"}) + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + resp_data = { + "Id": Constants.device_id1, + "value": [ + { + "Id": 10086, + "DeviceId": 10061, + "PublicAddress": [ + Constants.hostname1, + "1000:mock_ipv6" + ], + "Identifier": Constants.service_tag1, + "DomainRoleTypeValue": "LEAD", + "Version": "1.20.00", + }, + { + "Id": 13341, + "DeviceId": 13294, + "PublicAddress": [ + Constants.hostname2, + "1001:mocked_ippv6" + 
], + "Identifier": Constants.service_tag2, + "DomainTypeValue": "MSM", + "DomainRoleTypeValue": "MEMBER", + "Version": "1.20.10", + } + ] + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value=Constants.service_tag2) + service_tag, msm_version = self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag == Constants.service_tag2 + assert msm_version == "1.20.10" + + def test_get_msm_device_details_fqdn_failure_case1(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + when hostname provided is fqdn and + failure case: when provided design type is 2xMX9116n_Fabric_Switching_Engines_in_different_chassis + but fqdn is not of lead type + """ + ome_default_args.update( + {"hostname": "XX-XXXX.yyy.lab", "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + f_module = self.get_module_mock(params=ome_default_args) + resp_data = { + "Id": Constants.device_id1, + "value": [ + { + "Id": 10086, + "DeviceId": 10061, + "PublicAddress": [ + Constants.hostname1, + "1000:mock_val" + ], + "Identifier": Constants.service_tag1, + "DomainRoleTypeValue": "LEAD", + "Version": "1.20.00", + }, + { + "Id": 13341, + "DeviceId": 13294, + "PublicAddress": [ + Constants.hostname2, + "1000:mocked_val" + ], + "Identifier": Constants.service_tag2, + "DomainTypeValue": "MSM", + "DomainRoleTypeValue": "MEMBER", + "Version": "1.20.00", + } + ] + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value=Constants.service_tag2) + with pytest.raises(Exception, match=LEAD_CHASSIS_ERROR_MSG.format(ome_default_args["fabric_design"])) as ex: + 
self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + + def test_get_msm_device_details_fqdn_failure_case2(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + when hostname provided is fqdn and + failure case: when provided fqdn not available in domain list should throw an error + """ + ome_default_args.update( + {"hostname": "XX-XXXX.yyy.lab", "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + f_module = self.get_module_mock(params=ome_default_args) + resp_data = { + "value": [ + ] + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value="FPTN6Z2") + with pytest.raises(Exception, match=SYSTEM_NOT_SUPPORTED_ERROR_MSG): + self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + + def test_get_msm_device_details_failure_case_01(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + raise exception if design type is 2xMX9116n_Fabric_Switching_Engines_in_different_chassis but domain type is not lead + """ + ome_default_args.update({"fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + resp_data = {"Id": Constants.device_id1, "value": [ + { + "@odata.id": "/api/ManagementDomainService/Domains(25038)", + "Id": 25038, + "DeviceId": Constants.device_id1, + "PublicAddress": [ + ome_default_args["hostname"] + ], + "Name": "MX-2H5DNX2", + "Description": "PowerEdge MX7000", + "Identifier": Constants.service_tag1, + "DomainTypeId": 4000, + "DomainTypeValue": "MSM", + "DomainRoleTypeId": 3002, + 
"DomainRoleTypeValue": "STANDALONE", + "Version": "1.20.00", + "Local": True, + "GroupId": "d78ba475-f5d5-4dbb-97da-b4b1f190caa2", + "GroupName": None, + "BackupLead": False, + "Capabilities": [], + "BackupLeadHealth": 2000 + } + ]} + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value=None) + with pytest.raises(Exception, match=LEAD_CHASSIS_ERROR_MSG.format(ome_default_args["fabric_design"])) as ex: + self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + + def test_get_msm_device_details_failure_case_02(self, ome_connection_mock_for_smart_fabric, ome_default_args, + mocker): + """ + raise exception if there is no domain values in system + """ + ome_default_args.update({"fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"}) + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_ip_from_host', + return_value=ome_default_args["hostname"]) + resp_data = {"Id": None, "value": [ + ]} + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_service_tag_with_fqdn', + return_value=None) + with pytest.raises(Exception, match=SYSTEM_NOT_SUPPORTED_ERROR_MSG): + self.module.get_msm_device_details(ome_connection_mock_for_smart_fabric, f_module) + + @pytest.mark.parametrize("modify_payload", [ + {"Name": "Fabric-2"}, + {"Name": "Fabric-1", "Description": "This is a fabric1."}, + {"FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], }, + { + "FabricDesign": { + "Name": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + } + }, + { + "FabricDesignMapping": [ + {"DesignNode": "Switch-B", "PhysicalNode": 
Constants.service_tag2}, + {"DesignNode": "Switch-A", "PhysicalNode": Constants.service_tag1}] + } + ]) + def test_compare_payloads_diff_case_01(self, modify_payload): + current_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + diff = self.module.compare_payloads(modify_payload, current_payload) + assert diff is True + + @pytest.mark.parametrize("current_payload", [ + {"Name": "Fabric-1", "Description": "This is a fabric1."}, + {"Name": "Fabric-1", "Description": "This is a fabric.", "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + }}]) + def test_compare_payloads_diff_case_02(self, current_payload): + modify_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + diff = self.module.compare_payloads(modify_payload, current_payload) + assert diff is True + + @pytest.mark.parametrize("modify_payload", [ + {"Name": "Fabric-1", "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f"}, + {"Name": "Fabric-1", "Description": "This is a fabric.", "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", }, + {"Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", "Name": "Fabric-1", "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + 
}, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], }, + { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + }, + { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + ]) + def test_compare_payloads_no_diff_case_01(self, modify_payload): + current_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + val = self.module.compare_payloads(modify_payload, current_payload) + # print(val) + assert val is False + + @pytest.mark.parametrize('val', [{'msg': CHECK_MODE_CHANGE_FOUND_MSG, + "current_payload": {"Name": "Fabric-1", "Description": "This is a fabric.", + "FabricDesignMapping": [{"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}, + {"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}, + "expected_payload": {"Name": "Fabric-1", "Description": "This is a fabric.", + "FabricDesignMapping": [{"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag2}, + {"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag1}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}}, + {'msg': CHECK_MODE_CHANGE_NOT_FOUND_MSG, + 
"current_payload": {"Name": "Fabric-1", "Description": "This is a fabric.", + "FabricDesignMapping": [{"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}, + {"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}, + "expected_payload": {"Name": "Fabric-1", "Description": "This is a fabric.", + "FabricDesignMapping": [{"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}, + {"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}}, + {'msg': CHECK_MODE_CHANGE_NOT_FOUND_MSG, "current_payload": {"Name": "Fabric-1", + "Description": "This is list order change case.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}, + "expected_payload": {"Name": "Fabric-1", + "Description": "This is list order change case.", + "FabricDesignMapping": [{"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2}, + {"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}}}, + {'msg': CHECK_MODE_CHANGE_NOT_FOUND_MSG, + "current_payload": {'Id': 'fa9f1b12-c003-4772-8b90-601d0bf87c69', + 'Name': 'MX9116N', 'OverrideLLDPConfiguration': 'Disabled', + 'FabricDesignMapping': [ + {'DesignNode': 'Switch-B', 'PhysicalNode': '6XLVMR2'}, + {'DesignNode': 'Switch-A', 'PhysicalNode': '6XLTMR2'}], + 'FabricDesign': { + 'Name': '2xMX9116n_Fabric_Switching_Engines_in_different_chassis'}}, + "expected_payload": {'Name': 'MX9116N', 'OverrideLLDPConfiguration': 'Disabled', + 'FabricDesignMapping': [ + {'DesignNode': 'Switch-A', 'PhysicalNode': '6XLTMR2'}, + {'DesignNode': 'Switch-B', 
'PhysicalNode': '6XLVMR2'}], + 'FabricDesign': { + 'Name': '2xMX9116n_Fabric_Switching_Engines_in_different_chassis'}, + 'Id': 'fa9f1b12-c003-4772-8b90-601d0bf87c69'}} + ]) + def test_idempotency_check_for_state_present_modify_check_mode_case01(self, mocker, val): + f_module = self.get_module_mock(params={}, check_mode=True) + error_message = val["msg"] + with pytest.raises(Exception) as err: + self.module.idempotency_check_for_state_present("8f25f714-9ea8-48e9-8eac-162d5d842e9f", + val['current_payload'], val['expected_payload'], + f_module) + assert err.value.args[0] == error_message + + def test_idempotency_check_for_state_present_modify_non_check_mode_case01(self, mocker): + f_module = self.get_module_mock(params={}, check_mode=False) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.compare_payloads', + return_value=False) + with pytest.raises(Exception, match=IDEMPOTENCY_MSG): + self.module.idempotency_check_for_state_present("8f25f714-9ea8-48e9-8eac-162d5d842e9f", + {}, {}, + f_module) + + def test_idempotency_check_for_state_present_create_non_check_mode_case01(self, mocker): + f_module = self.get_module_mock(params={}, check_mode=True) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.compare_payloads', + return_value=False) + with pytest.raises(Exception, match=CHECK_MODE_CHANGE_FOUND_MSG): + self.module.idempotency_check_for_state_present(None, + {}, {}, + f_module) + + def test_design_node_dict_update_case_01(self): + design_node_map = [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ] + val = self.module.design_node_dict_update(design_node_map) + assert val == { + 'PhysicalNode1': Constants.service_tag1, + 'PhysicalNode2': Constants.service_tag2 + } + + def test_design_node_dict_update_case_02(self): + design_node_map = [ + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ] + val = 
self.module.design_node_dict_update(design_node_map) + assert val == { + 'PhysicalNode2': Constants.service_tag2 + } + + def test_design_node_dict_update_case_03(self): + design_node_map = [ + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ] + val = self.module.design_node_dict_update(design_node_map) + assert val == { + 'PhysicalNode2': Constants.service_tag2 + } + + @pytest.mark.parametrize("modify_payload", [ + { + 'PhysicalNode1': Constants.service_tag2, + 'PhysicalNode2': Constants.service_tag1 + } + ]) + def test_validate_switches_overlap_case_01(self, modify_payload): + current_dict = { + 'PhysicalNode1': Constants.service_tag1, + 'PhysicalNode2': Constants.service_tag2 + } + modify_dict = modify_payload + f_module = self.get_module_mock(params={"primary_switch_service_tag": Constants.service_tag2, + "secondary_switch_service_tag": Constants.service_tag1 + }) + with pytest.raises(Exception, match="The modify operation does not support primary_switch_service_tag update."): + self.module.validate_switches_overlap(current_dict, modify_dict, f_module) + + @pytest.mark.parametrize("modify_payload", [ + { + 'PhysicalNode1': Constants.service_tag2, + 'PhysicalNode2': Constants.service_tag1 + } + ]) + def test_validate_switches_overlap_case_02(self, modify_payload): + current_dict = { + 'PhysicalNode1': Constants.service_tag2, + 'PhysicalNode2': Constants.service_tag1 + } + modify_dict = modify_payload + f_module = self.get_module_mock(params={"primary_switch_service_tag": Constants.service_tag2, + "secondary_switch_service_tag": Constants.service_tag1 + }) + self.module.validate_switches_overlap(current_dict, modify_dict, f_module) + + def test_validate_switches_overlap_case_03(self): + """ + interchanging switches should be allowed + """ + current_dict = { + 'PhysicalNode1': Constants.service_tag1, + 'PhysicalNode2': Constants.service_tag2 + } + modify_dict = { + 'PhysicalNode1': Constants.service_tag1, + 'PhysicalNode2': 
Constants.service_tag2 + } + f_module = self.get_module_mock(params={"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2 + }) + self.module.validate_switches_overlap(current_dict, modify_dict, f_module) + + def test_fabric_design_map_payload_creation_case01(self, mocker): + modify_payload = [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ] + current_payload = [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "xyz123" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "abc456" + } + ] + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_switches_overlap', return_value=None) + f_module = self.get_module_mock(params={}) + design_map = self.module.fabric_design_map_payload_creation(modify_payload, current_payload, f_module) + assert design_map == modify_payload + + def test_fabric_design_map_payload_creation_case02(self, mocker): + modify_payload = [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + } + ] + current_payload = [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "xyz123" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "abc456" + } + ] + f_module = self.get_module_mock(params={}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_switches_overlap', return_value=None) + design_map = self.module.fabric_design_map_payload_creation(modify_payload, current_payload, f_module) + assert design_map == [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "abc456" + } + ] + + def test_fabric_design_map_payload_creation_case03(self, mocker): + modify_payload = [ + ] + current_payload = [ + ] + f_module = self.get_module_mock(params={}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_switches_overlap', return_value=None) + design_map = 
self.module.fabric_design_map_payload_creation(modify_payload, current_payload, f_module) + assert design_map == [] + + def test_merge_payload_case_01(self): + modify_payload = { + "Name": "new_name", + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + } + current_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + f_module = self.get_module_mock(params={}) + payload = self.module.merge_payload(modify_payload, current_payload, f_module) + assert payload["Name"] == modify_payload["Name"] + assert payload["Id"] == modify_payload["Id"] + assert payload["Description"] == current_payload["Description"] + assert payload["FabricDesignMapping"] == current_payload["FabricDesignMapping"] + assert payload["FabricDesign"] == current_payload["FabricDesign"] + + def test_merge_payload_case_02(self): + modify_payload = { + "Name": "new_name", + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }], + "FabricDesign": { + "Name": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + } + } + current_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "Description": "This is a fabric.", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + f_module = self.get_module_mock(params={}) + payload = self.module.merge_payload(modify_payload, current_payload, f_module) + assert payload["Name"] == 
modify_payload["Name"] + assert payload["Id"] == modify_payload["Id"] + assert payload["Description"] == current_payload["Description"] + assert payload["FabricDesign"] == modify_payload["FabricDesign"] + assert payload["FabricDesignMapping"] == [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ] + + def test_merge_payload_case_03(self): + modify_payload = { + "Name": "new_name", + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "FabricDesign": { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + } + current_payload = { + "Id": "8f25f714-9ea8-48e9-8eac-162d5d842e9f", + "Name": "Fabric-1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1 + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2 + } + ], + "Description": "This is a fabric." + } + f_module = self.get_module_mock(params={}) + payload = self.module.merge_payload(modify_payload, current_payload, f_module) + assert payload["Name"] == modify_payload["Name"] + assert payload["Id"] == modify_payload["Id"] + assert payload["Description"] == current_payload["Description"] + assert payload["FabricDesign"] == modify_payload["FabricDesign"] + assert payload["FabricDesignMapping"] == current_payload["FabricDesignMapping"] + + def test_get_fabric_design(self, ome_connection_mock_for_smart_fabric, ome_response_mock): + resp_data = { + "Name": "2xMX5108n_Ethernet_Switches_in_same_chassis" + } + ome_response_mock.json_data = resp_data + fabric_design_uri = "/api/NetworkService/Fabrics('0bebadec-b61b-4b16-b354-5196396a4a18')/FabricDesign" + fabric_design = self.module.get_fabric_design(fabric_design_uri, ome_connection_mock_for_smart_fabric) + assert fabric_design == {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + + def test_get_current_payload(self, mocker, ome_connection_mock_for_smart_fabric): + fabric_details = { + 
"Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "OverrideLLDPConfiguration": "NA", + "ScaleVLANProfile": "NA", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": { + "@odata.id": "/api/NetworkService/Fabrics('1312cceb-c3dd-4348-95c1-d8541a17d776')/FabricDesign" + } + } + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_design', + return_value={"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}) + payload = self.module.get_current_payload(fabric_details, ome_connection_mock_for_smart_fabric) + assert payload == { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + + def test_get_current_payload_case02(self, mocker, ome_connection_mock_for_smart_fabric): + fabric_details = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "OverrideLLDPConfiguration": "Disabled", + "ScaleVLANProfile": "NA", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": { + "@odata.id": "/api/NetworkService/Fabrics('1312cceb-c3dd-4348-95c1-d8541a17d776')/FabricDesign" + } + } + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_design', + return_value={"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}) + payload = self.module.get_current_payload(fabric_details, ome_connection_mock_for_smart_fabric) + assert payload == { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "OverrideLLDPConfiguration": "Disabled", + 
"Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + + @pytest.mark.parametrize("params, expected", [({"name": "fabric1"}, {"Name": "fabric1"}), + ({"name": "fabric1", "description": "fabric desc"}, + {"Name": "fabric1", "Description": "fabric desc"}), + ({"name": "fabric1", "description": "fabric desc", + "override_LLDP_configuration": "Enabled"}, + {"Name": "fabric1", "Description": "fabric desc", + "OverrideLLDPConfiguration": "Enabled"} + )]) + def test_create_modify_payload_case_01(self, params, expected, ome_default_args): + ome_default_args.update(params) + payload = self.module.create_modify_payload(ome_default_args, None, "1.1") + assert payload == expected + + def test_create_modify_payload_case_02(self, ome_default_args): + params = {"name": "fabric1", "new_name": "fabric2", "primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX5108n_Ethernet_Switches_in_same_chassis", + "override_LLDP_configuration": "Disabled"} + ome_default_args.update(params) + payload = self.module.create_modify_payload(ome_default_args, "1312cceb-c3dd-4348-95c1-d8541a17d776", "1.0") + assert payload["FabricDesignMapping"] == [{"DesignNode": "Switch-A", + "PhysicalNode": Constants.service_tag1}, + {"DesignNode": "Switch-B", + "PhysicalNode": Constants.service_tag2} + ] + assert payload["Name"] == "fabric2" + assert "OverrideLLDPConfiguration" not in payload + assert payload["FabricDesign"] == {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + assert payload["Id"] == "1312cceb-c3dd-4348-95c1-d8541a17d776" + + def test_get_fabric_id_cse_01(self): + fabric_id, fabric_id_details = self.module.get_fabric_id_details("Fabric_1", all_fabric_details) + assert 
fabric_id == "1312cceb-c3dd-4348-95c1-d8541a17d776" + assert fabric_id_details == all_fabric_details[0] + + def test_get_fabric_id_cse_02(self): + fabric_id, fabric_id_details = self.module.get_fabric_id_details("Fabric_New", all_fabric_details) + assert fabric_id is None + assert fabric_id_details is None + + def test_get_fabric_id_cse_03(self): + fabric_id, fabric_id_details = self.module.get_fabric_id_details("Fabric_1", []) + assert fabric_id is None + assert fabric_id_details is None + + @pytest.mark.parametrize("identifier, expected_type", [("primary_switch_service_tag", "NETWORK_IOM"), + ("secondary_switch_service_tag", "NETWORK_IOM"), + ("hostname", "CHASSIS")]) + def test_validate_device_type_case_01(self, ome_default_args, identifier, expected_type): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2}) + f_module = self.get_module_mock(params={identifier: "val"}) + with pytest.raises(Exception, match=DEVICE_SERVICE_TAG_TYPE_ERROR_MSG.format(identifier, expected_type)): + self.module.validate_device_type("SERVER", identifier, {}, f_module) + + @pytest.mark.parametrize("identifier", ["primary_switch_service_tag", "secondary_switch_service_tag"]) + def test_validate_device_type_case_02(self, ome_default_args, identifier): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX5108n_Ethernet_Switches_in_same_chassis" + }) + + f_module = self.get_module_mock(params=ome_default_args) + with pytest.raises(Exception, match=DESIGN_MODEL_ERROR_MSG.format(identifier, 'MX5108n')): + self.module.validate_device_type("NETWORK_IOM", identifier, device_details, f_module) + + @pytest.mark.parametrize("identifier", ["primary_switch_service_tag", "secondary_switch_service_tag"]) + def test_validate_device_type_case_03(self, ome_default_args, identifier): + 
ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + }) + + f_module = self.get_module_mock(params=ome_default_args) + self.module.validate_device_type("NETWORK_IOM", identifier, device_details, f_module) + + def test_validate_service_tag_case_01(self, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + }) + + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_device_type', return_value=None) + ome_connection_mock_for_smart_fabric.get_device_id_from_service_tag.return_value = {"value": device_details, + "Id": Constants.device_id1} + self.module.validate_service_tag(Constants.service_tag1, "primary_switch_service_tag", + {2000: "CHASSIS", 4000: "NETWORK_IOM", + 1000: "SERVER", + 3000: "STORAGE"}, ome_connection_mock_for_smart_fabric, f_module) + + def test_validate_service_tag_exception_case_01(self, mocker, ome_connection_mock_for_smart_fabric, + ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + }) + + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_device_type', return_value=None) + ome_connection_mock_for_smart_fabric.get_device_id_from_service_tag.return_value = {"value": {}, "Id": None} + with pytest.raises(Exception, match=DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG.format(Constants.service_tag1)): + self.module.validate_service_tag(Constants.service_tag1, "primary_switch_service_tag", 
+ {2000: "CHASSIS", 4000: "NETWORK_IOM", + 1000: "SERVER", + 3000: "STORAGE"}, ome_connection_mock_for_smart_fabric, f_module) + + @pytest.mark.parametrize("params", [{"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + }, + {"primary_switch_service_tag": None, + "secondary_switch_service_tag": None, + } + ]) + def test_validate_devices_case_01(self, params, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update(params) + + f_module = self.get_module_mock(params=ome_default_args) + ome_connection_mock_for_smart_fabric.get_device_type.return_value = {2000: "CHASSIS", 4000: "NETWORK_IOM", + 1000: "SERVER", + 3000: "STORAGE"} + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_service_tag', return_value=None) + self.module.validate_devices(Constants.service_tag1, ome_connection_mock_for_smart_fabric, f_module) + + def test_validate_devices_case_02(self, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag2, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis" + }) + + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_service_tag', return_value=None) + ome_connection_mock_for_smart_fabric.get_device_type.return_value = {2000: "CHASSIS", + 4000: "NETWORK_IOM", + 1000: "SERVER", + 3000: "STORAGE"} + self.module.validate_devices(Constants.service_tag1, ome_connection_mock_for_smart_fabric, f_module) + + def test_required_field_check_for_create_case_01(self, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": 
"2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + + f_module = self.get_module_mock(params=ome_default_args) + self.module.required_field_check_for_create("fabric_id", f_module) + + def test_required_field_check_for_create_case_02(self, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + + f_module = self.get_module_mock(params=ome_default_args) + self.module.required_field_check_for_create(None, f_module) + + @pytest.mark.parametrize("params", [{"primary_switch_service_tag": Constants.service_tag1}, + {"secondary_switch_service_tag": Constants.service_tag1}, + {"fabric_design": Constants.service_tag1}, + {"fabric_design": Constants.service_tag1, + "primary_switch_service_tag": Constants.service_tag1}, + {"fabric_design": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag1}, + {"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2}, + {"primary_switch_service_tag": None, + "secondary_switch_service_tag": None}, + {"primary_switch_service_tag": None, + "secondary_switch_service_tag": None} + ]) + def test_required_field_check_for_create_case_03(self, params, ome_default_args): + ome_default_args.update(params) + f_module = self.get_module_mock(params=ome_default_args) + with pytest.raises(Exception, match=REQUIRED_FIELD): + self.module.required_field_check_for_create(None, f_module) + + def test_process_output_case01(self, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + f_module = 
self.get_module_mock(params=ome_default_args) + with pytest.raises(Exception, match="Fabric modification operation is initiated.") as err: + self.module.process_output("Fabric1", True, "Fabric modification operation is initiated.", "1234", + ome_connection_mock_for_smart_fabric, f_module) + err.value.fail_kwargs['fabric_id'] == "1234" + + def test_process_output_case02(self, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + f_module = self.get_module_mock(params=ome_default_args) + resp = { + "error": { + "code": "Base.1.0.GeneralError", + "message": "A general error has occurred. See ExtendedInfo for more information.", + "@Message.ExtendedInfo": + [ + { + "MessageId": "CDEV7154", + "RelatedProperties": [], + "Message": "Fabric update is successful. The OverrideLLDPConfiguration attribute is not" + " provided " + " in the payload, so it preserves the previous value.", + "MessageArgs": [], + "Severity": "Informational", + "Resolution": "Please update the Fabric with the OverrideLLDPConfiguration as Disabled or" + " Enabled " + " if necessary. 
" + } + ] + } + } + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = {"value": all_fabric_details, + "total_count": 2} + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(all_fabric_details[0]["Id"], all_fabric_details[0])) + with pytest.raises(Exception, match="Fabric creation operation is initiated.") as err: + self.module.process_output("Fabric1", resp, "Fabric creation operation is initiated.", None, + ome_connection_mock_for_smart_fabric, f_module) + err.value.fail_kwargs['fabric_id'] == all_fabric_details[0]["Id"] + err.value.fail_kwargs['additional_info'] == resp + + def test_process_output_case03(self, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + f_module = self.get_module_mock(params=ome_default_args) + with pytest.raises(Exception, match="Fabric creation operation is initiated.") as err: + self.module.process_output("Fabric1", "1234", "Fabric creation operation is initiated.", None, + ome_connection_mock_for_smart_fabric, f_module) + err.value.fail_kwargs['fabric_id'] == "1234" + + def test_create_modify_fabric_modify_case_01(self, ome_connection_mock_for_smart_fabric, ome_default_args, mocker, + ome_response_mock): + ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + + mocker.patch(MODULE_PATH + 'ome_smart_fabric.required_field_check_for_create', + return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_msm_device_details', + return_value=(Constants.service_tag1, "1.1")) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_devices', 
return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_modify', return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(all_fabric_details[0]["Id"], all_fabric_details[0])) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.create_modify_payload', + return_value={"Name": "fabric2", "Description": "fabric desc2", + "OverrideLLDPConfiguration": "Enabled"}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_current_payload', + return_value={ + "Name": "fabric1", + "Description": "fabric desc1", + "OverrideLLDPConfiguration": "Enabled", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "3QM4WV2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "GTCT8T2" + } + ], + "FabricDesign": { + "Name": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + } + }) + mocker_merge_payload = mocker.patch(MODULE_PATH + 'ome_smart_fabric.merge_payload', + return_value={ + "Name": "fabric2", + "Description": "fabric desc2", + "OverrideLLDPConfiguration": "Enabled", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "3QM4WV2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "GTCT8T2" + } + ], + "FabricDesign": { + "Name": "2xMX9116n_Fabric_Switching_Engines_in_different_chassis" + } + }) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.idempotency_check_for_state_present', return_value=None) + mocker_process_output = mocker.patch(MODULE_PATH + 'ome_smart_fabric.process_output', return_value=None) + ome_response_mock.json_data = "true" + f_module = self.get_module_mock(params=ome_default_args) + self.module.create_modify_fabric("Fabric1", all_fabric_details, ome_connection_mock_for_smart_fabric, + f_module) + assert mocker_process_output.called + assert mocker_merge_payload.called + + def test_create_modify_fabric_create_case_02(self, ome_connection_mock_for_smart_fabric, ome_default_args, mocker, + ome_response_mock): + 
ome_default_args.update({"primary_switch_service_tag": Constants.service_tag1, + "secondary_switch_service_tag": Constants.service_tag2, + "fabric_design": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis", + "state": "present" + }) + + f_module = self.get_module_mock(params=ome_default_args) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.required_field_check_for_create', + return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_msm_device_details', + return_value=(Constants.service_tag1, "1.1")) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.validate_devices', return_value=None) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(None, {})) + mocker_create_modify_payload = mocker.patch(MODULE_PATH + 'ome_smart_fabric.create_modify_payload', + return_value={"Name": "fabric2", "Description": "fabric desc2", + "OverrideLLDPConfiguration": "Enabled"}) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.idempotency_check_for_state_present', return_value=None) + ome_response_mock.json_data = "123456789abcd" + mocker_process_output = mocker.patch(MODULE_PATH + 'ome_smart_fabric.process_output', return_value=None) + self.module.create_modify_fabric("Fabric1", all_fabric_details, ome_connection_mock_for_smart_fabric, + f_module) + assert mocker_process_output.called + assert mocker_create_modify_payload.called + + def test_check_fabric_exits_for_state_absent_non_check_mode_case01(self, mocker, + ome_connection_mock_for_smart_fabric, + ome_default_args): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + + f_module = self.get_module_mock(params=ome_default_args, check_mode=False) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(None, {})) + with pytest.raises(Exception, match=FABRIC_NOT_FOUND_ERROR_MSG.format("Fabric1")): + self.module.check_fabric_exits_for_state_absent(all_fabric_details[0], f_module, "Fabric1") + + def 
test_check_fabric_exits_for_state_absent_non_check_mode_case02(self, mocker, + ome_connection_mock_for_smart_fabric, + ome_default_args): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + + f_module = self.get_module_mock(params=ome_default_args, check_mode=False) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(all_fabric_details[0]["Id"], all_fabric_details[0])) + fabric_id = self.module.check_fabric_exits_for_state_absent(all_fabric_details[0], f_module, "Fabric1") + assert fabric_id == all_fabric_details[0]["Id"] + + def test_check_fabric_exits_for_state_absent_check_mode_case01(self, mocker, + ome_connection_mock_for_smart_fabric, + ome_default_args): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(None, {})) + with pytest.raises(Exception, match=CHECK_MODE_CHANGE_NOT_FOUND_MSG): + self.module.check_fabric_exits_for_state_absent(all_fabric_details[0], f_module, "Fabric1") + + def test_check_fabric_exits_for_state_absent_check_mode_case02(self, mocker, + ome_connection_mock_for_smart_fabric, + ome_default_args): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + mocker.patch(MODULE_PATH + 'ome_smart_fabric.get_fabric_id_details', + return_value=(all_fabric_details[0]["Id"], all_fabric_details[0])) + with pytest.raises(Exception, match=CHECK_MODE_CHANGE_FOUND_MSG): + self.module.check_fabric_exits_for_state_absent(all_fabric_details[0], f_module, "Fabric1") + + def test_delete_fabric(self, ome_connection_mock_for_smart_fabric, ome_default_args, mocker): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + 
mocker.patch(MODULE_PATH + 'ome_smart_fabric.check_fabric_exits_for_state_absent', + return_value=all_fabric_details[0]["Id"]) + with pytest.raises(Exception, match="Fabric deletion operation is initiated.") as err: + self.module.delete_fabric(all_fabric_details, ome_connection_mock_for_smart_fabric, f_module, "Fabric1") + err.value.fail_kwargs['fabric_id'] == all_fabric_details[0]["Id"] + + def test_fabric_actions_case_01(self, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({ + "state": "absent", + "name": "Fabric1" + }) + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = {"value": all_fabric_details, + "total_count": 2} + delete_fabric = mocker.patch(MODULE_PATH + 'ome_smart_fabric.delete_fabric', + return_value=None) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + self.module.fabric_actions(ome_connection_mock_for_smart_fabric, f_module) + assert delete_fabric.called + + def test_fabric_actions_case_02(self, mocker, ome_connection_mock_for_smart_fabric, ome_default_args): + ome_default_args.update({ + "state": "present", + "name": "Fabric1" + }) + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = {"value": all_fabric_details, + "total_count": 2} + create_modify_fabric = mocker.patch(MODULE_PATH + 'ome_smart_fabric.create_modify_fabric', + return_value=None) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + self.module.fabric_actions(ome_connection_mock_for_smart_fabric, f_module) + assert create_modify_fabric.called + + def test_get_service_tag_with_fqdn_success_case(self, ome_default_args, ome_connection_mock_for_smart_fabric): + ome_default_args.update({"hostname": "M-YYYY.abcd.lab"}) + resp_data = { + "@odata.context": "/api/$metadata#Collection(DeviceService.Device)", + "@odata.count": 2, + "value": [ + { + "@odata.type": "#DeviceService.Device", + "@odata.id": 
"/api/DeviceService/Devices(Constants.device_id1)", + "Id": Constants.device_id1, + "Type": 2000, + "Identifier": Constants.service_tag1, + "DeviceServiceTag": Constants.service_tag1, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "ManagedState": 3000, + "Status": 4000, + "ConnectionState": True, + "AssetTag": None, + "SystemId": 2031, + "DeviceName": "MX-Constants.service_tag1", + "LastInventoryTime": "2020-07-11 17:00:18.925", + "LastStatusTime": "2020-07-11 09:00:07.444", + "DeviceSubscription": None, + "DeviceCapabilities": [ + 18, + 8, + 201, + 202 + ], + "SlotConfiguration": { + "ChassisName": None + }, + "DeviceManagement": [ + { + "ManagementId": 111111, + "NetworkAddress": ome_default_args["hostname"], + "MacAddress": "xx:yy:zz:x1x1", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag1", + "DnsName": "M-YYYY.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 111111, + "ProfileId": "MSM_BASE", + "ManagementId": 111111, + "ManagementURL": "https://" + ome_default_args["hostname"] + ":443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + }, + { + "ManagementId": 33333, + "NetworkAddress": "[1234.abcd:5678:345]", + "MacAddress": "22:xx:yy:11", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag1", + "DnsName": "M-YYYY.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 33333, + "ProfileId": "MSM_BASE", + "ManagementId": 33333, + "ManagementURL": "https://[1234:abcd:567:xyzs]:443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + } + ], + "Actions": None + }, + { + "@odata.type": "#DeviceService.Device", + "@odata.id": "/api/DeviceService/Devices(Constants.device_id1)", + "Id": Constants.device_id1, + "Type": 2000, + "Identifier": Constants.service_tag2, + "DeviceServiceTag": Constants.service_tag2, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + 
"PowerState": 17, + "ManagedState": 3000, + "Status": 4000, + "ConnectionState": True, + "AssetTag": None, + "SystemId": 2031, + "DeviceName": "MX-Constants.service_tag2", + "LastInventoryTime": "2020-07-11 17:00:18.925", + "LastStatusTime": "2020-07-11 09:00:07.444", + "DeviceSubscription": None, + "DeviceCapabilities": [ + 18, + 8, + 201, + 202 + ], + "SlotConfiguration": { + "ChassisName": None + }, + "DeviceManagement": [ + { + "ManagementId": 111111, + "NetworkAddress": ome_default_args["hostname"], + "MacAddress": "xx:yy:zz:x1x1", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag2", + "DnsName": "M-XXXX.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 111111, + "ProfileId": "MSM_BASE", + "ManagementId": 111111, + "ManagementURL": "https://" + ome_default_args["hostname"] + ":443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + }, + { + "ManagementId": 22222, + "NetworkAddress": "[1234.abcd:5678:345]", + "MacAddress": "22:xx:yy:11", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag2", + "DnsName": "M-XXXX.abcd.lab", + "ManagementProfile": [{ + "ManagementProfileId": 22222, + "ProfileId": "MSM_BASE", + "ManagementId": 22222, + "ManagementURL": "https://[1234:abcd:567:xyzs]:443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + }] + } + ], + "Actions": None + } + ] + } + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + service_tag = self.module.get_service_tag_with_fqdn(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag == Constants.service_tag1 + + def test_get_service_tag_with_fqdn_success_case2(self, ome_default_args, ome_connection_mock_for_smart_fabric): + ome_default_args.update({"hostname": Constants.hostname1}) + resp_data = { + "@odata.context": 
"/api/$metadata#Collection(DeviceService.Device)", + "@odata.count": 2, + "value": [ + { + "@odata.type": "#DeviceService.Device", + "@odata.id": "/api/DeviceService/Devices(Constants.device_id1)", + "Id": Constants.device_id1, + "Type": 2000, + "Identifier": Constants.service_tag1, + "DeviceServiceTag": Constants.service_tag1, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "ManagedState": 3000, + "Status": 4000, + "ConnectionState": True, + "AssetTag": None, + "SystemId": 2031, + "DeviceName": "MX-Constants.service_tag1", + "LastInventoryTime": "2020-07-11 17:00:18.925", + "LastStatusTime": "2020-07-11 09:00:07.444", + "DeviceSubscription": None, + "DeviceCapabilities": [ + 18, + 8, + 201, + 202 + ], + "SlotConfiguration": { + "ChassisName": None + }, + "DeviceManagement": [ + { + "ManagementId": 111111, + "NetworkAddress": "192.168.1.1", + "MacAddress": "xx:yy:zz:x1x1", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag1", + "DnsName": "M-YYYY.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 111111, + "ProfileId": "MSM_BASE", + "ManagementId": 111111, + "ManagementURL": "https://" + ome_default_args["hostname"] + ":443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + }, + { + "ManagementId": 33333, + "NetworkAddress": "[1234.abcd:5678:345]", + "MacAddress": "22:xx:yy:11", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag1", + "DnsName": "M-YYYY.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 33333, + "ProfileId": "MSM_BASE", + "ManagementId": 33333, + "ManagementURL": "https://[1234:abcd:567:xyzs]:443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + } + ], + "Actions": None + }, + { + "@odata.type": "#DeviceService.Device", + "@odata.id": "/api/DeviceService/Devices(Constants.device_id1)", + "Id": Constants.device_id1, + "Type": 2000, + "Identifier": 
Constants.service_tag2, + "DeviceServiceTag": Constants.service_tag2, + "ChassisServiceTag": None, + "Model": "PowerEdge MX7000", + "PowerState": 17, + "ManagedState": 3000, + "Status": 4000, + "ConnectionState": True, + "AssetTag": None, + "SystemId": 2031, + "DeviceName": "MX-Constants.service_tag2", + "LastInventoryTime": "2020-07-11 17:00:18.925", + "LastStatusTime": "2020-07-11 09:00:07.444", + "DeviceSubscription": None, + "DeviceCapabilities": [ + 18, + 8, + 201, + 202 + ], + "SlotConfiguration": { + "ChassisName": None + }, + "DeviceManagement": [ + { + "ManagementId": 111111, + "NetworkAddress": "192.168.1.2", + "MacAddress": "xx:yy:zz:x1x1", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag2", + "DnsName": "M-XXXX.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 111111, + "ProfileId": "MSM_BASE", + "ManagementId": 111111, + "ManagementURL": "https://" + ome_default_args["hostname"] + ":443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + }, + { + "ManagementId": 22222, + "NetworkAddress": "[1234.abcd:5678:345]", + "MacAddress": "22:xx:yy:11", + "ManagementType": 2, + "InstrumentationName": "MX-Constants.service_tag2", + "DnsName": "M-XXXX.abcd.lab", + "ManagementProfile": [ + { + "ManagementProfileId": 22222, + "ProfileId": "MSM_BASE", + "ManagementId": 22222, + "ManagementURL": "https://[1234:abcd:567:xyzs]:443", + "HasCreds": 0, + "Status": 1000, + "StatusDateTime": "2020-07-11 17:00:18.925" + } + ] + } + ], + "Actions": None + } + ] + } + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + service_tag = self.module.get_service_tag_with_fqdn(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag is None + + def test_get_service_tag_with_fqdn_success_case3(self, ome_default_args, ome_connection_mock_for_smart_fabric): + 
ome_default_args.update({"hostname": Constants.hostname1}) + resp_data = {"value": []} + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + ome_connection_mock_for_smart_fabric.get_all_items_with_pagination.return_value = resp_data + service_tag = self.module.get_service_tag_with_fqdn(ome_connection_mock_for_smart_fabric, f_module) + assert service_tag is None + + def test_fabric_validate_modify_case01(self, ome_default_args): + ome_default_args.update({"fabric_design": "2xMX5108n_Ethernet_Switches_in_same_chassis"}) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + self.module.validate_modify(f_module, current_payload) + + def test_fabric_validate_modify_case02(self, ome_default_args): + ome_default_args.update({"name": "abc"}) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + self.module.validate_modify(f_module, current_payload) + + def test_fabric_validate_modify_case03(self, ome_default_args): + ome_default_args.update({"fabric_design": "2xMX5108n_Ethernet_Switches_in_same_chassis"}) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + 
"Description": "create new fabric1", + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"}, + "FabricDesignMapping": [ + { + "DesignNode": "Switch-A", + "PhysicalNode": "2HB7NX2" + }, + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + } + self.module.validate_modify(f_module, current_payload) + + def test_fabric_validate_modify_case05(self, ome_default_args): + ome_default_args.update({"primary_switch_service_tag": "abc"}) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + self.module.validate_modify(f_module, current_payload) + + def test_fabric_validate_modify_case07(self, ome_default_args): + ome_default_args.update({"name": "abc"}) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + { + "DesignNode": "Switch-B", + "PhysicalNode": "2HBFNX2" + } + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + self.module.validate_modify(f_module, current_payload) + + @pytest.mark.parametrize("param", [{"secondary_switch_service_tag": "abc"}, {"primary_switch_service_tag": "abc"}]) + def test_fabric_validate_modify_case08(self, param, ome_default_args): + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesignMapping": [ + ], + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + 
self.module.validate_modify(f_module, current_payload) + + @pytest.mark.parametrize("param", [{"secondary_switch_service_tag": "abc"}, {"primary_switch_service_tag": "abc"}]) + def test_fabric_validate_modify_case09(self, param, ome_default_args): + ome_default_args.update(param) + f_module = self.get_module_mock(params=ome_default_args, check_mode=True) + current_payload = { + "Id": "1312cceb-c3dd-4348-95c1-d8541a17d776", + "Name": "Fabric_", + "Description": "create new fabric1", + "FabricDesign": {"Name": "2xMX5108n_Ethernet_Switches_in_same_chassis"} + } + self.module.validate_modify(f_module, current_payload) diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py new file mode 100644 index 00000000..6670499e --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py @@ -0,0 +1,386 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_smart_fabric_uplink +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_smart_fabric_uplink.' 
+ + +@pytest.fixture +def ome_connection_mock_for_smart_fabric_uplink(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeSmartFabricUplink(FakeAnsibleModule): + module = ome_smart_fabric_uplink + + @pytest.mark.parametrize("params", + [{"success": True, "json_data": {"value": [{"Name": "vlan_name", "Id": 123}]}, "id": 123}, + {"success": True, "json_data": {"value": []}, "id": 0}, + {"success": False, "json_data": {"value": [{"Name": "vlan_name", "Id": 123}]}, "id": 0}, + {"success": True, "json_data": {"value": [{"Name": "vlan_name1", "Id": 123}]}, "id": 0}]) + def test_get_item_id(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + id, vlans = self.module.get_item_id(ome_connection_mock_for_smart_fabric_uplink, "vlan_name", "uri") + assert id == params["id"] + + @pytest.mark.parametrize( + "params", [{"uplinks": [{"Ports": [1, 2]}, {"Ports": []}], "portlist": [1, 2]}, + {"uplinks": [{"Ports": [1, 2]}, {"Ports": [3, 4]}, {"Ports": [5, 4]}], + "portlist": [1, 2, 3, 4, 5, 4]}, + {"uplinks": [{"Ports": [1, 2]}, {"Ports": [3, 4]}], "portlist": [1, 2, 3, 4]}, ]) + def test_get_all_uplink_ports(self, params): + portlist = self.module.get_all_uplink_ports(params.get("uplinks")) + assert portlist == params.get("portlist") + + @pytest.mark.parametrize("params", [{"inp": {"tagged_networks": ["vlan_name"]}, "success": True, + "json_data": {"ApplicableUplinkNetworks": [{"Name": "vlan_name", "Id": 123}]}, + "payload": [123]}, ]) + def test_validate_networks(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + 
ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params.get("inp", {})) + vlan_payload = self.module.validate_networks(f_module, ome_connection_mock_for_smart_fabric_uplink, 1, 2) + assert vlan_payload == params["payload"] + + @pytest.mark.parametrize("params", [{"inp": {"tagged_networks": ["vlan_name1"]}, "success": True, + "json_data": {"ApplicableUplinkNetworks": [{"Name": "vlan_name", "Id": 123}]}, + "payload": [123], + "error_msg": "Networks with names vlan_name1 are not applicable or valid."}, + {"inp": {"tagged_networks": ["vlan_name1", "vlan_name2"]}, "success": True, + "json_data": {"ApplicableUplinkNetworks": [{"Name": "vlan_name", "Id": 123}]}, + "payload": [123], + "error_msg": "Networks with names {0} are not applicable " + "or valid.".format( + ",".join(set(["vlan_name1", "vlan_name2"])))}, ]) + def test_validate_networks_failure(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params.get("inp", {})) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.validate_networks(f_module, ome_connection_mock_for_smart_fabric_uplink, 1, 2) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", [ + {"inp": {"primary_switch_service_tag": "ABC123", "primary_switch_ports": ["ethernet1/1/7", "ethernet1/1/4"]}, + "success": True, "json_data": { + "InventoryInfo": [{"PortNumber": "ethernet1/1/6"}, {"PortNumber": "ethernet1/1/7"}, + {"PortNumber": "ethernet1/1/4"}]}, "get_item_id": (0, []), "payload": [123], + "uplinks": [{"Ports": [{"Id": "ethernet1/1/6"}]}, {"Ports": [{"Id": "ethernet1/1/4"}]}], + "error_msg": "Device with service tag ABC123 does not exist."}]) + def test_validate_ioms_failure(self, mocker, params, ome_connection_mock_for_smart_fabric_uplink, + ome_response_mock): + 
ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id"))) + f_module = self.get_module_mock(params=params.get("inp", {})) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.validate_ioms(f_module, ome_connection_mock_for_smart_fabric_uplink, params.get("uplinks")) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", [ + {"inp": {"primary_switch_service_tag": "ABC123", "primary_switch_ports": ["ethernet1/1/7", "ethernet1/1/4"]}, + "success": True, "json_data": { + "InventoryInfo": [{"PortNumber": "ethernet1/1/6"}, {"PortNumber": "ethernet1/1/7"}, + {"PortNumber": "ethernet1/1/4"}]}, "get_item_id": (2, []), "payload": [123], + "uplinks": [{"Ports": [{"Id": "ethernet1/1/6"}]}, {"Ports": [{"Id": "ethernet1/1/4"}]}], + "ioms": ['ABC123:ethernet1/1/7', 'ABC123:ethernet1/1/4']}]) + def test_validate_ioms(self, mocker, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id"))) + f_module = self.get_module_mock(params=params.get("inp", {})) + ioms = self.module.validate_ioms(f_module, ome_connection_mock_for_smart_fabric_uplink, params.get("uplinks")) + assert ioms == params.get("ioms") + + @pytest.mark.parametrize("params", [{"inp": {"untagged_network": "vlan_name1"}, "success": True, + "json_data": { + "ApplicableUplinkNetworks": [{"Name": "vlan_name", "VlanMaximum": 123}]}, + "vlan_id": 123, + "error_msg": "Native VLAN name vlan_name1 is not applicable or valid."}, ]) + def test_validate_native_vlan_failure(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = 
params["json_data"] + f_module = self.get_module_mock(params=params.get("inp", {})) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.validate_native_vlan(f_module, ome_connection_mock_for_smart_fabric_uplink, 1, 2) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("params", [{"inp": {"untagged_network": "vlan_name"}, "success": True, "json_data": { + "ApplicableUplinkNetworks": [{"Name": "vlan_name", "VlanMaximum": 123}]}, "vlan_id": 123}, ]) + def test_validate_native_vlan_failure(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params.get("inp", {})) + vlan_payload = self.module.validate_native_vlan(f_module, ome_connection_mock_for_smart_fabric_uplink, 1, 2) + assert vlan_payload == params["vlan_id"] + + def test_delete_uplink(self, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.json_data = {} + f_module = self.get_module_mock(params={"fabric_name": "f1", "name": "uplink1"}) + with pytest.raises(Exception, match="Successfully deleted the uplink.") as err: + self.module.delete_uplink(f_module, ome_connection_mock_for_smart_fabric_uplink, 12, 123) + + @pytest.mark.parametrize("params", [{"inp": {"fabric_name": "f1", "name": "uplink1"}, + "error_msg": "Mandatory parameter uplink_type not provided for uplink creation."}, + {"inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet"}, + "error_msg": "Mandatory parameter tagged_networks not provided for uplink creation."}, + {"inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "FEthernet", + "tagged_networks": ["vlan1"]}, "get_item_id": (0, []), + "error_msg": "Uplink Type FEthernet does not exist."}, { + "inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet", + 
"tagged_networks": ["vlan1"]}, "get_item_id": (2, []), + "error_msg": "Provide port details."}, { + "inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet", + "tagged_networks": ["vlan1"], + "primary_switch_service_tag": "ABC123", + "secondary_switch_service_tag": "ABC123"}, "get_item_id": (2, []), + "error_msg": "Primary and Secondary service tags must not be the same."}, { + "inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet", + "tagged_networks": ["vlan1"], + "primary_switch_service_tag": "ABC123", + "secondary_switch_service_tag": "XYZ123"}, "get_item_id": (2, []), + "validate_ioms": ["ST1:123", "ST2:345"], "validate_networks": [1, 2], + "check_mode": True, "error_msg": "Changes found to be applied."}, { + "inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet", + "tagged_networks": ["vlan1"], + "primary_switch_service_tag": "ABC123", + "secondary_switch_service_tag": "XYZ123"}, "get_item_id": (2, []), + "validate_ioms": ["ST1:123", "ST2:345"], "validate_networks": [1, 2], + "error_msg": "Successfully created the uplink."}, { + "inp": {"fabric_name": "f1", "name": "uplink1", "uplink_type": "Ethernet", + "tagged_networks": ["vlan1"], + "primary_switch_service_tag": "ABC123", + "secondary_switch_service_tag": "XYZ123", "ufd_enable": "Enabled", + "description": "uplink description", "untagged_network": "vlan2"}, + "get_item_id": (2, []), "validate_ioms": ["ST1:123", "ST2:345"], + "validate_networks": [1, 2], "validate_native_vlan": 1, + "error_msg": "Successfully created the uplink."}, ]) + def test_create_uplink(self, mocker, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + f_module = self.get_module_mock(params=params.get("inp", {}), check_mode=params.get("check_mode", False)) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id"))) + mocker.patch(MODULE_PATH + "validate_ioms", return_value=(params.get("validate_ioms"))) + mocker.patch(MODULE_PATH + 
"validate_networks", return_value=(params.get("validate_networks"))) + mocker.patch(MODULE_PATH + "validate_native_vlan", return_value=(params.get("validate_native_vlan"))) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.create_uplink(f_module, ome_connection_mock_for_smart_fabric_uplink, params.get("fabric_id", 0), + []) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize( + "params", [{"inp": {"fabric_name": "f1", "name": "uplink1", "new_name": "uplink2", + "description": "modified from OMAM", "uplink_type": "Ethernet", + "ufd_enable": "Enabled", "untagged_network": "vlan2"}, + "uplink_id": {"Id": "9cf5a5ee-aecc-45d1-a113-5c4055ab3b4c", "Name": "create1", + "Description": "CREATED from OMAM", + "MediaType": "Ethernet", "NativeVLAN": 0, "UfdEnable": "NA", + "Ports": [{"Id": "2HBFNX2:ethernet1/1/14"}, {"Id": "2HB7NX2:ethernet1/1/13"}], + "Networks": [{"Id": 36011}]}, + "uplinks": [], + "get_item_id": (2, []), "validate_ioms": ["ST1:123", "ST2:345"], + "validate_networks": [1, 2], "validate_native_vlan": 1, + "error_msg": "Successfully modified the uplink."}, + {"inp": {"fabric_name": "f1", "name": "uplink1", "new_name": "uplink2", + "description": "modified from OMAM", "uplink_type": "Ethernet", + "ufd_enable": "Enabled", "untagged_network": "vlan2"}, + "uplink_id": {"Id": "9cf5a5ee-aecc-45d1-a113-5c4055ab3b4c", "Name": "create1", + "Description": "CREATED from OMAM", "MediaType": "Ethernet", "NativeVLAN": 0, + "UfdEnable": "NA", + "Ports": [{"Id": "2HBFNX2:ethernet1/1/14"}, {"Id": "2HB7NX2:ethernet1/1/13"}], + "Networks": [{"Id": 36011}]}, + "uplinks": [], "get_item_id": (2, []), + "validate_ioms": ["ST1:123", "ST2:345"], "validate_networks": [1, 2], "validate_native_vlan": 1, + "check_mode": True, "error_msg": "Changes found to be applied."}, + {"inp": {"fabric_name": "f1", "name": "uplink1", "new_name": "uplink2", + "uplink_type": "FEthernet"}, + "uplink_id": {"Id": 
"9cf5a5ee-aecc-45d1-a113-5c4055ab3b4c", "Name": "create1", + "Description": "CREATED from OMAM", + "MediaType": "Ethernet", "NativeVLAN": 0, "UfdEnable": "NA", + "Ports": [{"Id": "2HBFNX2:ethernet1/1/14"}, {"Id": "2HB7NX2:ethernet1/1/13"}], + "Networks": [{"Id": 36011}]}, + "uplinks": [], "get_item_id": (2, []), + "validate_ioms": ["ST1:123", "ST2:345"], "validate_networks": [1, 2], + "validate_native_vlan": 1, "error_msg": "Uplink Type cannot be modified."}, ]) + def test_modify_uplink(self, mocker, params, ome_connection_mock_for_smart_fabric_uplink, ome_response_mock): + f_module = self.get_module_mock(params=params.get("inp", {}), check_mode=params.get("check_mode", False)) + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id"))) + mocker.patch(MODULE_PATH + "validate_ioms", return_value=(params.get("validate_ioms"))) + mocker.patch(MODULE_PATH + "validate_networks", return_value=(params.get("validate_networks"))) + mocker.patch(MODULE_PATH + "validate_native_vlan", return_value=(params.get("validate_native_vlan"))) + error_message = params["error_msg"] + with pytest.raises(Exception) as err: + self.module.modify_uplink(f_module, ome_connection_mock_for_smart_fabric_uplink, params.get("fabric_id", 0), + params.get("uplink_id", {}), params.get("uplinks", [])) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize( + "params", [{"inp": {"name": "uplink1", "fabric_name": "fabric1"}, + "error_msg": "state is present but any of the following are missing: new_name, description," + " uplink_type, ufd_enable, primary_switch_service_tag, primary_switch_ports, " + "secondary_switch_service_tag, secondary_switch_ports, tagged_networks, untagged_network"}, + {"inp": {"name": "uplink1"}, + "error_msg": "missing required arguments: fabric_name"}, + {"inp": {"fabric_name": "fabric1"}, + "error_msg": "missing required arguments: name"}, ]) + def test_main_case_failures(self, mocker, params, ome_default_args, 
ome_connection_mock_for_smart_fabric_uplink, + ome_response_mock): + ome_default_args.update(params.get("inp")) + ome_response_mock.json_data = params.get("json_data") + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id"))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == params.get("error_msg") + + @pytest.mark.parametrize( + "params", [{"inp": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + "get_item_id": (0, []), "error_msg": "Fabric with name fabric1 does not exist."}, + {"inp": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + "get_item_id": (1, []), + "get_item_and_list": ({'Id': 1}, []), "error_msg": "Successfully deleted the uplink."}, + {"inp": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + "get_item_id": (1, []), + "get_item_and_list": ({'Id': 1}, []), "check_mode": True, + "error_msg": "Changes found to be applied."}, ]) + def _test_main_case_failures2(self, mocker, params, ome_default_args, ome_connection_mock_for_smart_fabric_uplink, + ome_response_mock): + ome_default_args.update(params.get("inp")) + ome_response_mock.json_data = params.get("json_data") + mocker.patch(MODULE_PATH + "get_item_id", return_value=(params.get("get_item_id", (0, [])))) + mocker.patch(MODULE_PATH + "get_item_and_list", return_value=(params.get("get_item_and_list"))) + result = self.execute_module(ome_default_args, check_mode=params.get("check_mode", False)) + assert result['msg'] == params.get("error_msg") + + @pytest.mark.parametrize("params", [ + {"fail_json": True, "json_data": {"JobId": 1234}, + "get_item_id": (0, []), + "mparams": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + 'message': "Fabric with name fabric1 does not exist.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "get_item_id": (1, 
[]), "get_item_and_list": ({}, []), "check_mode": True, + "mparams": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + 'message': "No changes found to be applied to the uplink configuration.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "get_item_id": (1, []), "get_item_and_list": ({}, []), "check_mode": False, + "mparams": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + 'message': "Uplink uplink1 does not exist.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "get_item_id": (1, []), "get_item_and_list": ({"Name": 'u1', 'Id': 12}, []), "check_mode": True, + "mparams": {"state": "absent", "name": "uplink1", "fabric_name": "fabric1", "ufd_enable": "Enabled"}, + 'message': "Changes found to be applied.", "success": True + }, + {"fail_json": True, "json_data": {"JobId": 1234}, + "get_item_id": (1, []), "get_item_and_list": + ({"Id": "12", "Name": "u1", "Description": "Ethernet_Uplink", "NativeVLAN": 1, "UfdEnable": "NA", + "Ports": [{"Id": "2HB7NX2:ethernet1/1/13", "Name": ""}, + {"Id": "2HB7NX2:ethernet1/1/12", "Name": ""}], + "Networks": [{"Id": 31554, "Name": "VLAN2", }]}, + [{"Id": "12", "Name": "u1", "Description": "Ethernet_Uplink", "NativeVLAN": 1, "UfdEnable": "NA", + "Ports": [{"Id": "2HB7NX2:ethernet1/1/13", "Name": "", }, + {"Id": "2HB7NX2:ethernet1/1/12", "Name": "", }], + "Networks": [{"Id": 31554, "Name": "VLAN2", }]}, + {"Name": 'u2', 'Id': 13}]), + "mparams": {"state": "present", "name": "u1", "fabric_name": "fabric1", + "primary_switch_service_tag": "SVTAG1", "primary_switch_ports": [1, 2], + "secondary_switch_service_tag": 'SVTAG1', "secondary_switch_ports": [1, 2]}, + 'message': "Primary and Secondary service tags must not be the same.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "get_item_id": (1, []), "get_item_and_list": + ({}, [{"Id": "12", "Name": "u1", 
"Description": "Ethernet_Uplink", "NativeVLAN": 1, + "UfdEnable": "NA", "Ports": [{"Id": "2HB7NX2:ethernet1/1/13", "Name": "", }, + {"Id": "2HB7NX2:ethernet1/1/12", "Name": "", }], + "Networks": [{"Id": 31554, "Name": "VLAN2", }]}, {"Name": 'u2', 'Id': 13}]), + "validate_networks": ['a', 'b'], "validate_ioms": ['a', 'b'], + "mparams": {"state": "present", "name": "u1", "fabric_name": "fabric1", "uplink_type": 'Ethernet', + "tagged_networks": ['a', 'b'], + "primary_switch_service_tag": "SVTAG1", "primary_switch_ports": [1, 2], + "secondary_switch_service_tag": 'SVTAG2', "secondary_switch_ports": [1, 2]}, + 'message': "Successfully created the uplink.", "success": True + }, + {"fail_json": False, "json_data": {"JobId": 1234}, + "get_item_id": (1, []), "get_item_and_list": + ({"Id": "12", "Name": "u1", "Description": "Ethernet_Uplink", "NativeVLAN": 1, "UfdEnable": "NA", + "Ports": [{"Id": "2HB7NX2:ethernet1/1/13", "Name": "", }, + {"Id": "2HB7NX2:ethernet1/1/12", "Name": "", }], + "Networks": [{"Id": 31554, "Name": "VLAN2", }]}, + [{"Id": "12", "Name": "u1", "Description": "Ethernet_Uplink", "NativeVLAN": 1, + "UfdEnable": "NA", "Ports": [{"Id": "2HB7NX2:ethernet1/1/13", "Name": "", }, + {"Id": "2HB7NX2:ethernet1/1/12", "Name": "", }], + "Networks": [{"Id": 31554, "Name": "VLAN2", }]}, {"Name": 'u2', 'Id': 13}]), + "validate_networks": ['a', 'b'], "validate_ioms": ['a', 'b'], + "mparams": {"state": "present", "name": "u1", "fabric_name": "fabric1", + "tagged_networks": ['a', 'b'], + "primary_switch_service_tag": "SVTAG1", "primary_switch_ports": [1, 2], + "secondary_switch_service_tag": 'SVTAG2', "secondary_switch_ports": [1, 2]}, + 'message': "Successfully modified the uplink.", "success": True + }, + ]) + def test_main(self, params, ome_connection_mock_for_smart_fabric_uplink, ome_default_args, ome_response_mock, + mocker): + mocker.patch(MODULE_PATH + 'get_item_id', return_value=params.get("get_item_id")) + mocker.patch(MODULE_PATH + 'get_item_and_list', 
return_value=params.get("get_item_and_list")) + mocker.patch(MODULE_PATH + 'validate_networks', return_value=params.get("validate_networks")) + mocker.patch(MODULE_PATH + 'validate_ioms', return_value=params.get("validate_ioms")) + ome_response_mock.success = True + ome_response_mock.json_data = params.get("json_data") + ome_default_args.update(params.get('mparams')) + if params.get("fail_json", False): + result = self._run_module_with_fail_json(ome_default_args) + else: + result = self._run_module(ome_default_args, check_mode=params.get("check_mode", False)) + assert result["msg"] == params['message'] + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError]) + def test_ome_smart_fabric_uplink_main_exception_failure_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_smart_fabric_uplink, + ome_response_mock): + ome_default_args.update({"name": "uplink1", "state": "present", "fabric_name": "f1", "new_name": "uplink2"}) + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'get_item_id', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'get_item_id', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'get_item_id', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'uplink_id' not in result + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py new file mode 100644 index 00000000..27c84ffa --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.2.0 +# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_template +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_template.' 
+ + +@pytest.fixture +def ome_connection_mock_for_template(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []} + return ome_connection_mock_obj + + +TEMPLATE_RESOURCE = {"TEMPLATE_RESOURCE": "TemplateService/Templates"} + + +class TestOmeTemplate(FakeAnsibleModule): + module = ome_template + + @pytest.fixture + def get_template_resource_mock(self, mocker): + response_class_mock = mocker.patch( + MODULE_PATH + '_get_resource_parameters') + return response_class_mock + + def test_get_service_tags_success_case(self, ome_connection_mock_for_template, ome_response_mock): + ome_connection_mock_for_template.get_all_report_details.return_value = { + "report_list": [{"Id": Constants.device_id1, + "DeviceServiceTag": Constants.service_tag1}]} + f_module = self.get_module_mock({'device_id': [], 'device_service_tag': [Constants.service_tag1]}) + data = self.module.get_device_ids(f_module, ome_connection_mock_for_template) + assert data == [Constants.device_id1] + + def test_get_device_ids_failure_case01(self, ome_connection_mock_for_template, ome_response_mock, ome_default_args): + ome_response_mock.json_data = {'value': []} + ome_response_mock.success = False + f_module = self.get_module_mock(params={'device_id': ["#@!1"]}) + with pytest.raises(Exception) as exc: + self.module.get_device_ids(f_module, ome_connection_mock_for_template) + assert exc.value.args[0] == "Unable to complete the operation because the entered target device id(s) " \ + "'{0}' are invalid.".format("#@!1") + + @pytest.mark.parametrize("params", + [{"mparams": { + "attributes": { + "Attributes": [ + { + "Id": 93812, + "IsIgnored": False, + "Value": "Aisle Five" + }, + { + "DisplayName": 'System, Server Topology, ServerTopology 
1 Aisle Name', + "IsIgnored": False, + "Value": "Aisle 5" + } + ] + }}, "success": True, + "json_data": { + "Id": 11, + "Name": "ProfileViewEditAttributes", + "AttributeGroupNames": [], + "AttributeGroups": [ + { + "GroupNameId": 5, + "DisplayName": "System", + "SubAttributeGroups": [ + { + "GroupNameId": 33016, + "DisplayName": "Server Operating System", + "SubAttributeGroups": [], + "Attributes": [ + { + "AttributeId": 93820, + "DisplayName": "ServerOS 1 Server Host Name", + "Value": None, + "IsReadOnly": False, + "IsIgnored": True, + } + ] + }, + { + "GroupNameId": 33019, + "DisplayName": "Server Topology", + "SubAttributeGroups": [], + "Attributes": [ + { + "AttributeId": 93812, + "DisplayName": "ServerTopology 1 Aisle Name", + "Value": "Aisle 5", + "IsReadOnly": False, + "IsIgnored": True, + }, + { + "AttributeId": 93811, + "DisplayName": "ServerTopology 1 Data Center Name", + "Description": None, + "Value": "BLG 2nd Floor DS 1", + "IsReadOnly": False, + "IsIgnored": True, + }, + { + "AttributeId": 93813, + "DisplayName": "ServerTopology 1 Rack Name", + "Description": None, + "Value": None, + "IsReadOnly": False, + "IsIgnored": True, + }, + { + "AttributeId": 93814, + "DisplayName": "ServerTopology 1 Rack Slot", + "Description": None, + "Value": None, + "IsReadOnly": False, + "IsIgnored": True, + } + ] + } + ], + "Attributes": [] + }, + { + "GroupNameId": 9, + "DisplayName": "iDRAC", + "SubAttributeGroups": [ + { + "GroupNameId": 32688, + "DisplayName": "Active Directory", + "SubAttributeGroups": [], + "Attributes": [ + { + "AttributeId": 93523, + "DisplayName": "ActiveDirectory 1 Active Directory RAC Name", + "Description": None, + "Value": None, + "IsReadOnly": False, + "IsIgnored": True, + } + ] + }, + { + "GroupNameId": 32930, + "DisplayName": "NIC Information", + "SubAttributeGroups": [], + "Attributes": [ + { + "AttributeId": 93035, + "DisplayName": "NIC 1 DNS RAC Name", + "Description": None, + "Value": None, + "IsReadOnly": False, + "IsIgnored": True, 
+ }, + { + "AttributeId": 92510, + "DisplayName": "NIC 1 Enable VLAN", + "Description": None, + "Value": "Disabled", + "IsReadOnly": False, + "IsIgnored": False, + } + ] + } + ], + "Attributes": []}]}, + "diff": 2}]) + def test_attributes_check(self, params, ome_connection_mock_for_template, ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params["mparams"]) + result = self.module.attributes_check(f_module, ome_connection_mock_for_template, + params['mparams']['attributes'], 123) + assert result == params["diff"] + + def test_get_device_ids_failure_case_02(self, ome_connection_mock_for_template, ome_response_mock, + ome_default_args): + ome_connection_mock_for_template.get_all_report_details.return_value = { + "report_list": [{"Id": Constants.device_id1, + "DeviceServiceTag": Constants.service_tag1}, + {"Id": Constants.device_id2, + "DeviceServiceTag": "tag2"} + ]} + f_module = self.get_module_mock(params={'device_id': [Constants.device_id2], 'device_service_tag': ["abcd"]}) + with pytest.raises(Exception) as exc: + self.module.get_device_ids(f_module, ome_connection_mock_for_template) + assert exc.value.args[0] == "Unable to complete the operation because the entered target service tag(s) " \ + "'{0}' are invalid.".format('abcd') + + def test_get_device_ids_for_no_device_failue_case_03(self, ome_connection_mock_for_template, ome_response_mock, + ome_default_args): + ome_connection_mock_for_template.get_all_report_details.return_value = { + "report_list": [{"Id": Constants.device_id1, + "DeviceServiceTag": Constants.service_tag1} + ], "resp_obj": ome_response_mock} + f_module = self.get_module_mock(params={'device_service_tag': [Constants.service_tag1], 'device_id': []}) + with pytest.raises(Exception) as exc: + device_ids = self.module.get_device_ids(f_module, ome_connection_mock_for_template) + assert exc.value.args[0] == "Failed to fetch the 
device ids." + + def test_get_view_id_success_case(self, ome_connection_mock_for_template, ome_response_mock): + ome_response_mock.json_data = {'value': [{"Description": "", 'Id': 2}]} + ome_response_mock.status_code = 200 + ome_response_mock.success = True + result = self.module.get_view_id(ome_response_mock, "Deployment") + assert result == 2 + + create_payload = {"Fqdds": "All", # Mandatory for create + "ViewTypeId": 4, "attributes": {"Name": "create template name"}, "SourceDeviceId": 2224} + + @pytest.mark.parametrize("param", [{"Fqdds": "All", # Mandatory for create + "ViewTypeId": 4, "attributes": {"Name": "create template name"}, + "SourceDeviceId": 2224}]) + def test_get_create_payload(self, param, ome_response_mock, ome_connection_mock_for_template): + f_module = self.get_module_mock(params=param) + data = self.module.get_create_payload(f_module, ome_connection_mock_for_template, 2224, 4) + assert data['Fqdds'] == "All" + + def test_get_template_by_id_success_case(self, ome_response_mock): + ome_response_mock.json_data = {'value': []} + ome_response_mock.status_code = 200 + ome_response_mock.success = True + f_module = self.get_module_mock() + data = self.module.get_template_by_id(f_module, ome_response_mock, 17) + assert data + + def test_get_template_by_name_success_case(self, ome_response_mock, ome_connection_mock_for_template): + ome_response_mock.json_data = {'value': [{"Name": "test Sample Template import1", "Id": 24}]} + ome_response_mock.status_code = 200 + ome_response_mock.success = True + f_module = self.get_module_mock() + data = self.module.get_template_by_name("test Sample Template import1", f_module, + ome_connection_mock_for_template) + assert data["Name"] == "test Sample Template import1" + assert data["Id"] == 24 + + def test_get_group_devices_all(self, ome_response_mock, ome_connection_mock_for_template): + ome_response_mock.json_data = {'value': [{"Name": "Device1", "Id": 24}]} + ome_response_mock.status_code = 200 + 
ome_response_mock.success = True + f_module = self.get_module_mock() + data = self.module.get_group_devices_all(ome_connection_mock_for_template, "uri") + assert data == [{"Name": "Device1", "Id": 24}] + + def _test_get_template_by_name_fail_case(self, ome_response_mock): + ome_response_mock.json_data = {'value': [{"Name": "template by name for template name", "Id": 12}]} + ome_response_mock.status_code = 500 + ome_response_mock.success = False + f_module = self.get_module_mock() + with pytest.raises(Exception) as exc: + self.module.get_template_by_name("template by name for template name", f_module, ome_response_mock) + assert exc.value.args[0] == "Unable to complete the operation because the" \ + " requested template with name {0} is not present." \ + .format("template by name for template name") + + create_payload = {"command": "create", "device_id": [25007], + "ViewTypeId": 4, "attributes": {"Name": "texplate999", "Fqdds": "All"}, "template_view_type": 4} + inter_payload = { + "Name": "texplate999", + "SourceDeviceId": 25007, + "Fqdds": "All", + "TypeId": 2, + "ViewTypeId": 2 + } + payload_out = ('TemplateService/Templates', + { + "Name": "texplate999", + "SourceDeviceId": 25007, + "Fqdds": "All", + "TypeId": 2, + "ViewTypeId": 2 + }, "POST") + + @pytest.mark.parametrize("params", [{"inp": create_payload, "mid": inter_payload, "out": payload_out}]) + def test__get_resource_parameters_create_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template, params): + f_module = self.get_module_mock(params=params["inp"]) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=[25007]) + mocker.patch(MODULE_PATH + 'get_view_id', + return_value=["Deployment"]) + mocker.patch(MODULE_PATH + 'get_create_payload', + return_value=params["mid"]) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == params["out"] + + modify_payload = {"command": "modify", "device_id": [25007], "template_id": 1234, 
+ "ViewTypeId": 4, "attributes": {"Name": "texplate999", "Fqdds": "All"}, "template_view_type": 4} + inter_payload = { + "Name": "texplate999", + "SourceDeviceId": 25007, + "Fqdds": "All", + "TypeId": 2, + "ViewTypeId": 2 + } + payload_out = ('TemplateService/Templates(1234)', + { + "Name": "texplate999", + "SourceDeviceId": 25007, + "Fqdds": "All", + "TypeId": 2, + "ViewTypeId": 2 + }, "PUT") + + @pytest.mark.parametrize("params", [{"inp": modify_payload, "mid": inter_payload, "out": payload_out}]) + def test__get_resource_parameters_modify_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template, params): + f_module = self.get_module_mock(params=params["inp"]) + mocker.patch(MODULE_PATH + 'get_template_by_id', + return_value={}) + mocker.patch(MODULE_PATH + 'get_modify_payload', + return_value={}) + mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == ('TemplateService/Templates(1234)', {}, 'PUT') + + def test__get_resource_parameters_delete_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "delete", "template_id": 1234}) + mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == ('TemplateService/Templates(1234)', {}, 'DELETE') + + def test__get_resource_parameters_export_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "export", "template_id": 1234}) + mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == 
('TemplateService/Actions/TemplateService.Export', {'TemplateId': 1234}, 'POST') + + def test__get_resource_parameters_deploy_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "deploy", "template_id": 1234}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=[Constants.device_id1]) + mocker.patch(MODULE_PATH + 'get_deploy_payload', + return_value={"deploy_payload": "value"}) + mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == ('TemplateService/Actions/TemplateService.Deploy', {"deploy_payload": "value"}, 'POST') + + def test__get_resource_parameters_clone_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "clone", "template_id": 1234, "template_view_type": 2}) + mocker.patch(MODULE_PATH + 'get_view_id', + return_value=2) + mocker.patch(MODULE_PATH + 'get_clone_payload', + return_value={"clone_payload": "value"}) + mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == ('TemplateService/Actions/TemplateService.Clone', {"clone_payload": "value"}, 'POST') + + def test__get_resource_parameters_import_success_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "import", "template_id": 1234, "template_view_type": 2}) + mocker.patch(MODULE_PATH + 'get_view_id', + return_value=2) + mocker.patch(MODULE_PATH + 'get_import_payload', + return_value={"import_payload": "value"}) + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert data == ('TemplateService/Actions/TemplateService.Import', 
{"import_payload": "value"}, 'POST') + + @pytest.mark.parametrize("params", [{"inp": {"command": "modify"}, "mid": inter_payload, "out": payload_out}]) + def test__get_resource_parameters_modify_template_none_failure_case(self, mocker, ome_response_mock, + ome_connection_mock_for_template, params): + f_module = self.get_module_mock(params=params["inp"]) + with pytest.raises(Exception) as exc: + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert exc.value.args[0] == "Enter a valid template_name or template_id" + + @pytest.mark.parametrize("params", + [{"success": True, "json_data": {"value": [{"Name": "template_name", "Id": 123}]}, + "id": 123, "gtype": True}, + {"success": True, "json_data": {}, "id": 0, "gtype": False}, + {"success": False, "json_data": {"value": [{"Name": "template_name", "Id": 123}]}, + "id": 0, "gtype": False}, + {"success": True, "json_data": {"value": [{"Name": "template_name1", "Id": 123}]}, + "id": 12, "gtype": False}]) + def test_get_type_id_valid(self, params, ome_connection_mock_for_template, + ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = params["json_data"] + id = self.module.get_type_id_valid(ome_connection_mock_for_template, params["id"]) + assert id == params["gtype"] + + @pytest.mark.parametrize("params", + [{"success": True, "json_data": {"value": [{"Description": "Deployment", "Id": 2}]}, + "view": "Deployment", "gtype": 2}, + {"success": True, "json_data": {}, "view": "Compliance", "gtype": 1}, + {"success": False, "json_data": {"value": [{"Description": "template_name", "Id": 1}]}, + "view": "Deployment", "gtype": 2}, + {"success": True, "json_data": {"value": [{"Description": "template_name1", "Id": 2}]}, + "view": "Deployment", "gtype": 2}]) + def test_get_view_id(self, params, ome_connection_mock_for_template, + ome_response_mock): + ome_response_mock.success = params["success"] + ome_response_mock.json_data = 
params["json_data"] + id = self.module.get_view_id(ome_connection_mock_for_template, params["view"]) + assert id == params["gtype"] + + @pytest.mark.parametrize("param", + [{"pin": {"NetworkBootIsoModel": {"ShareDetail": {"Password": "share_password"}}}}, + {"pin": {"NetworkBootIsoModel": {"ShareDetail": {"Password1": "share_password"}}}}, + {"pin": {"NetworkBootIsoModel": {"ShareDetail": [{"Password1": "share_password"}]}}}]) + def test_password_no_log(self, param): + attributes = param["pin"] + self.module.password_no_log(attributes) + + def test__get_resource_parameters_create_failure_case_02(self, mocker, ome_response_mock, + ome_connection_mock_for_template): + f_module = self.get_module_mock({"command": "create", "template_name": "name"}) + mocker.patch(MODULE_PATH + 'get_device_ids', + return_value=[Constants.device_id1, Constants.device_id2]) + mocker.patch(MODULE_PATH + 'get_template_by_name', + return_value=("template", 1234)) + with pytest.raises(Exception) as exc: + data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template) + assert exc.value.args[0] == "Create template requires only one reference device" + + def test_main_template_success_case2(self, ome_default_args, mocker, module_mock, ome_connection_mock_for_template, + get_template_resource_mock, ome_response_mock): + ome_connection_mock_for_template.__enter__.return_value = ome_connection_mock_for_template + ome_connection_mock_for_template.invoke_request.return_value = ome_response_mock + ome_response_mock.json_data = { + "value": [{"device_id": "1111", "command": "create", "attributes": {"Name": "new 1template name"}}]} + ome_response_mock.status_code = 200 + ome_default_args.update( + {"device_id": "1111", "command": "create", "attributes": {"Name": "new 1template name"}}) + ome_response_mock.success = True + mocker.patch(MODULE_PATH + '_get_resource_parameters', + return_value=(TEMPLATE_RESOURCE, "template_payload", "POST")) + result = 
self._run_module(ome_default_args) + assert result['changed'] is True + assert result['msg'] == "Successfully created a template with ID {0}".format(ome_response_mock.json_data) + + def test_get_import_payload_success_case_01(self, ome_connection_mock_for_template): + f_module = self.get_module_mock(params={"attributes": {"Name": "template1", "Content": "Content"}}) + self.module.get_import_payload(f_module, ome_connection_mock_for_template, 2) + + def test_get_deploy_payload_success_case_01(self): + module_params = {"attributes": {"Name": "template1"}} + self.module.get_deploy_payload(module_params, [Constants.device_id1], 1234) + + @pytest.mark.parametrize("param", + [{"mparams": {"attributes": {"Name": "template1"}}, "name": "template0", + "template_id": 123, + "clone_payload": {"SourceTemplateId": 123, "NewTemplateName": "template1", + "ViewTypeId": 2}}]) + def test_get_clone_payload_success_case_01(self, param, ome_connection_mock_for_template): + f_module = self.get_module_mock(param["mparams"]) + module_params = param["mparams"] + payload = self.module.get_clone_payload(f_module, ome_connection_mock_for_template, param['template_id'], 2) + assert payload == param['clone_payload'] + + @pytest.mark.parametrize("param", + [{"inp": {"command": "create", "template_name": "name", "device_id": [None], + "device_service_tag": [None]}, + "msg": "Argument device_id or device_service_tag has null values"}, + {"inp": {"command": "deploy", "template_name": "name", "device_id": [None], + "device_service_tag": [None]}, + "msg": "Argument device_id or device_service_tag has null values"}, + {"inp": {"command": "import", "template_name": "name", "device_id": [], + "device_service_tag": []}, + "msg": "Argument 'Name' required in attributes for import operation"}, + {"inp": {"command": "import", "attributes": {"Name": "name"}, "device_id": [], + "device_service_tag": []}, + "msg": "Argument 'Content' required in attributes for import operation"}, + {"inp": {"command": "clone", 
"template_name": "name", "device_id": [], + "device_service_tag": []}, + "msg": "Argument 'Name' required in attributes for clone operation"} + ]) + def test_validate_inputs(self, param, mocker): + f_module = self.get_module_mock(param["inp"]) + mocker.patch(MODULE_PATH + 'password_no_log') + with pytest.raises(Exception) as exc: + self.module._validate_inputs(f_module) + assert exc.value.args[0] == param["msg"] + + @pytest.mark.parametrize("param", [ + {"inp": {"command": "deploy", "template_name": "name", + "device_group_names": ["mygroup"]}, + "group": {'Id': 23, "Name": "mygroup"}, + "dev_list": [1, 2, 3]}]) + def test_get_group_details(self, param, ome_connection_mock_for_template, mocker, + ome_response_mock): + f_module = self.get_module_mock(param["inp"]) + ome_response_mock.json_data = { + "value": [{'Id': 1, "Name": "mygroup3"}, {'Id': 2, "Name": "mygroup2"}, {'Id': 3, "Name": "mygroup"}]} + ome_response_mock.status_code = 200 + mocker.patch(MODULE_PATH + 'get_group_devices_all', return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}]) + dev_list = self.module.get_group_details(ome_connection_mock_for_template, f_module) + assert dev_list == param["dev_list"] + + @pytest.mark.parametrize("param", [ + {"inp": {"command": "deploy", "template_name": "name", + "device_group_names": ["mygroup"]}, + "group": {'Id': 23, "Name": "mygroup"}, + "dev_list": [1, 2, 3]}]) + def test_modify_payload(self, param, ome_connection_mock_for_template, mocker, + ome_response_mock): + f_module = self.get_module_mock(param["inp"]) + ome_response_mock.json_data = { + "value": [{'Id': 1, "Name": "mygroup3"}, {'Id': 2, "Name": "mygroup2"}, {'Id': 3, "Name": "mygroup"}]} + ome_response_mock.status_code = 200 + mocker.patch(MODULE_PATH + 'get_group_devices_all', return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}]) + dev_list = self.module.get_group_details(ome_connection_mock_for_template, f_module) + assert dev_list == param["dev_list"] + + @pytest.mark.parametrize("params", [ + {"mparams": 
{"command": "modify", "name": "profile", "attributes": { + "Attributes": [ + { + "Id": 93812, + "IsIgnored": False, + "Value": "Aisle Five" + }, + { + "DisplayName": 'System, Server Topology, ServerTopology 1 Aisle Name', + "IsIgnored": False, + "Value": "Aisle 5" + }]}}, + "success": True, "template": {"Name": "template_name", "Id": 123, "Description": "temp described"}, + "json_data": 0, "get_template_by_name": {"Name": "template1", "Id": 122, "Description": "temp described"}, + "res": "No changes found to be applied."}, + {"mparams": {"command": "modify", "name": "profile", "attributes": { + "Name": "new_name", + "Attributes": [ + { + "Id": 93812, + "IsIgnored": False, + "Value": "Aisle Five" + }, + { + "DisplayName": 'System, Server Topology, ServerTopology 1 Aisle Name', + "IsIgnored": False, + "Value": "Aisle 5" + }]}}, "success": True, + "template": {"Name": "template_name", "Id": 123, "Description": "temp described"}, "json_data": 0, + "get_template_by_name": {"Name": "template1", "Id": 122, "Description": "temp described"}, + "res": "Template with name 'new_name' already exists."} + ]) + def test_modify_payload(self, params, ome_connection_mock_for_template, mocker, + ome_response_mock): + ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + mocker.patch(MODULE_PATH + 'get_template_by_name', return_value=params.get('get_template_by_name')) + mocker.patch(MODULE_PATH + 'attributes_check', return_value=params.get('attributes_check', 0)) + f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False)) + error_message = params["res"] + with pytest.raises(Exception) as err: + self.module.get_modify_payload(f_module, ome_connection_mock_for_template, params.get('template')) + assert err.value.args[0] == error_message + + @pytest.mark.parametrize("exc_type", + [IOError, ValueError, TypeError, ConnectionError, + HTTPError, URLError, SSLError]) + def 
test_main_template_exception_case(self, exc_type, mocker, ome_default_args, + ome_connection_mock_for_template, ome_response_mock): + ome_default_args.update({"command": "export", "template_name": "t1", 'attributes': {'Attributes': "myattr1"}}) + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"info": "error_details"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'password_no_log') + mocker.patch(MODULE_PATH + '_get_resource_parameters', side_effect=exc_type("url open error")) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + '_get_resource_parameters', side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + '_get_resource_parameters', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py new file mode 100644 index 00000000..0e6cbca4 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.1.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_template_identity_pool +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ssl import SSLError +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_template_identity_pool.' +template1 = \ + { + "@odata.context": "/api/$metadata#TemplateService.Template", + "@odata.type": "#TemplateService.Template", + "@odata.id": "/api/TemplateService/Templates(9)", + "Id": 9, + "Name": "template", + "Description": None, + "Content": None, + "SourceDeviceId": 10116, + "TypeId": 2, + "ViewTypeId": 2, + "TaskId": 10125, + "HasIdentityAttributes": True, + "Status": 2060, + "IdentityPoolId": 1, + "IsPersistencePolicyValid": True, + "IsStatelessAvailable": True, + "IsBuiltIn": False, + "CreatedBy": "admin", + "CreationTime": "2022-02-02 09:33:25.887057", + "LastUpdatedBy": "admin", + "LastUpdatedTime": "2022-02-02 13:53:37.443315", + "Views@odata.navigationLink": "/api/TemplateService/Templates(9)/Views", + "AttributeDetails": { + "@odata.id": "/api/TemplateService/Templates(9)/AttributeDetails" + } + } + + +@pytest.fixture +def ome_connection_mock_template_identity_pool(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class 
TestOMETemplateIdentityPool(FakeAnsibleModule): + module = ome_template_identity_pool + + @pytest.mark.parametrize("exc_type", [HTTPError, URLError, ValueError, TypeError, ConnectionError, SSLError]) + def test_main_template_identity_failure(self, exc_type, mocker, ome_default_args, + ome_connection_mock_template_identity_pool): + ome_default_args.update({"template_name": "template"}) + ome_connection_mock_template_identity_pool.json_data = {"template_name": "ansible_template"} + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + mocker.patch( + MODULE_PATH + 'get_template_id', + side_effect=exc_type('url error')) + result = self._run_module(ome_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch( + MODULE_PATH + 'get_template_id', + side_effect=exc_type('error')) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + else: + mocker.patch( + MODULE_PATH + 'get_identity_id', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str)) + ) + result = self._run_module_with_fail_json(ome_default_args) + assert result['failed'] is True + assert 'msg' in result + + def test_main_success(self, mocker, ome_default_args, ome_connection_mock_template_identity_pool, + ome_response_mock): + mocker.patch(MODULE_PATH + "get_template_id", return_value=template1) + mocker.patch(MODULE_PATH + "get_identity_id", return_value=10) + ome_default_args.update({"template_name": "template", "identity_pool_name": "pool_name"}) + ome_response_mock.json_data = {"msg": "Successfully assigned identity pool to template.", "changed": True} + ome_response_mock.success = True + ome_response_mock.status_code = 200 + result = self.execute_module(ome_default_args) + assert "msg" in result + assert result["msg"] == "Successfully attached identity pool to " \ + "template." 
+ + def test_get_template_vlan_info(self, ome_connection_mock_template_identity_pool, ome_response_mock): + f_module = self.get_module_mock(params={"nic_identifier": "NIC Slot 4"}) + temp_net_details = { + "AttributeGroups": [ + { + "GroupNameId": 1001, + "DisplayName": "NICModel", + "SubAttributeGroups": [{ + "GroupNameId": 1, + "DisplayName": "NIC Slot 4", + "SubAttributeGroups": [], + "Attributes": [] + }], + "Attributes": [] + }, + { + "GroupNameId": 1005, + "DisplayName": "NicBondingTechnology", + "SubAttributeGroups": [], + "Attributes": [{"AttributeId": 0, + "DisplayName": "Nic Bonding Technology", + "Description": None, "Value": "LACP", + "IsIgnored": False}] + } + ] + } + ome_response_mock.success = True + ome_response_mock.json_data = temp_net_details + nic_bonding_tech = self.module.get_template_vlan_info(ome_connection_mock_template_identity_pool, 12) + assert nic_bonding_tech == "LACP" + + def test_get_template_id(self, ome_connection_mock_template_identity_pool, ome_response_mock): + ome_response_mock.json_data = {"value": [{"Name": "template", "Id": 9, "IdentityPoolId": 1}]} + ome_response_mock.success = True + f_module = self.get_module_mock(params={"template_name": "template"}) + res_temp = self.module.get_template_id(ome_connection_mock_template_identity_pool, f_module) + assert res_temp == {"Name": "template", "Id": 9, "IdentityPoolId": 1} + + def test_get_identity_id(self, ome_connection_mock_template_identity_pool): + data = {"report_list": [{"Name": "pool_name", "Id": 10}]} + ome_connection_mock_template_identity_pool.get_all_report_details.return_value = data + f_module = self.get_module_mock(params={"identity_pool_name": "pool_name"}) + result = self.module.get_identity_id(ome_connection_mock_template_identity_pool, f_module) + assert result == 10 + + def test_get_identity_id_fail(self, ome_connection_mock_template_identity_pool, ome_response_mock): + data = {"report_list": [{"Name": "pool_name", "Id": 10}]} + 
ome_connection_mock_template_identity_pool.get_all_report_details.return_value = data + f_module = self.get_module_mock(params={"identity_pool_name": "invalid_pool_name"}) + with pytest.raises(Exception) as exc: + self.module.get_identity_id(ome_connection_mock_template_identity_pool, f_module) + assert exc.value.args[0] == "Unable to complete the operation because the requested identity pool with " \ + "name 'invalid_pool_name' is not present." diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py new file mode 100644 index 00000000..8f8bb328 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_template_info +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def ome_connection_template_info_mock(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_template_info.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeTemplateInfo(FakeAnsibleModule): + module = ome_template_info + + @pytest.mark.parametrize("module_params,data", [({"system_query_options": {"filter": "abc"}}, "$filter")]) + def test_get_query_parameters(self, module_params, data): + res = self.module._get_query_parameters(module_params) + if data is not None: + assert data in res + else: + assert res is None + + def test_get_template_info_success_case01(self, ome_default_args, ome_connection_template_info_mock, + ome_response_mock): + ome_response_mock.json_data = {"value": [""]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert 'template_info' in result + + def test_get_template_info_success_case02(self, mocker, ome_default_args, ome_connection_template_info_mock, + ome_response_mock): + ome_default_args.update({"template_id": "24"}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"template_id": "24"}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] is False + assert 'template_info' in result + + def test_get_template_info_success_case03(self, mocker, ome_default_args, ome_connection_template_info_mock, + ome_response_mock): + mocker.patch(MODULE_PATH + 'ome_template_info._get_query_parameters', + return_value={"filter": "abc"}) + ome_default_args.update({"system_query_options": {"filter": "abc"}}) + ome_response_mock.success = True + ome_response_mock.json_data = {"value": [{"filter": "abc"}]} + ome_response_mock.status_code = 200 + result = self._run_module(ome_default_args) + assert result['changed'] 
is False + assert 'template_info' in result + + def test_get_template_info_failure_case(self, ome_default_args, ome_connection_template_info_mock, + ome_response_mock): + ome_response_mock.status_code = 500 + result = self._run_module_with_fail_json(ome_default_args) + assert result['msg'] == 'Failed to fetch the template facts' + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_ome_template_info_main_exception_handling_case(self, exc_type, mocker, ome_default_args, + ome_connection_template_info_mock, ome_response_mock): + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + ome_connection_template_info_mock.invoke_request.side_effect = exc_type('test') + else: + ome_connection_template_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400, + 'http error message', + {"accept-type": "application/json"}, + StringIO(json_str)) + result = self._run_module_with_fail_json(ome_default_args) + assert 'template_info' not in result + assert 'msg' in result + assert result['failed'] is True diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py new file mode 100644 index 00000000..c182b2b9 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py @@ -0,0 +1,349 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. 
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +from io import StringIO +from ssl import SSLError + +import pytest +from ansible.module_utils._text import to_text +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.plugins.modules import ome_template_network_vlan +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +NO_CHANGES_MSG = "No changes found to be applied." +CHANGES_FOUND = "Changes found to be applied." +SUCCESS_MSG = "Successfully applied the network settings to the template." +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_template_network_vlan.' + + +@pytest.fixture +def ome_connection_mock_for_template_network_vlan(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeTemplateNetworkVlan(FakeAnsibleModule): + module = ome_template_network_vlan + + @pytest.mark.parametrize("params", [{"mparams": {"template_id": 123}, "success": True, "json_data": { + "value": [{"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}]}, + "res": {"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}}, + {"mparams": {"template_name": "vlan_name"}, "success": True, "json_data": { + "value": [{"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}]}, + "res": {"Name": "vlan_name", "Id": 123, "IdentityPoolId": 23}}]) + def test_get_template_details( + self, params, ome_connection_mock_for_template_network_vlan, ome_response_mock): + 
ome_response_mock.success = params.get("success", True) + ome_response_mock.json_data = params["json_data"] + f_module = self.get_module_mock(params=params["mparams"]) + result = self.module.get_template_details( + f_module, ome_connection_mock_for_template_network_vlan) + assert result == params["res"] + + @pytest.mark.parametrize("kv", [{"key": "1", "dct": {"one": "1", "two": "2"}, "res": "one"}, + {"key": "3", "dct": {"one": "1", "two": "2"}, "res": None}]) + def test_get_key(self, kv): + val = kv["key"] + d = kv["dct"] + k = self.module.get_key(val, d) + assert k == kv["res"] + + def test_get_vlan_name_id_map( + self, ome_connection_mock_for_template_network_vlan, ome_response_mock): + ome_response_mock.success = True + ome_response_mock.json_data = { + "value": [{"Name": "vlan1", "Id": 1}, {"Name": "vlan2", "Id": 2}]} + d = self.module.get_vlan_name_id_map( + ome_connection_mock_for_template_network_vlan) + assert d == {"vlan1": 1, "vlan2": 2} + + def test_get_template_vlan_info( + self, ome_connection_mock_for_template_network_vlan, ome_response_mock): + f_module = self.get_module_mock( + params={"nic_identifier": "NIC Slot 4"}) + temp_net_details = {"AttributeGroups": [{"GroupNameId": 1001, "DisplayName": "NICModel", "SubAttributeGroups": [ + {"GroupNameId": 1, "DisplayName": "NIC Slot 4", "SubAttributeGroups": [{"GroupNameId": 1, + "SubAttributeGroups": [ + {"GroupNameId": 1, + "DisplayName": "Partition", + "SubAttributeGroups": [], + "Attributes": [ + {"CustomId": 2302, + "DisplayName": "Vlan Tagged", + "Value": "12765, 12767, 12768"}, + {"CustomId": 2302, + "DisplayName": "Vlan UnTagged", + "Value": "12766"}]}], + "Attributes": []}, + {"GroupNameId": 2, + "DisplayName": "Port ", + "SubAttributeGroups": [ + {"GroupNameId": 1, + "DisplayName": "Partition ", + "SubAttributeGroups": [], + "Attributes": [ + {"CustomId": 2301, + "DisplayName": "Vlan Tagged", + "Value": "12766"}, + {"CustomId": 2301, + "DisplayName": "Vlan UnTagged", + "Value": "12767"}]}], + 
"Attributes": []}], + "Attributes": []}], "Attributes": []}, {"GroupNameId": 1005, "DisplayName": "NicBondingTechnology", + "SubAttributeGroups": [], "Attributes": [ + {"AttributeId": 0, "CustomId": 0, "AttributeEditInfoId": 0, "DisplayName": "Nic Bonding Technology", + "Description": None, "Value": "NIC bonding enabled", "IsReadOnly": False, "IsIgnored": False, + "IsSecure": False, "IsLinkedToSecure": False, "TargetSpecificTypeId": 0}]}]} + ome_response_mock.success = True + ome_response_mock.json_data = temp_net_details + port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech = self.module.get_template_vlan_info( + f_module, ome_connection_mock_for_template_network_vlan, 12) + assert port_id_map == {1: 2302, 2: 2301} + assert port_untagged_map == {1: 12766, 2: 12767} + assert port_tagged_map == {1: [12765, 12767, 12768], 2: [12766]} + + def test_get_vlan_payload( + self, mocker, ome_connection_mock_for_template_network_vlan): + f_module = self.get_module_mock(params={"template_id": 12}) + untag_dict = {1: 12766} + tagged_dict = {2: [12765, 12766]} + port_id_map = {1: 2302, 2: 2301} + port_untagged_map = {1: 12766, 2: 12767} + port_tagged_map = {1: [12765, 12767, 12768], 2: [12766]} + port_nic_bond_map = {1: True, 2: False} + nic_bonding_tech = "LACP" + mocker.patch(MODULE_PATH + 'get_template_details', + return_value={"Name": "vlan_name", "Id": 12, "IdentityPoolId": 23}) + mocker.patch(MODULE_PATH + 'get_template_vlan_info', return_value=( + port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech)) + payload = self.module.get_vlan_payload(f_module, ome_connection_mock_for_template_network_vlan, untag_dict, + tagged_dict) + assert payload["TemplateId"] == 12 + assert payload["VlanAttributes"] == [ + {"ComponentId": 2302, "Tagged": [ + 12765, 12767, 12768], "Untagged": 12766, 'IsNicBonded': True}, + {"ComponentId": 2301, "Tagged": [12765, 12766], "Untagged": 12767, 'IsNicBonded': False}] + + 
    # Error paths of get_vlan_payload: no-op change, and invalid ports in the
    # untagged / tagged maps. Each params dict carries the mocked template
    # state plus the exact failure message expected from the module.
    @pytest.mark.parametrize("params", [
        {"untag_dict": {1: 12766}, "tagged_dict": {2: [12765, 12766]},
         "port_id_map": {1: 2302, 2: 2301}, "port_untagged_map": {1: 12766}, "port_tagged_map": {2: [12765, 12766]},
         "mparams": {"template_id": 12}, "port_nic_bond_map": {1: True, 2: False}, 'nic_bonding_tech': "LACP",
         'message': "No changes found to be applied."},
        {"untag_dict": {3: 12766}, "tagged_dict": {2: [12765, 12766]},
         "port_id_map": {1: 2302, 2: 2301}, "port_untagged_map": {1: 12766}, "port_tagged_map": {2: [12765, 12766]},
         "mparams": {"template_id": 12}, "port_nic_bond_map": {1: True, 2: False}, 'nic_bonding_tech': "LACP",
         'message': "Invalid port(s) dict_keys([3]) found for untagged VLAN"},
        {"untag_dict": {1: 12766}, "tagged_dict": {3: [12765, 12766]},
         "port_id_map": {1: 2302, 2: 2301}, "port_untagged_map": {1: 12766}, "port_tagged_map": {2: [12765, 12766]},
         "mparams": {"template_id": 12}, "port_nic_bond_map": {1: True, 2: False}, 'nic_bonding_tech': "LACP",
         'message': "Invalid port(s) dict_keys([3]) found for tagged VLAN"},
    ])
    def test_get_vlan_payload_msg(
            self, params, ome_connection_mock_for_template_network_vlan, ome_default_args, ome_response_mock, mocker):
        # Template lookup and current VLAN layout are both mocked; only the
        # payload-building/validation logic of get_vlan_payload is under test.
        f_module = self.get_module_mock(params=params['mparams'])
        mocker.patch(MODULE_PATH + 'get_template_details',
                     return_value={"Name": "vlan_name", "Id": 12, "IdentityPoolId": 23})
        mocker.patch(MODULE_PATH + 'get_template_vlan_info', return_value=(
            params['port_id_map'], params['port_untagged_map'], params['port_tagged_map'],
            params['port_nic_bond_map'], params['nic_bonding_tech']))
        with pytest.raises(Exception) as exc:
            self.module.get_vlan_payload(f_module, ome_connection_mock_for_template_network_vlan, params['untag_dict'],
                                         params['tagged_dict'])
        assert exc.value.args[0] == params["message"]

    def test_validate_vlans(
            self, mocker, ome_connection_mock_for_template_network_vlan):
        # Happy path: names are resolved through the mocked name->id map and
        # ids are passed through; 0 is accepted as an explicit untagged id.
        f_module = self.get_module_mock(params={
            "tagged_networks": [{"port": 1, "tagged_network_ids": [1, 2]}, {"port": 2, "tagged_network_names": []},
                                {"port": 3, "tagged_network_names": ["bronze"]}],
            "untagged_networks": [{"port": 1, "untagged_network_name": "plat"}, {"port": 2, "untagged_network_id": 0},
                                  {"port": 3, "untagged_network_id": 4}]})
        mocker.patch(MODULE_PATH + 'get_vlan_name_id_map',
                     return_value={"vlan1": 1, "vlan2": 2, "gold": 3, "silver": 4, "plat": 5, "bronze": 6})
        untag_dict, tagged_dict = self.module.validate_vlans(
            f_module, ome_connection_mock_for_template_network_vlan)
        assert untag_dict == {1: 5, 2: 0, 3: 4}
        assert tagged_dict == {1: [1, 2], 2: [], 3: [6]}

    # Each case feeds one invalid combination (duplicate port, unknown id,
    # unknown name, or a vlan in both lists) and pins the exact error message.
    @pytest.mark.parametrize("params", [
        {"inp": {"untagged_networks": [{"port": 2, "untagged_network_name": "plat"},
                                       {"port": 2, "untagged_network_id": 0}]},
         "msg": "port 2 is repeated for untagged_network_id"},
        {"inp": {"tagged_networks": [{"port": 1, "tagged_network_ids": [1, 7]}, {"port": 2, "tagged_network_names": []},
                                     {"port": 3, "tagged_network_names": ["bronze"]}]},
         "msg": "7 is not a valid vlan id port 1"},
        {"inp": {"tagged_networks": [{"port": 1, "tagged_network_ids": []},
                                     {"port": 3, "tagged_network_names": ["bronzy"]}]},
         "msg": "bronzy is not a valid vlan name port 3"},
        {"inp": {"untagged_networks": [{"port": 2, "untagged_network_name": "platy"},
                                       {"port": 3, "untagged_network_id": 0}]},
         "msg": "platy is not a valid vlan name for port 2"},
        {"inp": {"untagged_networks": [{"port": 2, "untagged_network_name": "plat"},
                                       {"port": 1, "untagged_network_id": 7}]},
         "msg": "untagged_network_id: 7 is not a valid vlan id for port 1"},
        {"inp": {"tagged_networks": [{"port": 1, "tagged_network_ids": [1]}],
                 "untagged_networks": [{"port": 1, "untagged_network_id": 1}]},
         "msg": "vlan 1('vlan1') cannot be in both tagged and untagged list for port 1"}])
    def test_validate_vlans_failure(
            self, params, mocker, ome_connection_mock_for_template_network_vlan):
        f_module = self.get_module_mock(params["inp"])
        mocker.patch(MODULE_PATH + 'get_vlan_name_id_map',
                     return_value={"vlan1": 1, "vlan2": 2, "gold": 3, "silver": 4, "plat": 5, "bronze": 6})
        with pytest.raises(Exception) as exc:
            self.module.validate_vlans(
                f_module, ome_connection_mock_for_template_network_vlan)
        assert exc.value.args[0] == params["msg"]

    # compare_nested_dict must report False when any requested value differs
    # from the existing identity-pool payload.
    @pytest.mark.parametrize("modify_setting_payload",
                             [{"Description": "Identity pool with ethernet and fcoe settings2"}, {"Name": "pool2"},
                              {"EthernetSettings": {
                                  "Mac": {"IdentityCount": 61, "StartingMacAddress": "UFBQUFAA"}}},
                              {"Description": "Identity pool with ethernet and fcoe settings2",
                               "EthernetSettings": {"Mac": {"IdentityCount": 60, "StartingMacAddress": "UFBQUFAA"}},
                               "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}])
    def test_compare_payload_attributes_case_false(
            self, modify_setting_payload):
        existing_setting_payload = {"@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool",
                                    "@odata.type": "#IdentityPoolService.IdentityPool",
                                    "@odata.id": "/api/IdentityPoolService/IdentityPools(23)", "Id": 23,
                                    "Name": "pool1", "Description": "Identity pool with ethernet and fcoe settings1",
                                    "CreatedBy": "admin", "CreationTime": "2020-01-31 09:28:16.491424",
                                    "LastUpdatedBy": "admin", "LastUpdateTime": "2020-01-31 09:49:59.012549",
                                    "EthernetSettings": {
                                        "Mac": {"IdentityCount": 60, "StartingMacAddress": "UFBQUFAA"}},
                                    "IscsiSettings": None,
                                    "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}},
                                    "FcSettings": None, "UsageCounts": {
                                        "@odata.id": "/api/IdentityPoolService/IdentityPools(23)/UsageCounts"},
                                    "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(23)/UsageIdentitySets"}
        val = self.module.compare_nested_dict(
            modify_setting_payload, existing_setting_payload)
        assert val is False

    @pytest.mark.parametrize("vlan_payload",
                             [{"Name": "pool1", "EthernetSettings": {"Mac": {"StartingMacAddress": "qrvM3e6q"}}},
                              {"Name": "pool1", "EthernetSettings": {
                                  "Mac": {"IdentityCount": 70}}},
                              {"Description": "Identity pool with ethernet setting",
                               "EthernetSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}},
                               "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}}}])
    def test_compare_payload_attributes_case_true(self, vlan_payload):
        """setting values are same as existing payload and no need to apply the changes again"""
        existing_setting_payload = {"@odata.context": "/api/$metadata#IdentityPoolService.IdentityPool",
                                    "@odata.type": "#IdentityPoolService.IdentityPool",
                                    "@odata.id": "/api/IdentityPoolService/IdentityPools(30)", "Id": 30,
                                    "Name": "pool1", "Description": "Identity pool with ethernet setting",
                                    "CreatedBy": "admin", "CreationTime": "2020-01-31 11:31:13.621182",
                                    "LastUpdatedBy": "admin", "LastUpdateTime": "2020-01-31 11:34:28.00876",
                                    "EthernetSettings": {
                                        "Mac": {"IdentityCount": 70, "StartingMacAddress": "qrvM3e6q"}},
                                    "IscsiSettings": None,
                                    "FcoeSettings": {"Mac": {"IdentityCount": 70, "StartingMacAddress": "cHBwcHAA"}},
                                    "FcSettings": None, "UsageCounts": {
                                        "@odata.id": "/api/IdentityPoolService/IdentityPools(30)/UsageCounts"},
                                    "UsageIdentitySets@odata.navigationLink": "/api/IdentityPoolService/IdentityPools(30)/UsageIdentitySets"}
        val = self.module.compare_nested_dict(
            vlan_payload, existing_setting_payload)
        assert val is True

    # Check-mode run: with a pending untagged change the module must report
    # CHANGES_FOUND without applying anything.
    @pytest.mark.parametrize("params", [{"module_args": {"template_name": "vlan_name", "nic_identifier": "NIC1",
                                                         "untagged_networks": [
                                                             {"port": 1, "untagged_network_name": "v1"}]},
                                         "untag_dict": {"1": 13, "2": 14, "3": 11, "4": 12},
                                         "tagged_dict": {"1": [10720], "2": [10719]},
                                         "port_id_map": {"1": 13, "2": 14, "3": 11, "4": 12},
                                         "port_untagged_map": {"1": 10719, "2": 10720, "3": 0, "4": 0},
                                         "port_tagged_map": {"1": [10720], "2": [10719], "3": [], "4": []},
                                         "port_nic_bond_map": {"1": "false", "2": "false", "3": "false", "4": "false"},
                                         "nic_bonding_tech": True, "check_mode": True, "msg": CHANGES_FOUND}])
    def test_ome_template_network_vlan_check_mode(self, params, ome_connection_mock_for_template_network_vlan,
                                                  ome_response_mock, ome_default_args, mocker):
        mocker.patch(
            MODULE_PATH + 'validate_vlans',
            return_value=(
                params.get("untag_dict"),
                params.get("tagged_dict")))
        mocker.patch(MODULE_PATH + 'get_template_details',
                     return_value={"Name": "vlan_name", "Id": 12, "IdentityPoolId": 23})
        mocker.patch(MODULE_PATH + 'get_template_vlan_info', return_value=(
            params.get("port_id_map"), params.get(
                "port_untagged_map"), params.get("port_tagged_map"),
            params.get("port_nic_bond_map"), params.get("nic_bonding_tech")))
        ome_default_args.update(params.get('module_args'))
        result = self._run_module(
            ome_default_args, check_mode=params.get(
                'check_mode', False))
        assert result['msg'] == params['msg']

    # End-to-end main() failures: duplicate untagged port and unknown vlan
    # name must surface as fail_json with the pinned message.
    @pytest.mark.parametrize("params", [
        {"fail_json": True, "json_data": {"JobId": 1234},
         "get_vlan_name_id_map": {"v1": 1},
         "mparams": {"template_name": "vlan_name", "nic_identifier": "NIC1",
                     "untagged_networks": [{"port": 1, "untagged_network_name": "v1"},
                                           {"port": 1, "untagged_network_name": "v1"}]},
         'message': "port 1 is repeated for untagged_network_name", "success": True
         },
        {"fail_json": True, "json_data": {"JobId": 1234},
         "get_vlan_name_id_map": {"v1": 1, "v2": 2},
         "mparams": {"template_name": "vlan_name", "nic_identifier": "NIC1",
                     "untagged_networks": [{"port": 1, "untagged_network_name": "v1"},
                                           {"port": 2, "untagged_network_name": "v2"}],
                     "tagged_networks": [{"port": 3, "tagged_network_names": ['bronzy']}]},
         'message': "bronzy is not a valid vlan name port 3", "success": True
         }
    ])
    def test_main(self, params, ome_connection_mock_for_template_network_vlan, ome_default_args, ome_response_mock, mocker):
        mocker.patch(MODULE_PATH + 'get_vlan_name_id_map', return_value=params.get("get_vlan_name_id_map"))
        # mocker.patch(MODULE_PATH + '_get_baseline_payload', return_value=params.get("_get_baseline_payload"))
        ome_response_mock.success = True
        ome_response_mock.json_data = params.get("json_data")
        ome_default_args.update(params.get('mparams'))
        if params.get("fail_json", False):
            result = self._run_module_with_fail_json(ome_default_args)
        else:
            result = self._run_module(ome_default_args, check_mode=params.get("check_mode", False))
        assert result["msg"] == params['message']

    # Exception funnel: URLError -> unreachable; generic exceptions -> failed;
    # HTTPError carries a JSON body and also results in failed.
    @pytest.mark.parametrize("exc_type",
                             [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
    def test_ome_application_network_vlan_main_success_failure_case(self, exc_type, mocker, ome_default_args,
                                                                    ome_connection_mock_for_template_network_vlan,
                                                                    ome_response_mock):
        ome_default_args.update({"nic_identifier": "NIC1", "template_id": 123, "tagged_networks": [
            {"port": 2, "tagged_network_ids": [22763], "tagged_network_names": ["gold", "silver"]}]})
        json_str = to_text(json.dumps({"info": "error_details"}))
        if exc_type == URLError:
            mocker.patch(
                MODULE_PATH + 'validate_vlans',
                side_effect=exc_type("TEST"))
            result = self._run_module(ome_default_args)
            assert result["unreachable"] is True
        elif exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(
                MODULE_PATH + 'validate_vlans',
                side_effect=exc_type("exception message"))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        else:
            mocker.patch(MODULE_PATH + 'validate_vlans',
                         side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                              {"accept-type": "application/json"}, StringIO(json_str)))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        assert 'proxy_configuration' not in result
        assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py
new file mode 100644
index 00000000..ac3c1814 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 4.0.0 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import ome_user +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \ + AnsibleFailJSonException +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def ome_connection_for_user(mocker, ome_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'ome_user.RestOME') + ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + ome_connection_mock_obj.invoke_request.return_value = ome_response_mock + return ome_connection_mock_obj + + +class TestOmeUser(FakeAnsibleModule): + module = ome_user + + def test__validate_inputs_fail_case(self, ome_connection_for_user): + f_module = self.get_module_mock(params={"state": "absent", "user_id": None}) + with pytest.raises(Exception) as exc: + self.module._validate_inputs(f_module) + assert exc.value.args[0] == "One of the following 'user_id' or 'name' " \ + "option is required for state 'absent'" + + def test__validate_inputs_user_pass_case(self, mocker): + f_module = self.get_module_mock(params={"state": "absent", "user_id": 123}) + fail_module_mock = mocker.patch(MODULE_PATH + 'ome_user.fail_module') + self.module._validate_inputs(f_module) + fail_module_mock.assert_not_called() + + def test_get_user_id_from_name(self, ome_response_mock, ome_connection_for_user): + ome_response_mock.success = True + ome_response_mock.json_data = {'value': [{"UserName": "Testname", "Id": 24}]} + ome_response_mock.status_code = 200 + data = self.module.get_user_id_from_name(ome_connection_for_user, "Testname") + assert data == 24 + + def test_get_user_id_from_name01(self, ome_response_mock, ome_connection_for_user): + ome_response_mock.success = True + val = None + ome_response_mock.json_data = {'value': [{"UserName": "Testname", "Id": 24}]} + ome_response_mock.status_code = 200 + data = self.module.get_user_id_from_name(ome_connection_for_user, "Test") + assert data == val + + def test_get_user_id_from_name_case02(self, ome_connection_for_user): + val = None + data = self.module.get_user_id_from_name(ome_connection_for_user, None) + assert data == val + + def test__get_resource_parameters_present_success_case01(self, 
ome_response_mock, ome_connection_for_user, mocker): + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_date = {'value': []} + f_module = self.get_module_mock(params={"state": "present", + "user_id": 23, + "attributes": {"UserName": "user1", "Password": "UserPassword", + "RoleId": "10", "Enabled": True}}) + mocker.patch(MODULE_PATH + 'ome_user.get_user_id_from_name', return_value=23) + data = self.module._get_resource_parameters(f_module, ome_response_mock) + assert data == ('PUT', "AccountService/Accounts('23')", + {'Enabled': True, 'Id': 23, 'Password': 'UserPassword', 'RoleId': '10', 'UserName': 'user1'}) + + def test__get_resource_parameters_absent_success_case02(self, ome_response_mock, mocker, ome_connection_for_user, + ome_default_args): + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_date = {'value': []} + f_module = self.get_module_mock(params={"state": "absent", "user_id": 23}) + mocker.patch(MODULE_PATH + 'ome_user.get_user_id_from_name', return_value=23) + data = self.module._get_resource_parameters(f_module, ome_response_mock) + assert data == ('DELETE', "AccountService/Accounts('23')", None) + + def test__get_resource_parameters_case03(self, ome_response_mock, mocker, ome_default_args): + ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_date = {'value': []} + f_module = self.get_module_mock(params={"state": "present", + "user_id": None, + "attributes": {"UserName": "user1", "Password": "UserPassword", + "RoleId": "10", "Enabled": True}}) + mocker.patch(MODULE_PATH + 'ome_user.get_user_id_from_name', return_value=None) + data = self.module._get_resource_parameters(f_module, ome_response_mock) + assert data == ('POST', "AccountService/Accounts", + {'Enabled': True, 'Password': 'UserPassword', 'RoleId': '10', 'UserName': 'user1'}) + + def test__get_resource_parameters_fail_case(self, ome_response_mock, mocker): + 
ome_response_mock.status_code = 200 + ome_response_mock.success = True + ome_response_mock.json_date = {'value': []} + f_module = self.get_module_mock(params={"state": "absent", "user_id": None}) + mocker.patch(MODULE_PATH + 'ome_user.get_user_id_from_name', return_value=None) + with pytest.raises(Exception) as exc: + self.module._get_resource_parameters(f_module, ome_response_mock) + assert exc.value.args[0] == "Unable to get the account because the specified account " \ + "does not exist in the system." + + def test__get_resource_parameters_fail_case_02(self, ome_response_mock, mocker): + fail_module_mock = mocker.patch(MODULE_PATH + 'ome_user.fail_module') + f_module = self.get_module_mock(params={"state": "absent", "user_id": None}) + mocker.patch(MODULE_PATH + 'ome_user.get_user_id_from_name', return_value=None) + res = self.module._get_resource_parameters(f_module, ome_response_mock) + assert (res[0], res[1], res[2]) == ('DELETE', "AccountService/Accounts('None')", None) + assert fail_module_mock.assert_not_called + + def test_main_user_success_case01(self, ome_default_args, mocker, ome_connection_for_user, ome_response_mock): + ome_default_args.update({"state": "absent", "user_id": 23}) + mocker.patch(MODULE_PATH + 'ome_user._validate_inputs') + mocker.patch(MODULE_PATH + 'ome_user._get_resource_parameters', + return_value=["DELETE", "ACCOUNT_RESOURCE", {"user_id": 23}]) + result = self._run_module(ome_default_args) + message_success = [ + "Successfully deleted the User", "Successfully modified a User", "Successfully created a User"] + assert result['changed'] is True + assert result['msg'] in message_success + + def test_main_user_success_case02(self, ome_default_args, mocker, ome_connection_for_user, ome_response_mock): + ome_default_args.update({"state": "present", + "user_id": 23, + "attributes": {"UserName": "user1", "Password": "UserPassword", + "RoleId": "10", "Enabled": True}}) + mocker.patch(MODULE_PATH + 'ome_user._validate_inputs') + 
mocker.patch(MODULE_PATH + 'ome_user._get_resource_parameters', + return_value=["PUT", "ACCOUNT_RESOURCE", {"user_id": 23}]) + result = self._run_module(ome_default_args) + message_success = [ + "Successfully deleted the User", "Successfully modified a User", "Successfully created a User"] + assert result['changed'] is True + assert result['msg'] in message_success + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_main_user_exception_case(self, exc_type, mocker, ome_default_args, ome_connection_for_user, + ome_response_mock): + ome_default_args.update({"state": "present", + "user_id": 23, + "attributes": {"UserName": "user1", "Password": "UserPassword", + "RoleId": "10", "Enabled": True}}) + mocker.patch(MODULE_PATH + 'ome_user._validate_inputs') + mocker.patch( + MODULE_PATH + 'ome_user._get_resource_parameters', return_value=("method", + "path", + "payload")) + ome_response_mock.json_data = {"value": []} + ome_response_mock.status_code = 400 + ome_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type not in [HTTPError, SSLValidationError]: + ome_connection_for_user.invoke_request.side_effect = exc_type('test') + else: + mocker.patch( + MODULE_PATH + 'ome_user._get_resource_parameters', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(ome_default_args) + assert 'msg' in result + assert result['failed'] is True + + @pytest.mark.parametrize("http_method, status_code", [('POST', 200), ('PUT', 200), ('DELETE', 204)]) + def test_exit_module_user_success_case(self, http_method, status_code, ome_response_mock): + ome_response_mock.status_code = status_code + ome_response_mock.success = True + ome_response_mock.json_date = {'value': []} + f_module = self.get_module_mock() + msg_dict = {'POST': "Successfully created a 
User", + 'PUT': "Successfully modified a User", + 'DELETE': "Successfully deleted the User"} + with pytest.raises(Exception) as exc: + self.module.exit_module(f_module, ome_response_mock, http_method) + assert exc.value.args[0] == msg_dict[http_method] diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py new file mode 100644 index 00000000..6d48cc18 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.1 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import ome_user_info +from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
@pytest.fixture
def ome_connection_user_info_mock(mocker, ome_response_mock):
    # Patch RestOME so no real OME session is opened; invoke_request
    # returns the shared ome_response_mock fixture.
    connection_class_mock = mocker.patch(MODULE_PATH + 'ome_user_info.RestOME')
    ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
    ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
    return ome_connection_mock_obj


class TestOmeUserInfo(FakeAnsibleModule):
    # Unit tests for the ome_user_info facts module.
    module = ome_user_info

    # system_query_options 'filter' must be translated into an OData '$filter' key.
    @pytest.mark.parametrize("module_params,data", [({"system_query_options": {"filter": "abc"}}, "$filter")])
    def test_user_get_query_parameters(self, module_params, data, ome_connection_user_info_mock):
        res = self.module._get_query_parameters(module_params)
        if data is not None:
            assert data in res
        else:
            assert res is None

    def test_user_info_main_success_case_all(self, ome_default_args, ome_connection_user_info_mock, ome_response_mock):
        # No selector args: the full account listing is returned under 'user_info'.
        ome_response_mock.json_data = {"value": [{"account_id": 1,
                                                  "system_query_options": "the user based on UserName"}]}
        ome_response_mock.status_code = 200
        result = self._run_module(ome_default_args)
        assert 'user_info' in result

    def test_user_info_main_success_case_account_id(self, ome_default_args, ome_connection_user_info_mock,
                                                    ome_response_mock):
        # account_id selector: facts run, never 'changed'.
        ome_default_args.update({"account_id": 1})
        ome_response_mock.success = True
        ome_response_mock.json_data = {"value": [{"account_id": 1}]}
        ome_response_mock.status_code = 200
        result = self._run_module(ome_default_args)
        assert result['changed'] is False
        assert 'user_info' in result

    def test_user_info_success_case03(self, ome_default_args, ome_connection_user_info_mock, ome_response_mock, mocker):
        # system_query_options path with the query-builder mocked out.
        mocker.patch(MODULE_PATH + 'ome_user_info._get_query_parameters',
                     return_value={"filter": "abc"})
        ome_default_args.update({"system_query_options": {"filter": "abc"}})
        ome_response_mock.success = True
        ome_response_mock.json_data = {"value": [{"filter": "abc"}]}
        ome_response_mock.status_code = 200
        result = self._run_module(ome_default_args)
        assert result['changed'] is False
        assert 'user_info' in result

    def test_get_user_info_failure_case(self, ome_default_args, ome_connection_user_info_mock, ome_response_mock):
        # Non-success response surfaces the fixed retrieval-failure message.
        ome_response_mock.status_code = 500
        ome_response_mock.success = False
        result = self._run_module_with_fail_json(ome_default_args)
        assert result['msg'] == 'Unable to retrieve the account details.'

    # URLError ends in a non-failed run that still carries 'msg'; all other
    # exception types must end in fail_json.
    @pytest.mark.parametrize("exc_type",
                             [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
    def test_ome_user_info_main_exception_handling_case(self, exc_type, mocker, ome_default_args,
                                                        ome_connection_user_info_mock, ome_response_mock):
        ome_response_mock.status_code = 400
        ome_response_mock.success = False
        json_str = to_text(json.dumps({"data": "out"}))
        if exc_type not in [HTTPError, SSLValidationError]:
            ome_connection_user_info_mock.invoke_request.side_effect = exc_type('test')
        else:
            # HTTPError/SSLValidationError are constructed with an HTTP-style body.
            ome_connection_user_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400,
                                                                                'http error message',
                                                                                {"accept-type": "application/json"},
                                                                                StringIO(json_str))
        if not exc_type == URLError:
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        else:
            result = self._run_module(ome_default_args)
        assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
new file mode 100644
index 00000000..075406a7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
@@ -0,0 +1,452 @@
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 4.1.0
# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from ansible_collections.dellemc.openmanage.plugins.modules import redfish_event_subscription +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' +DESTINATION_INVALID = "The Parameter destination must have an HTTPS destination. The HTTP destination is not allowed" +SUBSCRIPTION_EXISTS = "No changes found to be applied." +SUBSCRIPTION_DELETED = "Successfully deleted the subscription." +SUBSCRIPTION_UNABLE_DEL = "Unable to delete the subscription." +SUBSCRIPTION_UNABLE_ADD = "Unable to add a subscription." +SUBSCRIPTION_ADDED = "Successfully added the subscription." +DESTINATION_MISMATCH = "No changes found to be applied." +EVENT_TYPE_INVALID = "value of event_type must be one of: Alert, MetricReport, got: Metricreport" + + +@pytest.fixture +def redfish_connection_mock(mocker, redfish_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_event_subscription.Redfish') + redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock + return redfish_connection_mock_obj + + +class TestRedfishSubscription(FakeAnsibleModule): + module = redfish_event_subscription + + @pytest.mark.parametrize("val", [{"destination": "https://192.168.1.100:8188"}, + {"destination": "https://192.168.1.100:8189"}]) + def test_function_get_subscription_success(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args, val): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": val["destination"]}) + redfish_default_args.update({"event_type": "MetricReport"}) + 
redfish_default_args.update({"event_format_type": "MetricReport"}) + json_data1 = { + "@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination", + "@odata.id": "/redfish/v1/EventService/Subscriptions/c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80", + "@odata.type": "#EventDestination.v1_6_0.EventDestination", + "Context": "RedfishEvent", + "DeliveryRetryPolicy": "RetryForever", + "Description": "Event Subscription Details", + "Destination": "https://192.168.1.100:8189", + "EventFormatType": "Event", + "EventTypes": [ + "Alert" + ], + "EventTypes@odata.count": 1, + "HttpHeaders": [], + "HttpHeaders@odata.count": 0, + "Id": "c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80", + "MetricReportDefinitions": [], + "MetricReportDefinitions@odata.count": 0, + "Name": "EventSubscription c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80", + "OriginResources": [], + "OriginResources@odata.count": 0, + "Protocol": "Redfish", + "Status": { + "Health": "OK", + "HealthRollup": "OK", + "State": "Enabled" + }, + "SubscriptionType": "RedfishEvent" + } + json_data2 = { + "@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination", + "@odata.id": "/redfish/v1/EventService/Subscriptions/c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "@odata.type": "#EventDestination.v1_6_0.EventDestination", + "Context": "RedfishEvent", + "DeliveryRetryPolicy": "RetryForever", + "Description": "Event Subscription Details", + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "EventTypes": [ + "MetricReport" + ], + "EventTypes@odata.count": 1, + "HttpHeaders": [], + "HttpHeaders@odata.count": 0, + "Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "MetricReportDefinitions": [], + "MetricReportDefinitions@odata.count": 0, + "Name": "EventSubscription c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "OriginResources": [], + "OriginResources@odata.count": 0, + "Protocol": "Redfish", + "Status": { + "Health": "OK", + "HealthRollup": "OK", + "State": "Enabled" + }, + "SubscriptionType": 
"RedfishEvent" + } + + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription_details', + side_effect=[json_data1, json_data2]) + + redfish_response_mock.json_data = { + "@odata.context": "/redfish/v1/$metadata#EventDestinationCollection.EventDestinationCollection", + "@odata.id": "/redfish/v1/EventService/Subscriptions", + "@odata.type": "#EventDestinationCollection.EventDestinationCollection", + "Description": "List of Event subscriptions", + "Members": [ + { + "@odata.id": "/redfish/v1/EventService/Subscriptions/c6ff37fc-8204-11eb-b08f-2cea7ff7fe80" + }, + { + "@odata.id": "/redfish/v1/EventService/Subscriptions/c7e5c3fc-8204-11eb-bd10-2cea7ff7fe80" + } + ], + "Members@odata.count": 2, + "Name": "Event Subscriptions Collection" + } + redfish_response_mock.success = True + f_module = self.get_module_mock(params=redfish_default_args) + result = self.module.get_subscription(redfish_connection_mock, val["destination"]) + assert result["Destination"] == val["destination"] + + @pytest.mark.parametrize("val", [ + {"destination": "https://192.168.1.100:8188", "event_type": "MetricReport", + "event_format_type": "MetricReport"}, + {"destination": "https://192.168.1.100:8188", "event_type": "Alert", "event_format_type": "Event"}]) + def test_function_create_subscription(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args, val): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": val["destination"]}) + redfish_default_args.update({"event_type": val["event_type"]}) + redfish_default_args.update({"event_format_type": val["event_format_type"]}) + + redfish_response_mock.json_data = { + "Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "Destination": val["destination"], + "EventFormatType": val["event_format_type"], + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": [val["event_type"]], + "SubscriptionType": "RedfishEvent" + } + redfish_response_mock.success = True + 
f_module = self.get_module_mock(params=redfish_default_args) + result = self.module.create_subscription(redfish_connection_mock, f_module) + assert result.json_data["Destination"] == val["destination"] + assert result.json_data["EventFormatType"] == val["event_format_type"] + assert result.json_data["EventTypes"] == [val["event_type"]] + + @pytest.mark.parametrize("val", [ + {"destination": "https://100.96.80.1:161", "event_type": "MetricReport", + "event_format_type": "MetricReport"}, + {"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}]) + def test_function_get_subscription_details(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args, val): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": val["destination"]}) + redfish_default_args.update({"event_type": val["event_type"]}) + redfish_default_args.update({"event_format_type": val["event_format_type"]}) + + redfish_response_mock.json_data = { + "@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination", + "@odata.id": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "@odata.type": "#EventDestination.v1_9_0.EventDestination", + "Actions": { + "#EventDestination.ResumeSubscription": { + "target": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6/Actions/EventDestination.ResumeSubscription" + } + }, + "Context": "RedfishEvent", + "DeliveryRetryPolicy": "RetryForever", + "Description": "Event Subscription Details", + "Destination": val['destination'], + "EventFormatType": val["event_format_type"], + "EventTypes": [val["event_type"]], + "EventTypes@odata.count": 1, + "HttpHeaders": [], + "HttpHeaders@odata.count": 0, + "Id": "087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "Name": "EventSubscription 087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "Protocol": "Redfish", + "Status": { + "Health": "OK", + "HealthRollup": "OK", + "State": 
"Enabled" + }, + "SubscriptionType": "RedfishEvent" + } + redfish_response_mock.success = True + result = self.module.get_subscription_details(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80") + assert result["Destination"] == val["destination"] + assert result["EventFormatType"] == val["event_format_type"] + assert result["EventTypes"] == [val["event_type"]] + + @pytest.mark.parametrize("val", [ + {"destination": "https://100.96.80.1:161", "event_type": "MetricReport", + "event_format_type": "MetricReport"}, + {"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}]) + def test_function_get_subscription_details_None(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args, val): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": val["destination"]}) + redfish_default_args.update({"event_type": val["event_type"]}) + redfish_default_args.update({"event_format_type": val["event_format_type"]}) + + redfish_response_mock.json_data = { + "@odata.context": "/redfish/v1/$metadata#EventDestination.EventDestination", + "@odata.id": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "@odata.type": "#EventDestination.v1_9_0.EventDestination", + "Actions": { + "#EventDestination.ResumeSubscription": { + "target": "/redfish/v1/EventService/Subscriptions/087b9026-0afa-11ec-8120-4cd98f5fc5a6/Actions/EventDestination.ResumeSubscription" + } + }, + "Context": "RedfishEvent", + "DeliveryRetryPolicy": "RetryForever", + "Description": "Event Subscription Details", + "Destination": val['destination'], + "EventFormatType": val["event_format_type"], + "EventTypes": [val["event_type"]], + "EventTypes@odata.count": 1, + "HttpHeaders": [], + "HttpHeaders@odata.count": 0, + "Id": "087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "Name": "EventSubscription 087b9026-0afa-11ec-8120-4cd98f5fc5a6", + "Protocol": "Redfish", + "Status": { + "Health": 
"OK", + "HealthRollup": "OK", + "State": "Enabled" + }, + "SubscriptionType": "RedfishEvent" + } + redfish_response_mock.success = False + result = self.module.get_subscription_details(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80") + assert result is None + + @pytest.mark.parametrize("val", [ + {"destination": "https://100.96.80.1:161"}, + {"destination": "https://100.96.80.1:161"}]) + def test_function_delete_subscription(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args, val): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": val["destination"]}) + + redfish_response_mock.json_data = { + "@Message.ExtendedInfo": [ + { + "Message": "Successfully Completed Request", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.7.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + }, + { + "Message": "The operation successfully completed.", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "IDRAC.2.4.SYS413", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "No response action is required.", + "Severity": "Informational" + } + ] + } + redfish_response_mock.success = True + result = self.module.delete_subscription(redfish_connection_mock, "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80") + assert result.json_data["@Message.ExtendedInfo"][0]["Message"] == "Successfully Completed Request" + assert result.json_data["@Message.ExtendedInfo"][1]["Message"] == "The operation successfully completed." 
+ + def test_module_validation_input_params(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": "http://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + with pytest.raises(Exception) as err: + self._run_module(redfish_default_args) + assert err.value.args[0]['msg'] == DESTINATION_INVALID + + def test_module_absent_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + redfish_connection_mock.patch( + MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None) + redfish_response_mock.success = True + result = self._run_module(redfish_default_args) + assert result["msg"] == DESTINATION_MISMATCH + + def test_module_absent_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + json_data = { + "Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + redfish_response_mock.success = True + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data) + mocker.patch(MODULE_PATH + 
'redfish_event_subscription.delete_subscription', return_value=redfish_response_mock) + f_module = self.get_module_mock() + result = self._run_module(redfish_default_args) + print(result) + assert result["msg"] == SUBSCRIPTION_DELETED + + def test_module_absent_does_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "absent"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + json_data = { + "Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + redfish_response_mock.success = False + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data) + mocker.patch(MODULE_PATH + 'redfish_event_subscription.delete_subscription', return_value=redfish_response_mock) + with pytest.raises(Exception) as err: + self._run_module(redfish_default_args) + assert err.value.args[0]['msg'] == SUBSCRIPTION_UNABLE_DEL + + def test_module_present_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "present"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + json_data = { + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', 
return_value=None) + create_subscription_response_mock = redfish_response_mock + create_subscription_response_mock.json_data = json_data + mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription', + return_value=create_subscription_response_mock) + f_module = self.get_module_mock() + redfish_response_mock.success = True + result = self._run_module(redfish_default_args) + print(result) + assert result["msg"] == SUBSCRIPTION_ADDED + + def test_module_present_does_not_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "present"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + json_data = { + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None) + create_subscription_response_mock = redfish_response_mock + create_subscription_response_mock.json_data = json_data + mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription', + return_value=create_subscription_response_mock) + redfish_response_mock.success = False + with pytest.raises(Exception) as err: + self._run_module(redfish_default_args) + assert err.value.args[0]['msg'] == SUBSCRIPTION_UNABLE_ADD + + def test_module_present_does_not_exist_error_wrong_input(self, mocker, redfish_connection_mock, + redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "present"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "Metricreport"}) + redfish_default_args.update({"event_format_type": 
"MetricReport"}) + + json_data = { + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=None) + create_subscription_response_mock = redfish_response_mock + create_subscription_response_mock.json_data = json_data + mocker.patch(MODULE_PATH + 'redfish_event_subscription.create_subscription', + return_value=create_subscription_response_mock) + f_module = self.get_module_mock() + redfish_response_mock.success = True + with pytest.raises(Exception) as err: + self._run_module(redfish_default_args) + print(err) + assert err.value.args[0]['msg'] == EVENT_TYPE_INVALID + + def test_module_present_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock, + redfish_default_args): + redfish_default_args.update({"state": "present"}) + redfish_default_args.update({"destination": "https://192.168.1.100:8188"}) + redfish_default_args.update({"event_type": "MetricReport"}) + redfish_default_args.update({"event_format_type": "MetricReport"}) + + json_data = { + "Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80", + "Destination": "https://192.168.1.100:8188", + "EventFormatType": "MetricReport", + "Context": "RedfishEvent", + "Protocol": "Redfish", + "EventTypes": ["MetricReport"], + "SubscriptionType": "RedfishEvent" + } + mocker.patch(MODULE_PATH + 'redfish_event_subscription.get_subscription', return_value=json_data) + redfish_response_mock.success = True + result = self._run_module(redfish_default_args) + assert result["msg"] == SUBSCRIPTION_EXISTS diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py new file mode 100644 index 00000000..dac24df4 --- /dev/null +++ 
b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.5.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +import sys + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import redfish_firmware +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from mock import MagicMock +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text +from mock import patch, mock_open + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}" + + +@pytest.fixture +def redfish_firmware_connection_mock(mocker, redfish_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_firmware.Redfish') + redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock + return redfish_connection_mock_obj + + +class TestRedfishFirmware(FakeAnsibleModule): + module = redfish_firmware + + @pytest.fixture + def os_mock(self, mocker): + try: + fi_mock = mocker.patch( + MODULE_PATH + 'redfish_firmware.payload_file.get("file")') + except AttributeError: + fi_mock = MagicMock() + obj = MagicMock() + fi_mock.read.return_value = obj + return fi_mock + + update_status = { + "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob", + "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_824742691385", + "@odata.type": "#DellJob.v1_0_2.DellJob", + "CompletionTime": "2020-02-23T21:51:30", + "Description": "Job Instance", + "EndTime": None, + "Id": "JID_824742691385", + "JobState": "Completed", + "JobType": "RepositoryUpdate", + "Message": "Job completed successfully.", + "MessageArgs": [ + "NA" + ], + "MessageArgs@odata.count": 1, + "MessageId": "RED001", + "Name": "Repository Update", + "PercentComplete": 100, + "StartTime": "TIME_NOW", + "Status": "Success", + "TargetSettingsURI": None, + "job_details": { + "Data": { + "StatusCode": 200, + "body": { + "@Message.ExtendedInfo": [ + { + "Message": "Successfully Completed Request", + "MessageArgs": [], + "MessageArgs@odata.count": 0, + "MessageId": "Base.1.5.Success", + "RelatedProperties": [], + "RelatedProperties@odata.count": 0, + "Resolution": "None", + "Severity": "OK" + } + ], + "PackageList": [ + { + "BaseLocation": None, + "ComponentID": "18981", + "ComponentType": "APAC", + "Criticality": "3", + "DisplayName": "Dell OS Driver Pack", + "JobID": "JID_824746139010", + "PackageName": 
"Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE", + "PackageVersion": "19.10.12", + "RebootType": "NONE", + "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1" + }] + + } + } + } + } + + def test_main_redfish_firmware_success_case(self, redfish_firmware_connection_mock, redfish_default_args, mocker, + redfish_response_mock): + redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe"}) + redfish_firmware_connection_mock.headers.get("Location").return_value = "https://multipart/form-data" + redfish_firmware_connection_mock.headers.get("Location").split().return_value = "multipart/form-data" + mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update', + return_value=redfish_response_mock) + redfish_response_mock.json_data = {"image_uri": "http://home/firmware_repo/component.exe"} + redfish_response_mock.status_code = 201 + redfish_response_mock.success = True + result = self._run_module(redfish_default_args) + assert result == {'changed': True, + 'msg': 'Successfully submitted the firmware update task.', + 'task': {'id': redfish_response_mock.headers.get().split().__getitem__(), + 'uri': JOB_URI.format(job_id=redfish_response_mock.headers.get().split().__getitem__())}} + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError]) + def test_main_redfish_firmware_exception_handling_case(self, exc_type, mocker, redfish_default_args, + redfish_firmware_connection_mock, + redfish_response_mock): + redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe"}) + redfish_response_mock.json_data = {"value": [{"image_uri": "/home/firmware_repo/component.exe"}]} + redfish_response_mock.status_code = 400 + redfish_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + + if exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update', + 
side_effect=exc_type('test')) + else: + mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(redfish_default_args) + assert 'task' not in result + assert 'msg' in result + assert result['failed'] is True + if exc_type == HTTPError: + assert 'error_info' in result + + def test_get_update_service_target_success_case(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock): + redfish_default_args.update({"transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 200 + redfish_response_mock.success = True + redfish_response_mock.json_data = { + "Actions": { + "#UpdateService.SimpleUpdate": { + "TransferProtocol@Redfish.AllowableValues": ["HTTP"], + "target": "" + } + }, + "transfer_protocol": "HTTP", + "HttpPushUri": "http://dell.com", + "FirmwareInventory": { + "@odata.id": "2134" + } + } + result = self.module._get_update_service_target(redfish_firmware_connection_mock, f_module) + assert result == ('2134', 'http://dell.com', '') + + def test_get_update_service_target_uri_none_case(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock): + redfish_default_args.update({"transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 200 + redfish_response_mock.success = True + redfish_response_mock.json_data = { + "Actions": { + "#UpdateService.SimpleUpdate": { + "TransferProtocol@Redfish.AllowableValues": ["HTTP"], + "target": None + } + }, + "transfer_protocol": "HTTP", + "HttpPushUri": None, + "FirmwareInventory": { + "@odata.id": None + } + } + with pytest.raises(Exception) as ex: + self.module._get_update_service_target(redfish_firmware_connection_mock, f_module) + assert 
ex.value.args[0] == "Target firmware version does not support redfish firmware update." + + def test_get_update_service_target_failed_case(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock): + redfish_default_args.update({"transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 200 + redfish_response_mock.success = True + redfish_response_mock.json_data = { + "Actions": { + "#UpdateService.SimpleUpdate": { + "TransferProtocol@Redfish.AllowableValues": [""] + } + }, + "transfer_protocol": "HTTP", + "HttpPushUri": "http://dell.com", + "FirmwareInventory": { + "@odata.id": "2134" + } + } + with pytest.raises(Exception) as ex: + self.module._get_update_service_target(redfish_firmware_connection_mock, f_module) + assert ex.value.args[0] == "Target firmware version does not support {0} protocol.".format("HTTP") + + def test_firmware_update_success_case01(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock, mocker): + mocker.patch(MODULE_PATH + 'redfish_firmware._get_update_service_target', + return_value=('2134', 'http://dell.com', 'redfish')) + redfish_default_args.update({"image_uri": "http://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 200 + redfish_response_mock.success = True + redfish_response_mock.json_data = {"image_uri": "http://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"} + result = self.module.firmware_update(redfish_firmware_connection_mock, f_module) + assert result == redfish_response_mock + + def test_firmware_update_success_case02(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock, mocker): + mocker.patch(MODULE_PATH + "redfish_firmware._get_update_service_target", + return_value=('2134', 'nhttp://dell.com', 'multipart/form-data')) + 
mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.redfish_firmware._encode_form_data", + return_value=({"file": (3, "nhttp://dell.com", "multipart/form-data")}, "multipart/form-data")) + redfish_default_args.update({"image_uri": "nhttp://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 200 + redfish_response_mock.success = True + redfish_response_mock.json_data = {"image_uri": "nhttp://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"} + if sys.version_info.major == 3: + builtin_module_name = 'builtins' + else: + builtin_module_name = '__builtin__' + with patch("{0}.open".format(builtin_module_name), mock_open(read_data="data")) as mock_file: + result = self.module.firmware_update(redfish_firmware_connection_mock, f_module) + assert result == redfish_response_mock + + def test_firmware_update_success_case03(self, redfish_default_args, redfish_firmware_connection_mock, + redfish_response_mock, mocker): + mocker.patch(MODULE_PATH + "redfish_firmware._get_update_service_target", + return_value=('2134', 'nhttp://dell.com', 'multipart/form-data')) + mocker.patch(MODULE_PATH + "redfish_firmware._encode_form_data", + return_value=({"file": (3, "nhttp://dell.com", "multipart/form-data")}, "multipart/form-data")) + redfish_default_args.update({"image_uri": "nhttp://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"}) + f_module = self.get_module_mock(params=redfish_default_args) + redfish_response_mock.status_code = 201 + redfish_response_mock.success = True + redfish_response_mock.json_data = {"image_uri": "nhttp://home/firmware_repo/component.exe", + "transfer_protocol": "HTTP"} + if sys.version_info.major == 3: + builtin_module_name = 'builtins' + else: + builtin_module_name = '__builtin__' + with patch("{0}.open".format(builtin_module_name), mock_open(read_data="data")) as mock_file: + result = 
self.module.firmware_update(redfish_firmware_connection_mock, f_module) + assert result == redfish_response_mock diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py new file mode 100644 index 00000000..1477015a --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py @@ -0,0 +1,475 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 2.1.3 +# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import redfish_powerstate +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text + +tarrget_error_msg = "The target device does not support the system reset" \ + " feature using Redfish API." +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
+ + +@pytest.fixture +def redfish_connection_mock_for_powerstate(mocker, redfish_response_mock): + connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_powerstate.Redfish') + redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value + redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock + return redfish_connection_mock_obj + + +class TestRedfishPowerstate(FakeAnsibleModule): + module = redfish_powerstate + + def test_fetch_powerstate_resource_success_case_01(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + """dynamically fetch the computer system id if one member exists in system""" + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + } + ], + "Actions": { + "#ComputerSystem.Reset": { + "target": "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset", + "ResetType@Redfish.AllowableValues": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + } + }, + "PowerState": "On" + } + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert self.module.powerstate_map["allowable_enums"] == [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + assert self.module.powerstate_map['power_uri'] == '/redfish/v1/Systems/System.Embedded.1/Actions' \ + '/ComputerSystem.Reset' + assert self.module.powerstate_map['current_state'] == 'On' + + def test_fetch_powerstate_resource_resource_id_given_success_case(self, + redfish_connection_mock_for_powerstate, + redfish_response_mock): + """case when system id is explicitly provided""" + f_module = self.get_module_mock(params={"resource_id": "System.Embedded.2"}) + 
redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + }, + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.2" + } + ], + "Actions": { + "#ComputerSystem.Reset": { + "target": "/redfish/v1/Systems/System.Embedded.2/Actions/ComputerSystem.Reset", + "ResetType@Redfish.AllowableValues": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + } + }, + "PowerState": "On" + } + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert self.module.powerstate_map["allowable_enums"] == [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + assert self.module.powerstate_map['power_uri'] == '/redfish/v1/Systems/System.Embedded.2/Actions' \ + '/ComputerSystem.Reset' + assert self.module.powerstate_map['current_state'] == 'On' + + def test_fetch_powerstate_resource_resource_id_not_given_failure_case(self, + redfish_connection_mock_for_powerstate, + redfish_response_mock): + """case when system id not provided but multipble resource exists""" + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + }, + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.2" + } + ], + "Actions": { + "#ComputerSystem.Reset": { + "target": "/redfish/v1/Systems/System.Embedded.2/Actions/ComputerSystem.Reset", + "ResetType@Redfish.AllowableValues": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + } + }, + "PowerState": "On" + } + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + 
self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert exc.value.args[0] == "Multiple devices exists in the system, but option 'resource_id' is not specified." + + def test_fetch_powerstate_resource_resource_id_invalid_failure_case(self, + redfish_connection_mock_for_powerstate, + redfish_response_mock): + """failure case when system id is explicitly provided but which is not valid""" + f_module = self.get_module_mock(params={"resource_id": "System.Embedded.3"}) + redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": + [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + }, + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.2" + } + ], + "Actions": { + "#ComputerSystem.Reset": { + "target": "/redfish/v1/Systems/System.Embedded.2/Actions/ComputerSystem.Reset", + "ResetType@Redfish.AllowableValues": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + } + }, + "PowerState": "On" + } + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert exc.value.args[0] == "Invalid device Id 'System.Embedded.3' is provided" + + def test_fetch_powerstate_resource_error_case_01(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + """failure case when system does not supports redfish computer system in schema""" + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "@odata.id": "/redfish/v1/Systems", + "Members": [ + ], + } + + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert exc.value.args[0] == tarrget_error_msg + + def test_fetch_powerstate_resource_error_case_02(self, 
redfish_connection_mock_for_powerstate, + redfish_response_mock): + """failuere case when system does not supports redfish computer system action in schema""" + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + } + ], + "Actions": { + + }} + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert exc.value.args[0] == tarrget_error_msg + + def test_fetch_powerstate_resource_error_case_03(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + """failuere case when system does not supports and throws http error not found""" + f_module = self.get_module_mock() + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError('http://testhost.com', 404, + json.dumps(tarrget_error_msg), {}, + None) + with pytest.raises(Exception) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + + def test_fetch_powerstate_resource_error_case_04(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + """failure case when system does not supports and throws http error 400 bad request""" + f_module = self.get_module_mock() + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError('http://testhost.com', 400, + tarrget_error_msg, + {}, None) + with pytest.raises(Exception, match=tarrget_error_msg) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + + def test_fetch_powerstate_resource_error_case_05(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + f_module = 
self.get_module_mock() + msg = "connection error" + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + redfish_connection_mock_for_powerstate.invoke_request.side_effect = URLError(msg) + with pytest.raises(Exception, match=msg) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + + def test_fetch_powerstate_resource_error_case_06(self, redfish_connection_mock_for_powerstate, + redfish_response_mock): + """when both system id and mebers of id not provided""" + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "Systems": { + "@odata.id": "/redfish/v1/Systems" + }, + "Members": [ + ], + "Actions": { + + }} + redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_power_uri_resource(f_module, redfish_connection_mock_for_powerstate) + assert exc.value.args[0] == tarrget_error_msg + + power_vals = [{"apply": "On", "current": "On", "result": False}, + {"apply": "On", "current": "PoweringOn", "result": False}, + {"apply": "On", "current": "Off", "result": True}, + {"apply": "On", "current": "PoweringOff", "result": True}, + {"apply": "ForceOn", "current": "On", "result": False}, + {"apply": "ForceOn", "current": "PoweringOn", "result": False}, + {"apply": "ForceOn", "current": "Off", "result": True}, + {"apply": "ForceOn", "current": "PoweringOff", "result": True}, + {"apply": "PushPowerButton", "current": "On", "result": True}, + {"apply": "PushPowerButton", "current": "PoweringOn", "result": True}, + {"apply": "PushPowerButton", "current": "Off", "result": True}, + {"apply": "PushPowerButton", "current": "PoweringOff", "result": True}, + {"apply": "ForceOff", "current": "On", "result": True}, + {"apply": "ForceOff", "current": "PoweringOn", "result": True}, + {"apply": "ForceOff", "current": "Off", "result": False}, + {"apply": "ForceOff", "current": "PoweringOff", "result": False}, + {"apply": "ForceRestart", 
"current": "On", "result": True}, + {"apply": "ForceRestart", "current": "PoweringOn", "result": True}, + {"apply": "ForceRestart", "current": "Off", "result": False}, + {"apply": "ForceRestart", "current": "PoweringOff", "result": False}, + {"apply": "GracefulRestart", "current": "On", "result": True}, + {"apply": "GracefulRestart", "current": "PoweringOn", "result": True}, + {"apply": "GracefulRestart", "current": "Off", "result": False}, + {"apply": "GracefulRestart", "current": "PoweringOff", "result": False}, + {"apply": "GracefulShutdown", "current": "On", "result": True}, + {"apply": "GracefulShutdown", "current": "PoweringOn", "result": True}, + {"apply": "GracefulShutdown", "current": "Off", "result": False}, + {"apply": "GracefulShutdown", "current": "PoweringOff", "result": False}, + {"apply": "Nmi", "current": "On", "result": True}, + {"apply": "Nmi", "current": "PoweringOn", "result": True}, + {"apply": "Nmi", "current": "Off", "result": False}, + {"apply": "Nmi", "current": "PoweringOff", "result": False}, + {"apply": "PowerCycle", "current": "On", "result": True}, + {"apply": "PowerCycle", "current": "PoweringOn", "result": True}, + {"apply": "PowerCycle", "current": "Off", "result": False}, + {"apply": "PowerCycle", "current": "PoweringOff", "result": False}, + + ] + + @pytest.mark.parametrize("power_map", power_vals) + def test_is_change_applicable_for_power_state(self, power_map): + apply_state = power_map["apply"] + current_state = power_map["current"] + result = power_map["result"] + res = self.module.is_change_applicable_for_power_state(current_state, apply_state) + assert res is result + + def test_is_change_applicable_for_power_state_case_02(self): + apply_state = "xyz" + current_state = "On" + result = False + res = self.module.is_change_applicable_for_power_state(current_state, apply_state) + assert res is result + + def test_is_valid_reset_type(self): + f_module = self.get_module_mock() + reset_type = "GracefulRestart" + allowable_enum = [ 
+ "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + error_msg = "The target device does not support a" \ + " graceful restart operation.The acceptable values for device reset types" \ + " are {0}.".format(", ".join(allowable_enum)) + with pytest.raises(Exception) as exc: + self.module.is_valid_reset_type(reset_type, allowable_enum, f_module) + assert exc.value.args[0] == error_msg + + def test_is_valid_reset_type_case2(self): + f_module = self.get_module_mock() + reset_type = "ForceOff" + allowable_enum = [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ] + self.module.is_valid_reset_type(reset_type, allowable_enum, f_module) + + @pytest.mark.parametrize("val", [{"change_applicable": True, "check_mode_msg": "Changes found to be applied."}, + {"change_applicable": False, "check_mode_msg": "No Changes found to be applied."}]) + def test_run_change_power_state_case_with_checkmode(self, mocker, val): + change_applicable = val["change_applicable"] + message = val["check_mode_msg"] + f_module = self.get_module_mock(params={"reset_type": "On"}, check_mode=True) + self.module.powerstate_map.update({"allowable_enums": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ]}) + self.module.powerstate_map.update({'power_uri': '/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem' + '.Reset'}) + self.module.powerstate_map.update({'current_state': 'On'}) + + mocker.patch(MODULE_PATH + 'redfish_powerstate.fetch_power_uri_resource', + return_value=None) + mocker.patch(MODULE_PATH + 'redfish_powerstate.is_valid_reset_type', + return_value=None) + mocker.patch(MODULE_PATH + 'redfish_powerstate.is_change_applicable_for_power_state', + return_value=change_applicable) + + with pytest.raises(Exception, match=message): + self.module.run_change_power_state(redfish_connection_mock_for_powerstate, 
f_module) + + @pytest.mark.parametrize("val", [{"change_applicable": True, "status_code": 204}, + {"change_applicable": False, "status_code": 200}, + {"change_applicable": True, "status_code": 200}]) + def test_run_change_power_state_case_without_checkmode(self, mocker, val, redfish_connection_mock_for_powerstate, + redfish_response_mock): + redfish_response_mock.status_code = val["status_code"] + change_applicable = val["change_applicable"] + f_module = self.get_module_mock(params={"reset_type": "On"}) + self.module.powerstate_map.update({"allowable_enums": [ + "On", + "ForceOff", + "ForceRestart", + "GracefulShutdown", + "PushPowerButton", + "Nmi", + "PowerCycle" + ]}) + self.module.powerstate_map.update({'power_uri': '/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem' + '.Reset'}) + self.module.powerstate_map.update({'current_state': 'On'}) + if change_applicable is True: + if val["status_code"] == 204: + redfish_response_mock.success = True + message = "Successfully performed the reset type operation 'On'." + else: + redfish_response_mock.success = False + message = "Unable to perform the reset type operation 'On'." + else: + message = "The device is already powered on." 
+ mocker.patch(MODULE_PATH + 'redfish_powerstate.fetch_power_uri_resource', + return_value=None) + mocker.patch(MODULE_PATH + 'redfish_powerstate.is_valid_reset_type', + return_value=None) + mocker.patch(MODULE_PATH + 'redfish_powerstate.is_change_applicable_for_power_state', + return_value=change_applicable) + + with pytest.raises(Exception, match=message): + self.module.run_change_power_state(redfish_connection_mock_for_powerstate, f_module) + + @pytest.mark.parametrize("exc_type", + [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError, + Exception]) + def test_main_redfish_powerstate_exception_handling_case(self, exc_type, redfish_default_args, + redfish_connection_mock_for_powerstate, + redfish_response_mock, mocker): + redfish_default_args.update({"reset_type": "On"}) + redfish_response_mock.status_code = 400 + redfish_response_mock.success = False + json_str = to_text(json.dumps({"data": "out"})) + if exc_type == URLError: + mocker.patch(MODULE_PATH + 'redfish_powerstate.run_change_power_state', + side_effect=exc_type("url open error")) + result = self._run_module(redfish_default_args) + assert result["unreachable"] is True + elif exc_type not in [HTTPError, SSLValidationError]: + mocker.patch(MODULE_PATH + 'redfish_powerstate.run_change_power_state', + side_effect=exc_type("exception message")) + result = self._run_module_with_fail_json(redfish_default_args) + assert result['failed'] is True + else: + mocker.patch(MODULE_PATH + 'redfish_powerstate.run_change_power_state', + side_effect=exc_type('http://testhost.com', 400, 'http error message', + {"accept-type": "application/json"}, StringIO(json_str))) + result = self._run_module_with_fail_json(redfish_default_args) + assert result['failed'] is True + assert 'msg' in result diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py 
new file mode 100644 index 00000000..55fb3535 --- /dev/null +++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py @@ -0,0 +1,610 @@ +# -*- coding: utf-8 -*- + +# +# Dell EMC OpenManage Ansible Modules +# Version 5.3.0 +# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved. + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import json +from ansible_collections.dellemc.openmanage.plugins.modules import redfish_storage_volume +from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants +from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError +from ansible.module_utils.urls import ConnectionError, SSLValidationError +from io import StringIO +from ansible.module_utils._text import to_text + +MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.' 
@pytest.fixture
def redfish_connection_mock_for_storage_volume(mocker, redfish_response_mock):
    """Return a mocked Redfish connection whose invoke_request yields the shared response mock."""
    connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_storage_volume.Redfish')
    redfish_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
    redfish_connection_mock_obj.invoke_request.return_value = redfish_response_mock
    return redfish_connection_mock_obj


class TestStorageVolume(FakeAnsibleModule):
    """Unit tests for the redfish_storage_volume module."""

    module = redfish_storage_volume

    @pytest.fixture
    def storage_volume_base_uri(self):
        # Seeds the module-level map that volume/controller URIs are built from.
        self.module.storage_collection_map.update(
            {"storage_base_uri": "/redfish/v1/Systems/System.Embedded.1/Storage"})

    # Valid option combinations accepted by main().
    arg_list1 = [{"state": "present"}, {"state": "present", "volume_id": "volume_id"},
                 {"state": "absent", "volume_id": "volume_id"},
                 {"command": "initialize", "volume_id": "volume_id"},
                 {"state": "present", "volume_type": "NonRedundant",
                  "name": "name", "controller_id": "controller_id",
                  "drives": ["drive1"],
                  "block_size_bytes": 123,
                  "capacity_bytes": "1234567",
                  "optimum_io_size_bytes": "1024",
                  "encryption_types": "NativeDriveEncryption",
                  "encrypted": False,
                  "volume_id": "volume_id", "oem": {"Dell": "DellAttributes"},
                  "initialize_type": "Slow"
                  }]

    @pytest.mark.parametrize("param", arg_list1)
    def test_redfish_storage_volume_main_success_case_01(self, mocker, redfish_default_args, module_mock,
                                                         redfish_connection_mock_for_storage_volume, param):
        """main() reports changed=True and surfaces the submitted task details."""
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.validate_inputs')
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.fetch_storage_resource')
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.configure_raid_operation',
                     return_value={"msg": "Successfully submitted volume task.",
                                   "task_uri": "task_uri",
                                   "task_id": 1234})
        redfish_default_args.update(param)
        result = self._run_module(redfish_default_args)
        assert result["changed"] is True
        assert result['msg'] == "Successfully submitted volume task."
        assert result["task"]["id"] == 1234
        assert result["task"]["uri"] == "task_uri"

    # Invalid option combinations: missing volume_id / neither state nor command.
    arg_list2 = [
        {"state": "absent"},
        {"command": "initialize"}, {}]

    @pytest.mark.parametrize("param", arg_list2)
    def test_redfish_storage_volume_main_failure_case_01(self, param, redfish_default_args, module_mock):
        """main() fails cleanly when required companion options are not supplied."""
        redfish_default_args.update(param)
        result = self._run_module_with_fail_json(redfish_default_args)
        assert 'msg' in result
        assert "task" not in result
        assert result['failed'] is True

    @pytest.mark.parametrize("exc_type",
                             [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
    def test_redfish_storage_volume_main_exception_handling_case(self, exc_type, mocker, redfish_default_args,
                                                                 redfish_connection_mock_for_storage_volume,
                                                                 redfish_response_mock):
        """Every exception type raised by configure_raid_operation results in a failed module run."""
        redfish_default_args.update({"state": "present"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.validate_inputs')
        redfish_response_mock.status_code = 400
        redfish_response_mock.success = False
        json_str = to_text(json.dumps({"data": "out"}))

        if exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(MODULE_PATH + 'redfish_storage_volume.configure_raid_operation',
                         side_effect=exc_type('test'))
        else:
            mocker.patch(MODULE_PATH + 'redfish_storage_volume.configure_raid_operation',
                         side_effect=exc_type('http://testhost.com', 400, 'http error message',
                                              {"accept-type": "application/json"}, StringIO(json_str)))
        result = self._run_module_with_fail_json(redfish_default_args)
        assert 'task' not in result
        assert 'msg' in result
        assert result['failed'] is True
        if exc_type == HTTPError:
            assert 'error_info' in result

    msg1 = "Either state or command should be provided to further actions."
    msg2 = "When state is present, either controller_id or volume_id must be specified to perform further actions."

    @pytest.mark.parametrize("case",
                             [{"param": {"xyz": 123}, "msg": msg1}, {"param": {"state": "present"}, "msg": msg2}])
    def test_validate_inputs_error_case_01(self, case):
        """validate_inputs rejects incomplete option sets with the matching message."""
        f_module = self.get_module_mock(params=case["param"])
        with pytest.raises(Exception) as exc:
            self.module.validate_inputs(f_module)
        assert exc.value.args[0] == case["msg"]

    def test_get_success_message_case_01(self):
        """A task URI yields a message carrying both the URI and the extracted task id."""
        action = "create"
        message = self.module.get_success_message(action, "JobService/Jobs/JID_1234")
        assert message["msg"] == "Successfully submitted {0} volume task.".format(action)
        assert message["task_uri"] == "JobService/Jobs/JID_1234"
        assert message["task_id"] == "JID_1234"

    def test_get_success_message_case_02(self):
        """Without a task URI only the base success message is produced."""
        action = "create"
        message = self.module.get_success_message(action, None)
        assert message["msg"] == "Successfully submitted {0} volume task.".format(action)

    @pytest.mark.parametrize("input", [{"state": "present"}, {"state": "absent"}, {"command": "initialize"}])
    def test_configure_raid_operation(self, input, redfish_connection_mock_for_storage_volume, mocker):
        """configure_raid_operation dispatches to create/modify, delete or initialize."""
        f_module = self.get_module_mock(params=input)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_volume_create_modify',
                     return_value={"msg": "Successfully submitted create volume task.",
                                   "task_uri": "JobService/Jobs",
                                   "task_id": "JID_123"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_volume_deletion',
                     return_value={"msg": "Successfully submitted delete volume task.",
                                   "task_uri": "JobService/Jobs",
                                   "task_id": "JID_456"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_volume_initialization',
                     return_value={"msg": "Successfully submitted initialize volume task.",
                                   "task_uri": "JobService/Jobs",
                                   "task_id": "JID_789"})
        message = self.module.configure_raid_operation(f_module, redfish_connection_mock_for_storage_volume)
        val = list(input.values())
        if val[0] == "present":
            assert message["msg"] == "Successfully submitted create volume task."
            assert message["task_id"] == "JID_123"
        if val[0] == "absent":
            assert message["msg"] == "Successfully submitted delete volume task."
            assert message["task_id"] == "JID_456"
        if val[0] == "initialize":
            assert message["msg"] == "Successfully submitted initialize volume task."
            assert message["task_id"] == "JID_789"

    def test_perform_volume_initialization_success_case_01(self, mocker, redfish_connection_mock_for_storage_volume,
                                                           storage_volume_base_uri):
        """With no initialization in progress the initialize task is submitted."""
        message = {"msg": "Successfully submitted initialize volume task.", "task_uri": "JobService/Jobs",
                   "task_id": "JID_789"}
        f_module = self.get_module_mock(params={"initialize_type": "Fast", "volume_id": "volume_id"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_initialization_progress', return_value=[])
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action', return_value=message)
        message = self.module.perform_volume_initialization(f_module, redfish_connection_mock_for_storage_volume)
        assert message["msg"] == "Successfully submitted initialize volume task."
        assert message["task_id"] == "JID_789"

    @pytest.mark.parametrize("operations", [[{"OperationName": "initialize", "PercentageComplete": 70}],
                                            [{"OperationName": "initialize"}]])
    def test_perform_volume_initialization_failure_case_01(self, mocker, operations,
                                                           redfish_connection_mock_for_storage_volume):
        """An in-progress initialization blocks a new request, with or without a known percentage."""
        f_module = self.get_module_mock(params={"volume_id": "volume_id"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_initialization_progress', return_value=operations)
        percentage_complete = operations[0].get("PercentageComplete")
        with pytest.raises(Exception) as exc:
            self.module.perform_volume_initialization(f_module, redfish_connection_mock_for_storage_volume)
        if percentage_complete:
            assert exc.value.args[0] == "Cannot perform the configuration operation because the configuration" \
                                        " job 'initialize' in progress is at '70' percentage."
        else:
            assert exc.value.args[0] == "Cannot perform the configuration operations because a" \
                                        " configuration job for the device already exists."

    def test_perform_volume_initialization_failure_case_02(self, mocker, redfish_connection_mock_for_storage_volume):
        """Missing volume_id is rejected before any request is made."""
        f_module = self.get_module_mock(params={})
        with pytest.raises(Exception) as exc:
            self.module.perform_volume_initialization(f_module, redfish_connection_mock_for_storage_volume)
        assert exc.value.args[0] == "'volume_id' option is a required property for initializing a volume."

    def test_perform_volume_deletion_success_case_01(self, mocker, redfish_connection_mock_for_storage_volume,
                                                     redfish_response_mock, storage_volume_base_uri):
        """Deleting an existing volume submits the delete task without raising."""
        redfish_response_mock.success = True
        f_module = self.get_module_mock(params={"volume_id": "volume_id"})
        # NOTE(review): these asserts check a locally built dict, not the module's
        # return value — they only document the expected shape of a delete result.
        message = {"msg": "Successfully submitted delete volume task.", "task_uri": "JobService/Jobs",
                   "task_id": "JID_456"}
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action',
                     return_value=redfish_response_mock)
        self.module.perform_volume_deletion(f_module, redfish_connection_mock_for_storage_volume)
        assert message["msg"] == "Successfully submitted delete volume task."
        assert message["task_id"] == "JID_456"

    def test_perform_volume_deletion_failure_case_01(self, mocker, redfish_connection_mock_for_storage_volume):
        """Missing volume_id is rejected before any request is made.

        FIX: renamed from ``testperform_volume_deletion_failure_case_01`` so the
        name matches the ``test_*`` convention used by every sibling test.
        """
        f_module = self.get_module_mock(params={})
        with pytest.raises(Exception) as exc:
            self.module.perform_volume_deletion(f_module, redfish_connection_mock_for_storage_volume)
        assert exc.value.args[0] == "'volume_id' option is a required property for deleting a volume."

    def test_perform_volume_create_modify_success_case_01(self, mocker, storage_volume_base_uri,
                                                          redfish_connection_mock_for_storage_volume):
        """With a valid controller the create task is submitted."""
        f_module = self.get_module_mock(params={"volume_id": "volume_id", "controller_id": "controller_id"})
        message = {"msg": "Successfully submitted create volume task.", "task_uri": "JobService/Jobs",
                   "task_id": "JID_123"}
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_controller_id_exists', return_value=True)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.volume_payload', return_value={"payload": "value"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action', return_value=message)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_mode_validation', return_value=None)
        message = self.module.perform_volume_create_modify(f_module, redfish_connection_mock_for_storage_volume)
        assert message["msg"] == "Successfully submitted create volume task."
        assert message["task_id"] == "JID_123"

    def test_perform_volume_create_modify_success_case_02(self, mocker, storage_volume_base_uri,
                                                          redfish_connection_mock_for_storage_volume,
                                                          redfish_response_mock):
        """When the volume already exists the operation becomes a modify task."""
        f_module = self.get_module_mock(params={"volume_id": "volume_id"})
        message = {"msg": "Successfully submitted modify volume task.", "task_uri": "JobService/Jobs",
                   "task_id": "JID_123"}
        redfish_response_mock.success = True
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.volume_payload', return_value={"payload": "value"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action', return_value=message)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_mode_validation', return_value=None)
        message = self.module.perform_volume_create_modify(f_module, redfish_connection_mock_for_storage_volume)
        assert message["msg"] == "Successfully submitted modify volume task."
        assert message["task_id"] == "JID_123"

    def test_perform_volume_create_modify_failure_case_01(self, mocker, storage_volume_base_uri,
                                                          redfish_connection_mock_for_storage_volume,
                                                          redfish_response_mock):
        """An empty modify payload is rejected with a descriptive error."""
        f_module = self.get_module_mock(params={"volume_id": "volume_id"})
        message = {"msg": "Successfully submitted modify volume task.", "task_uri": "JobService/Jobs",
                   "task_id": "JID_123"}
        redfish_response_mock.success = True
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.volume_payload', return_value={})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action', return_value=message)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_mode_validation', return_value=None)
        with pytest.raises(Exception) as exc:
            self.module.perform_volume_create_modify(f_module, redfish_connection_mock_for_storage_volume)
        assert exc.value.args[0] == "Input options are not provided for the modify volume task."

    def test_perform_storage_volume_action_success_case(self, mocker, redfish_response_mock,
                                                        redfish_connection_mock_for_storage_volume):
        """The action helper forwards the Location header to get_success_message."""
        redfish_response_mock.headers.update({"Location": "JobService/Jobs/JID_123"})
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_success_message', return_value="message")
        msg = self.module.perform_storage_volume_action("POST", "uri", redfish_connection_mock_for_storage_volume,
                                                        "create", payload={"payload": "value"})
        assert msg == "message"

    def test_perform_storage_volume_action_exception_case(self, redfish_response_mock,
                                                          redfish_connection_mock_for_storage_volume):
        """HTTP errors from invoke_request propagate out of the action helper."""
        redfish_response_mock.headers.update({"Location": "JobService/Jobs/JID_123"})
        redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(
            'http://testhost.com', 400, '', {}, None)
        with pytest.raises(HTTPError):
            self.module.perform_storage_volume_action("POST", "uri", redfish_connection_mock_for_storage_volume,
                                                      "create", payload={"payload": "value"})

    def test_check_initialization_progress_case_01(self, mocker, redfish_connection_mock_for_storage_volume,
                                                   redfish_response_mock):
        """A failed volume lookup means no initialization operations are pending."""
        f_module = self.get_module_mock()
        redfish_response_mock.success = False
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
        operation_data = self.module.check_initialization_progress(f_module,
                                                                   redfish_connection_mock_for_storage_volume,
                                                                   "volume_id")
        assert operation_data == []

    def test_check_initialization_progress_case_02(self, mocker, redfish_connection_mock_for_storage_volume,
                                                   redfish_response_mock):
        """A successful lookup returns the volume's Operations list verbatim."""
        f_module = self.get_module_mock()
        redfish_response_mock.success = True
        redfish_response_mock.json_data = {"Operations": "operation_value"}
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
        operation_data = self.module.check_initialization_progress(f_module,
                                                                   redfish_connection_mock_for_storage_volume,
                                                                   "volume_id")
        assert operation_data == "operation_value"

    def test_check_volume_id_exists(self, mocker, redfish_connection_mock_for_storage_volume, storage_volume_base_uri,
                                    redfish_response_mock):
        """check_volume_id_exists returns the underlying identifier-lookup response."""
        f_module = self.get_module_mock()
        redfish_response_mock.status_code = 200
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_specified_identifier_exists_in_the_system',
                     return_value=redfish_response_mock)
        resp = self.module.check_volume_id_exists(f_module, redfish_connection_mock_for_storage_volume, "volume_id")
        assert resp.status_code == 200

    def test_check_controller_id_exists_success_case_01(self, mocker, redfish_connection_mock_for_storage_volume,
                                                        storage_volume_base_uri,
                                                        redfish_response_mock):
        """A resolvable controller with matching drives validates successfully."""
        f_module = self.get_module_mock(params={"controller_id": "controller_id"})
        redfish_response_mock.success = True
        redfish_response_mock.json_data = {"Drives": "drive1"}
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_specified_identifier_exists_in_the_system',
                     return_value=redfish_response_mock)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_physical_disk_exists',
                     return_value=True)
        output = self.module.check_controller_id_exists(f_module, redfish_connection_mock_for_storage_volume)
        assert output is True

    def test_check_controller_id_exists_failure_case_01(self, mocker, redfish_connection_mock_for_storage_volume,
                                                        storage_volume_base_uri,
                                                        redfish_response_mock):
        """An unresolvable controller id produces the retrieval-failure message."""
        f_module = self.get_module_mock(params={"controller_id": "1234"})
        redfish_response_mock.success = False
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_specified_identifier_exists_in_the_system',
                     return_value=redfish_response_mock)
        mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_physical_disk_exists',
                     return_value=True)
        with pytest.raises(Exception) as exc:
            self.module.check_controller_id_exists(f_module, redfish_connection_mock_for_storage_volume)
        assert exc.value.args[0] == "Failed to retrieve the details of the specified Controller Id 1234."

    def test_check_specified_identifier_exists_in_the_system_success_case(self,
                                                                          redfish_connection_mock_for_storage_volume,
                                                                          redfish_response_mock):
        """A found identifier returns the raw response for the caller to inspect."""
        f_module = self.get_module_mock(params={"controller_id": "1234"})
        redfish_response_mock.status_code = True
        redfish_response_mock.json_data = {"id": "data"}
        resp = self.module.check_specified_identifier_exists_in_the_system(f_module,
                                                                           redfish_connection_mock_for_storage_volume,
                                                                           "uri",
                                                                           "Specified Controller 123"
                                                                           " does not exist in the System.")
        assert resp.json_data == {"id": "data"}

    def test_check_specified_identifier_exists_in_the_system_exception_case_01(self,
                                                                               redfish_connection_mock_for_storage_volume,
                                                                               redfish_response_mock):
        """HTTP 404 is translated into the caller-supplied not-found message."""
        f_module = self.get_module_mock(params={"controller_id": "1234"})
        redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(
            'http://testhost.com',
            404,
            "Specified Controller 123 does"
            " not exist in the System.",
            {}, None)
        with pytest.raises(Exception) as exc:
            self.module.check_specified_identifier_exists_in_the_system(f_module,
                                                                        redfish_connection_mock_for_storage_volume,
                                                                        "uri",
                                                                        "Specified Controller 123"
                                                                        " does not exist in the System.")
        assert exc.value.args[0] == "Specified Controller 123 does not exist in the System."

    def test_check_specified_identifier_exists_in_the_system_exception_case_02(self,
                                                                               redfish_connection_mock_for_storage_volume,
                                                                               redfish_response_mock):
        """Non-404 HTTP errors propagate with their own message."""
        f_module = self.get_module_mock(params={"controller_id": "1234"})
        msg = "http error"
        redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(
            'http://testhost.com', 400, msg, {}, None)
        with pytest.raises(Exception, match=msg):
            self.module.check_specified_identifier_exists_in_the_system(
                f_module,
                redfish_connection_mock_for_storage_volume,
                "uri",
                "Specified Controller 123 does not exist in the System.")

    def test_check_specified_identifier_exists_in_the_system_exception_case_03(self,
                                                                               redfish_connection_mock_for_storage_volume,
                                                                               redfish_response_mock):
        """Connection-level URLError propagates unchanged."""
        f_module = self.get_module_mock(params={"controller_id": "1234"})
        redfish_connection_mock_for_storage_volume.invoke_request.side_effect = URLError('test')
        with pytest.raises(URLError):
            self.module.check_specified_identifier_exists_in_the_system(f_module,
                                                                        redfish_connection_mock_for_storage_volume,
                                                                        "uri",
                                                                        "Specified Controller"
                                                                        " 123 does not exist in the System.")

    def test_check_physical_disk_exists_success_case_01(self):
        """A requested drive that is attached to the controller validates."""
        drive = [
            {
                "@odata.id": "/redfish/v1/Systems/System.Embedded.1/"
                             "Storage/Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
            }
        ]
        f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1",
                                                "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"]})
        val = self.module.check_physical_disk_exists(f_module, drive)
        assert val is True

    def test_check_physical_disk_exists_success_case_02(self):
        """Requesting no drives at all is trivially valid."""
        drive = [
            {
                "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/"
                             "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
            }
        ]
        f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1", "drives": []})
        val = self.module.check_physical_disk_exists(f_module, drive)
        assert val is True

    def test_check_physical_disk_exists_error_case_01(self):
        """A drive not attached to the controller is reported by name."""
        drive = [
            {
                "@odata.id": "/redfish/v1/Systems/System.Embedded.1/"
                             "Storage/Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
            }
        ]
        f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1", "drives": ["invalid_drive"]})
        with pytest.raises(Exception) as exc:
            self.module.check_physical_disk_exists(f_module, drive)
        assert exc.value.args[0] == "Following Drive(s) invalid_drive are not attached to the specified" \
                                    " Controller Id: RAID.Mezzanine.1C-1."

    def test_check_physical_disk_exists_error_case_02(self):
        """A controller with no attached drives at all fails with a dedicated message."""
        drive = []
        f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1",
                                                "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"]})
        with pytest.raises(Exception) as exc:
            self.module.check_physical_disk_exists(f_module, drive)
        assert exc.value.args[0] == "No Drive(s) are attached to the specified Controller Id: RAID.Mezzanine.1C-1."
+ + def test_volume_payload_case_01(self, storage_volume_base_uri): + param = { + "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"], + "capacity_bytes": 299439751168, + "block_size_bytes": 512, + "encryption_types": "NativeDriveEncryption", + "encrypted": True, + "volume_type": "NonRedundant", + "name": "VD1", + "optimum_io_size_bytes": 65536, + "oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD", + "DiskCachePolicy": "Disabled", + "LockStatus": "Unlocked", + "MediaType": "HardDiskDrive", + "ReadCachePolicy": "NoReadAhead", + "SpanDepth": 1, + "SpanLength": 2, + "WriteCachePolicy": "WriteThrough"}}}} + f_module = self.get_module_mock(params=param) + payload = self.module.volume_payload(f_module) + assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/Storage/" \ + "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1" + assert payload["VolumeType"] == "NonRedundant" + assert payload["Name"] == "VD1" + assert payload["BlockSizeBytes"] == 512 + assert payload["CapacityBytes"] == 299439751168 + assert payload["OptimumIOSizeBytes"] == 65536 + assert payload["Encrypted"] is True + assert payload["EncryptionTypes"] == ["NativeDriveEncryption"] + assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead" + + def test_volume_payload_case_02(self): + param = {"block_size_bytes": 512, + "volume_type": "NonRedundant", + "name": "VD1", + "optimum_io_size_bytes": 65536} + f_module = self.get_module_mock(params=param) + payload = self.module.volume_payload(f_module) + assert payload["VolumeType"] == "NonRedundant" + assert payload["Name"] == "VD1" + assert payload["BlockSizeBytes"] == 512 + assert payload["OptimumIOSizeBytes"] == 65536 + + def test_volume_payload_case_03(self, storage_volume_base_uri): + """Testing encrypted value in case value is passed false""" + param = { + "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"], + "capacity_bytes": 
299439751168, + "block_size_bytes": 512, + "encryption_types": "NativeDriveEncryption", + "encrypted": False, + "volume_type": "NonRedundant", + "name": "VD1", + "optimum_io_size_bytes": 65536, + "oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD", + "DiskCachePolicy": "Disabled", + "LockStatus": "Unlocked", + "MediaType": "HardDiskDrive", + "ReadCachePolicy": "NoReadAhead", + "SpanDepth": 1, + "SpanLength": 2, + "WriteCachePolicy": "WriteThrough"}}}} + f_module = self.get_module_mock(params=param) + payload = self.module.volume_payload(f_module) + assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/" \ + "Storage/Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1" + assert payload["VolumeType"] == "NonRedundant" + assert payload["Name"] == "VD1" + assert payload["BlockSizeBytes"] == 512 + assert payload["CapacityBytes"] == 299439751168 + assert payload["OptimumIOSizeBytes"] == 65536 + assert payload["Encrypted"] is False + assert payload["EncryptionTypes"] == ["NativeDriveEncryption"] + assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead" + + def test_fetch_storage_resource_success_case_01(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "@odata.id": "/redfish/v1/Systems", + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + } + ], + "Storage": { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage" + }, + } + redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + assert self.module.storage_collection_map["storage_base_uri"] == "/redfish/v1/Systems/System.Embedded.1/Storage" + + def test_fetch_storage_resource_error_case_01(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = 
self.get_module_mock() + redfish_response_mock.json_data = { + "@odata.id": "/redfish/v1/Systems", + "Members": [ + { + "@odata.id": "/redfish/v1/Systems/System.Embedded.1" + } + ], + } + redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + assert exc.value.args[0] == "Target out-of-band controller does not support storage feature using Redfish API." + + def test_fetch_storage_resource_error_case_02(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = self.get_module_mock() + redfish_response_mock.json_data = { + "@odata.id": "/redfish/v1/Systems", + "Members": [ + ], + } + redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + with pytest.raises(Exception) as exc: + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + assert exc.value.args[0] == "Target out-of-band controller does not support storage feature using Redfish API." + + def test_fetch_storage_resource_error_case_03(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = self.get_module_mock() + msg = "Target out-of-band controller does not support storage feature using Redfish API." 
+ redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 404, + json.dumps(msg), {}, None) + with pytest.raises(Exception) as exc: + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + + def test_fetch_storage_resource_error_case_04(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = self.get_module_mock() + msg = "http error" + redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 400, + msg, {}, None) + with pytest.raises(Exception, match=msg) as exc: + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + + def test_fetch_storage_resource_error_case_05(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock): + f_module = self.get_module_mock() + msg = "connection error" + redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/" + redfish_connection_mock_for_storage_volume.invoke_request.side_effect = URLError(msg) + with pytest.raises(Exception, match=msg) as exc: + self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume) + + def test_check_mode_validation(self, redfish_connection_mock_for_storage_volume, + redfish_response_mock, storage_volume_base_uri): + param = {"drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"], + "capacity_bytes": 214748364800, "block_size_bytes": 512, "encryption_types": "NativeDriveEncryption", + "encrypted": False, "volume_type": "NonRedundant", "optimum_io_size_bytes": 65536} + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as exc: + self.module.check_mode_validation( + f_module, redfish_connection_mock_for_storage_volume, "create", + 
"/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/") + assert exc.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = {"Members@odata.count": 0} + with pytest.raises(Exception) as exc: + self.module.check_mode_validation( + f_module, redfish_connection_mock_for_storage_volume, "create", + "/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/") + assert exc.value.args[0] == "Changes found to be applied." + redfish_response_mock.json_data = { + "Members@odata.count": 1, "Id": "Disk.Virtual.0:RAID.Integrated.1-1", + "Members": [{"@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/" + "RAID.Integrated.1-1/Volumes/Disk.Virtual.0:RAID.Integrated.1-1"}], + "Name": "VD0", "BlockSizeBytes": 512, "CapacityBytes": 214748364800, "Encrypted": False, + "EncryptionTypes": ["NativeDriveEncryption"], "OptimumIOSizeBytes": 65536, "VolumeType": "NonRedundant", + "Links": {"Drives": [{"@odata.id": "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"}]}} + param.update({"name": "VD0"}) + f_module = self.get_module_mock(params=param) + f_module.check_mode = True + with pytest.raises(Exception) as exc: + self.module.check_mode_validation( + f_module, redfish_connection_mock_for_storage_volume, "create", + "/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/") + assert exc.value.args[0] == "No changes found to be applied." 
diff --git a/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml new file mode 100644 index 00000000..2b94f4cc --- /dev/null +++ b/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml @@ -0,0 +1,33 @@ +name: CI +on: +- pull_request + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.10 + - devel + runs-on: ubuntu-latest + steps: + + - name: Check out code + uses: actions/checkout@v1 + with: + path: ansible_collections/dellemc/os10 + + - name: Set up Python 3.6 + uses: actions/setup-python@v1 + with: + python-version: 3.6 + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install ansible_collections.ansible.netcommon + run: ansible-galaxy collection install ansible.netcommon -p ../../ + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color --python 3.6 diff --git a/ansible_collections/dellemc/os10/.gitignore b/ansible_collections/dellemc/os10/.gitignore new file mode 100644 index 00000000..c6fc14ad --- /dev/null +++ b/ansible_collections/dellemc/os10/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# 
server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/dellemc/os10/FILES.json b/ansible_collections/dellemc/os10/FILES.json new file mode 100644 index 00000000..d695485f --- /dev/null +++ b/ansible_collections/dellemc/os10/FILES.json @@ -0,0 +1,8951 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dee254ee9bf55d8338407f70bb76c3880022002a91e1a804274a4cc733b454de", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/target-prefixes.network", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd6264633dfbb82b3b2abd95db6a4d24ce7c834747e37628ce9453d6b9d642a9", + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ad4c2834dd2c4c1d2bb9027b0d478a33f790f9e62e58bb63a247963acbd6dad", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1464bb853617218ab9d0cc4f4e9cf80806e79d88944d34f1c4552b5188880ba5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d0e26f625d487441753393154f4cd2f714697dc87592f824e4ef3c5e13da1013", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5c5d89caa1a4b04bfd0a1259601946bb23eb8a92c2a9669b0f3ca9569f2d139", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4c03a49f371e8720181c1fa5ef9022129fa1699fef19edb21bf835d08f5d94e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03a4a548eb2a047077b132d28616752341b92b8e196890b4fb72f4e1bb22a287", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_uplink_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7dcd2f26d1ed6ea1e001dd47cc618e79cd5f0a8fa6b3e36a4d7b72bcd3b85515", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "038c67cb186cb91c026e8c25d136473e00c866947ac6853296a1d49e2850f82e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf26c47803f887e09c07bf229230916348fced9ebdcd207e0d0a02e5a6c18a16", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27430a9f600b854e9cc8117c8144f7a85b90e19b2d29bed6842a5540250022e1", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ae1a076a2505f9bcde827427c97e684c12933712e89b2c24de74f7e731147a9", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_lldp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lldp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac384b363d9d08fa48c87835782dcc96fef3069e56cdbabec327e4f6eed2aa96", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77602a7e28874df30ea877773c60758a42707cdf1b83907beaac668219dd24f4", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f24a87e3c9d4519e41dece5bb9b6ead5c5c05996517a1cdbe81665fd4ab2d5da", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f597f1a6b29c4dd82db6fc25e93a661c9caf161cc985c2c4cf9793317fba2d4", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12d2266c315abb0949d991954df3a2069b04c8ebe600b5794f1e90323c319291", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_vlt_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79be2281e710189652a5f6a81289a73b20d971498d7fd1fdf9b699415336765f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlt_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f8869617be68b7ff69e659cd1d386f6ecd08b779ae05e41e9ac9648ce03388d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "936cdc70b6bdc97f5f5a3da9c54c2511e5476dae19633dad1230597817a6aaba", + "format": 1 + }, + { + 
"name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "870430b4d011fb3b33683b31414dd43c25ab506e75dd8699888328c7b45ca47b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47cd3660628331fe5d34423506c9f99c85774ee9c11e4c4386ee84a4099b5cf2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5b0d42531d4e746162b097cbf4470738bc35be6081ed6cea196537aee66600d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ecmp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tests/system_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, 
+ "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36d342ffd6d3f9f2b13f830e5f408dff15fe03da2cfdd3cdf340772e9feb7d53", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "399caf267b75f67d2c4b7d09144b1e540d5892ba25fe2f3ed4d8c52f6e5abca2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1461afa655d59defdc17d72a7babef798af905ddc4c1f374bf33ef898cac7978", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33c84177089c1e5e5b429b2a021508d1967297115f8c41a46bdfb756af09be9f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e25465fa854c72b6fa28bfd685b745696562ac31f1e41dad623228feefb39d23", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_system_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fe2efa2aa815cd8e3c5ed5bac0a0bf3e52eecc692477321ff8fa00b6fd6a49b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0f818940efa77c7436a40f0ac4b817dbdd823f56ce122c996aacf1ecf107b73", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/toplevel.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20edcd141fa02b787ffaf488fd5ccbbb32f5f136e0cda2421e20544914d41cfa", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4753a3e2722dc38e608f5bbc92755855d4b6e94e0d1fa3f4c9dcf41d1e4d2d7f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64f0bd4b1e4debdda48f1b2231aa87e66183114bd534b02d601d215bcea6011c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4f0825966fb1a889b2a1115592859be5d1a00bc5b40fd3891fcb798162ebd8b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/sublevel.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49bbe36eae4a02c056f03644d5b684d5999d808cd3847d6b4b9c73ed9d5dfc78", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5be8981c069e5d699a7b11f1b6a002882dc6dadae778f2d594f19ec61bab64d0", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_config/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tasks/cli.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "046c1f590ecbc9ebc6891a7a266dd61ebc350fee85d1d5e1d4eed2ec013f095f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_config/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tests/lag_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd124b87cd1413b1d35a8ade7f7d9e8243db707998f13560931fb02c5354623e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc48f14b613ede66352421b7b1c0a7a2eeef0f41c174c3de4e3e55f2b02808de", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5f7d115aa8f99c563a03357e21947e12a1040233554d57655bbc9cd7046035c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3fad9175702557fd1f2ad02c973b55ec61c313dfdf19a4684b383aeefa5f092", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e73c08ff1c788caf128298f7483d6796194d2bed998af5849ba1920c05d1b5b5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9ab3e363710d250c9fd7ee5da92bfad54e0712ac687a83a2a71d21fea615190", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { 
+ "name": "tests/integration/targets/os10_lag_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_lag_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tests", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tests/logging_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8963a603439108496f3ecfcd6eaffeaf0dd8c2c1571176acb93670bb058596a8", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd1a483a4fe6caca31a171b7b5cdd8a137469c7589ac1f6da10e9844c380c619", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a56287580c306b275be51d53e9bee0d46d348e0d6b6897bed9347be5ff2396e5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4453291b55d2727d39fa686c78b7219de9715e5628740856d87ca5bf566f87c5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "6457fe1c0aa31d88098e5501597a708139f0fe656caad2f4f528782809798e4c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1f5a117dd578a05d487d512f0bf7d79b4ad9d89c6c01fdf6ed633b9ce330126", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be824897c97b322e7e95483a7a976f30144bb5c0dd92d784763ccacc7e8e8ef5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + 
"format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_logging_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli/output.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9404404f4797bca49ffca7a124c7c2d05a261b93a4e889bc519963321f747d7e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli/contains.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c64d46fd01e6afdb612effe95fd2fbdb9d169fdd8ffa61bfc8f887b24f6a1c1c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli/timeout.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ba6c7a58f08497d49fa11e30c572763d6a4c64fbda0f4b5c7970462ef6c173ad", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli/bad_operator.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57c45cd1008cda243635c2512d803fd40aff6eab6d48d0f3669d939a6aa1237d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tests/cli/invalid.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3c9ffb27c38e0c69998113c11d06b69534f184ab355b252c100089f998d8c5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tasks/cli.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ee0b5ac71e5a1585eb464148138380f40e591e6c8365462bc4d54ee40296927", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_command/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tests/acl_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70f29afff510ae18a5cd6b97ecdf75668764347523c15da6c68566ec45d3c6ec", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64b4b3d366d4c93405bc376c8876f64e904ac0f4ee50b8252607b44d64d26d29", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94291d22d4b2739c0a3f6722cc00c56a67851411d7f7b720901cbd11227e204d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8259bf1f41818cbd1b9d6a9ed594b9477507ddaf24a19b215bcaa95020b94339", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66730263cd92a99847b1a2c442f018ef44cfd0f54bf32ac7eb5c3212b54a292a", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "45e939f49ae0073d331b3c194aee32057f2dffc9dd87c005e9893a7306c01043", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "271e90fed1f6a0d182d068ef9f1380b2954f24a562478dfcd51395421e01030f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a76bb998f3b124077f9a276cf43d4a84d9b3e53b470b871cfc9cfced9587ce90", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0890b6de50982637140d3c5ebfd589418877b7ec2365d2e3032536ff05fadac4", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_acl_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tests/interface_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_interface_role/templates/interface_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b89b57c196502b4db8b96eded2c15962b8262605c0b576567f9aefa2100ac59a", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89a8788a11b9a1731059404bc4bffb93a9dfc9548597e93bbde4f6a17e8346ca", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b26d1cdacc6e8890a737c338365787b599fd3020b07ea78c07c2beb860818aa", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34507f8d30ed8d6627ac1bda49fe4498ebcbfc9853b7f88566945b0f81ea03a4", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc05af4226f3fb99f4fa76d99fd984bfad995ce4fbbd6a9e15e428271208ef7b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25bb95a1293563e7ed1f8cf1897431c31dc20560ad1c4cbc27337db4207c7d3a", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"2b03f32aa29954fb19a74f90669d8a77780dcb5e21353f80fd54512d01ffe0f9", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0c8cf327b741e2c8ebaa8794db305463aa1e2bd1e703688c00270782c099443", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e8d5c87d1efce5c559048284e61b672513a60c885e81fba37781a4a9317988b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + 
}, + { + "name": "tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_interface_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b9776c13eeec76aea6d785ac8302f71bc0a6a51f3d485fcc04ef89f58fc7830", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb6d5f9175d1291be9481a1b15573d5d58496c4c6e9d3ed11cf7aee1fd0c61f4", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e18d086b11fe776c0fb3214d8c44b86029cc4754dfdf6d44cf5cafa65579680", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3ee9a76bfcea7eabefde1ebc62545190df51ae77e021897a30d362d4777183c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5890a7ed5a38cb9a39822c7ce2508043c0a3ab22652f5a2aec83ac79432a4b0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f40f95d65fc3baee4fa76364b08cebcb8b3d1f8e922fe760b47d5253cea488ab", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc047a96be30108030f3f11669ef7d444f8992c1f7314ec659be6ca7127cc8cf", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_aaa_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": 
"tests/integration/targets/role_test", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_bgp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4cdf73caceefe7198e3aa9d515cb1d782579a383a31a67ddc2639d4e24c5a897", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3bd502404820f470e59cb02843ca6f4c5dd70d474bdf99ae1f54e26b3aa26f1", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3990c102131993c5bfcd3375aa0663aaa673e1e08386a12a77a7bd4a9efcf12f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba02d699d915ea12b9060d4aa1cffb6744ace4245ce9cf3da34fd3b68924f730", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab9c96a6bc824e55300b18f849cf38a3af5d64ed782dd7468af4a43d1eea2e8f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ed045f05985fe4a03cfc5d0101eb55a7b9a865841f5b5dc14f81ff2d0aacf11", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14c77c86c67145581da1148d524a2e417857aed04df4f40ccb2ace9360a755d3", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6713d9ad92f4d351dcb45f4125ad87f6fade7928da824dccfb3bdd906da6061b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8565083e10e769d00d8b324972a505d423dabb85fa95bae0580dbc00ef395dd6", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + 
}, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_bgp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5cc4372241eb2df4b14f5d4acca951eddce534e4506727c8a923b8da03b89b7", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5083cc81b4bc1e3d57a9d40ce2b7627f183dc713f5da287509c9edc5afc3948b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "217b588ffda030b76fe4d034748ae9520c5599ab10f4a2408e7694263bf0e517", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "031484751006ce7f54eb80cdf76f10aa77caea142ab8584e8f9401cb94dded23", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d39f9d971d23178c3aea8415b8e3758dc280d0fd6e6c2954d8b76f1d11975bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/integration/targets/os10_snmp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30cf1b99dcce1631f708f93b338a5a87ca52a040755529200b99df871f5494e3", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_snmp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_snmp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41fd286bf325335e0558c55d926c230fb6c88ca184396f7fbce94ac01a645700", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f9f81b4822104265fba8ddf82705413e4d358f7d3f05316851db6cde31a3129", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "810cc6fb8ef4487a767c4e3761b85a6790bab055883c8bbc96219386cfd1b181", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b57bbbda1567a16770400417eea96f1a330c5d84892ed629923b5abbcfaf37bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba784953137edd01cb1221d4090234fe9bb7877150397c4d963ed241e5495534", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9fab029b4805d461a60368e84bbcfde5aada1b7e5f9b225c7801b07759620e6", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77e81f8cee030414317096006a3fbdf931b560bb9680d496ae9292e54ad1c09d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vrrp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_flow_monitor_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21c67da734056f54f0f661686d32cb70ef1e40462a23c5d1bfda041fa443b47d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32d91017598fe87187773edc5ec054b3ce252f98772f26a5e438f823c1501c69", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1637733203ace9d52bb3e84a800f97f804744559ddac33d40c4324b997826f9f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea0eedfb075fca42a7026b653a9ffee006039de86b0d689efe16aca7f55a90ae", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b44b9d483d108da304de5deba1b3bddf898ea1acf1a53368cf163dbc21044333", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/vars", + "ftype": "dir", + "chksum_type": 
null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40e22288d08ed92b3282c053ead73aab881e396a6eefa37f3bb520623107f9f7", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks_old", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks_old/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a1ff989bbc9d899940da328d57ca6a361aae2c0beb1a31eef3e52cb7fc9da0e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tests/users_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates/users_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05d4cb24262b361ccf5503e3a851e01ea8e9e749ca4ef9b1f85098c183638ba1", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17fea757e088e8509ec4c02316c74462fef45874ed6f7f40bf342d8dc78dbed8", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "856c30f658de79f4fd5fa7c97ee33d75572ccb106dc05e546bedb2c9d23fc95d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22f70a2838c06e331fef2663423ed2d7151dc162903da79d5a031d8daf90c71b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_users_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 
+ }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ed3afc848f77dc5bf4645082e764f64e2a46c468e8375905d7df2550f49a4bb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a086d274c7307960594d74bd1343db52aaf5efc732e6df268960f31a8b8ef9a9", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3222eeb3fd8518341ef5ee5f7a487c0788bbe854c92141874826c7327a5f0d25", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "513806605be25208e46714482778b8b95e0d87224ecdef69485f92e997ef6a26", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f58eb90b9bde477bacb06bde02825150a5db4f1b5155570e99a1f5142eaf9308", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c97f697071e92385e9e6830329f2550eae4e5069bdacf55b9c52b3312de803d", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e31a4ea0f664df37e4f6d70fbf845df27f397bfb45383704d6d10ec40419287", + "format": 
1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "453c623503a121c01efc3150a9cf85839e79d940433987b336aa22441d36f64f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a95acc0962e1c997759874f98f3e8f0c61da997449a74a596d1b64e491563ce", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e5f3ea47b47d7c2f3f621420cf8fdb3461965ce6387ea4c08a4278a6df42a2c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc10529635b27eb3fbb63341bf5d9b601e5077d27fc1a7d9df04c22a49433d6e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { 
+ "name": "tests/integration/targets/os10_xstp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_xstp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_vlan_role/templates/vlan_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da2cf3e3d67e2d2ee660a478d4de2abb3075d346f83746773e6e644e1894c5d5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f0d54565e30202e99fab5271a25445fd575c5bcf9fa2189adcad456560456f5", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7d018be27caad0ba12b873bea941393124eb882c779a4c2df166739d58fcd59", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc8439541f2b0a111b2261b4dc098c14fc4154534d6d3392bf3f69e39f03a837", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5843c89738aba44d1ffe89785b1500e0096638583adab6e66191cd4cb6a278c", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02270afe3b8c27f33e289029e685b17565928ddfc38273bf7af2200c23baf36", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + 
{ + "name": "tests/integration/targets/os10_vlan_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45e8e08ac1d4dbc419ef2fdff4a2117cb3ee6a1a83e81cca79b99a86657c011b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_vlan_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_vlan_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e26d9301cc00a368b8ef0ab68fd65bd0eb3876b3c435747772d50f46c21abc14", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2503f674bd07f01c1530098917205037b6a07e87a02acaa14a7b629b36fc102", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f328017cb2eea83066a992fe86379f12acee5906a14d7f0ff163345d1e5db6c6", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3445b062cd7971c48ab9945e5de43d27b78a324797aabdc541a20b72238e0415", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cb7300c38b7720edfed6beb987fb5545c33923b5886749e72c5413936038207", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d9f489833fc3c45b9925e75b8d5a1fed9ca67d0a6515bb97e43b5ce6115ffed", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_route_map_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_prefix_list_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b6614abd2f498fbfaaaab25c150c300d9611ae766db1a97e7538fa54f7b6219", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a22f6694e88cfb27b23029f8a4afb63449526eaeae292463832df6c7d5efca2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "278fb587b324a50694295730162e5c811622a28af0391f23497d41efe5b657cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7b175087cd7b576713915177cd1c3c28d9ca679f30af7e19cd2965693a486a3", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a22f6694e88cfb27b23029f8a4afb63449526eaeae292463832df6c7d5efca2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/vars", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ecb58e50c5fce555b18c49d94fe4fd3dcbd462b0336ec836c66c517b6d10c29", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_prefix_list_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tests/cli", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tests/cli/facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ca0c676c3247ba63fd1a736444d479bbae1132e72e25cb0914510a3febdd03b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tasks/cli.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ee0b5ac71e5a1585eb464148138380f40e591e6c8365462bc4d54ee40296927", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_facts/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tests/qos_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25606b78a5f8a7b79e8def63ae12eaff46d736320d09ca635d57def0aea7a1f3", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bfbcb597c7477c68ee508dca873550971e8545cf57afc6120a68d9a521aa0d8", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f60945c13a7edc8db88fa6f152f97094d3ee6c310566cc15895304903b91497a", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "dfce41c5684933653d6216ca71bc1ff414126188e973d475d551750c2f89c24f", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96ec3510b727a3c486a9e29f08ae120bd90bd70f55a5cacb44796ef8eea49bfa", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/tests.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": 
"tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_qos_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fe912833efabcbf0d06e33c1e13524b0548199f483e274fc1025774826c4ebe", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2", + "ftype": "file", 
+ "chksum_type": "sha256", + "chksum_sha256": "0fc716243126e26a45456b49a81aac41405fe6ba2bca6c739b35b497a255bc09", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c45ca707d76cccebe4198a70c9426cd42847390cae92757aed4fd165f8570f8e", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "928ff4f33699b229a59276ceb1f24e5d22446835eb969b58cffe61c009035fc7", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de2ae09d153f14f3ee2691e64e441b045e246119169f67b60324b432b6157681", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5083cc81b4bc1e3d57a9d40ce2b7627f183dc713f5da287509c9edc5afc3948b", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "778ca7bb8974f2697e4591c0ee0d99e00cc7bba1c7e07a3b54aaffd67ab219ef", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b8378c4f0cb839eb2fc3456cfdf667b1a0856245c9850aee1d2f44a7c8c9d60", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/tests.yaml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/testcase", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd", + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/os10_ntp_role/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { 
+ "name": "tests/unit/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/modules/network", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/os10_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9349583625868b561568aa6b370e5da954e6567baae5206d649fe005b97497b", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/test_os10_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5adc3e2b16b21d21d73cb8a9cdd0e28eaed0715414ab5d7ffd2cccad3e7bb89", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/test_os10_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c0a64846f3954cea9a644f416bc08b0bf3fde6b097fbf2286aa4c2e4fa65519", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/test_os10_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "246875d5c4a286409f2bb523be2f987b7898ef4509fd42a0f11ed32a13274f17", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_system__display-xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f22f3fe42e53f39403ee015ef68ce551201e6ef6c1a1ae4bc963982c5be7d855", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_version__display-xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "258f08e34a8dc36e7d5a6c2457526f563172ee3a0ae2f3dd89b93d48d114a883", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_running-config", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e18498e855071e4d4a55d27f71e02cdfc2aee209a9fb1161c0fc260bae9d119c", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6dd7743eda61e2a7a4b5baea468a5bd9093790b50004f1d2977db910f65065c", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_version", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06298058714a4f3e00626004aac888b8cee0e93fc322f78b63902c2ee3daba6e", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/os10_config_config.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cfd0377665f893639236d5f87e031a944966b24439fb243962d1c1565069e59", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4aa2574cf8851166f5d42f53a3c9c4ce4c1c1d42f459d8e4c6f3aa5e8302493", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/show_interface__display-xml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b18e0c951436423f17735b55c4e9db9348402ffd8ec6e384fd929efe88d7386b", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/fixtures/os10_config_src.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55739f1fb3c63abe984ffcc2c663d1c66d4c5ad90a05d48ec2bdb2b8da1b8da9", + "format": 1 + }, + { + "name": "tests/unit/modules/network/os10/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb3e67ff678384b46656f54d2d1dac67880f21df825e084da1412582577a6924", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a36f6c078712863fd489b1811be21c6b23c3f901ad34ad259a93d08985f698a2", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ca93887811a83a2df421775191c0b50594520168a8ab9459a43a58b2160a01d", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "563a9cadb1c7ce0ecb1f62f033c3959342805be145dc20f6b1bf81c23b2ed412", + "format": 1 + }, + { + "name": "roles/os10_vrf/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_vrf/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8d9410432844f8dc85022738544ba585f5913e870a5e01942d96e0e340ed99d", + "format": 1 + }, + { + "name": "roles/os10_vrf/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b39e5f6d223be657b598c57f645780fc7702ffe172edcc6d54acda11a4897cf", + 
"format": 1 + }, + { + "name": "roles/os10_vrf/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/templates/os10_vrf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "efdc1703a845b53bf89516f5c00fdda272125c03826be93f04cf28ced504703e", + "format": 1 + }, + { + "name": "roles/os10_vrf/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48333a7f4910ad871d4166baf30acc13b4ca8272df8e755cc8ed8d230859ae5e", + "format": 1 + }, + { + "name": "roles/os10_vrf/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20318378249dcdecd863ac6dbf6e9c974e9405508ef0044fb60a9df87bdfcd6f", + "format": 1 + }, + { + "name": "roles/os10_vrf/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25b33f208a9b255533b417a2789bc76b7e861b2660d99766a4990c2df201c30a", + "format": 1 + }, + { + "name": "roles/os10_vrf/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrf/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43a4548f4d185e37899418fbd9cb45007f7abd47d9e6c6bd767d5f528e0b7eb2", + "format": 1 + }, + { + "name": "roles/os10_vrf/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_vrf/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/os10_vrf/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d45c4bfac82a5c80b7b0084b90a3a9c8df9209e02c335987292be4bed9cbd071", + "format": 1 + }, + { + "name": "roles/os10_qos", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47508b2209d8933ede8aa881d3e42507baf52088fdcf1682dff4cb3dbacd2409", + "format": 1 + }, + { + "name": "roles/os10_qos/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_qos/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d86ce4aedcadcb72a47078a74734369917f32c3c27c1b6b5e997174fb618bd3", + "format": 1 + }, + { + "name": "roles/os10_qos/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4012c691ed8545d2820f04721035e599d78a0521706aff7fa1d1808627d28d54", + "format": 1 + }, + { + "name": "roles/os10_qos/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/templates/os10_qos.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04eb355caed6674616fb61d6bd5326a721937c54bdf77a52a90ce0bf69e5b0b5", + "format": 1 + }, + { + "name": "roles/os10_qos/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1af7dfa2b058753cbddd43826c703f09173f4eaa9ba04f7cb4eadcfc5ae0970f", + "format": 1 + }, + { + "name": "roles/os10_qos/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + 
}, + { + "name": "roles/os10_qos/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25451af055cbfa149bbcfb9e931bf6cb33cf3014a2a94e5d7c52cd19f8ba822a", + "format": 1 + }, + { + "name": "roles/os10_qos/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "769984874a273a9eb32760bb64b4cb5da59f4fbe482f24dff3d9f4cb3a6f9a93", + "format": 1 + }, + { + "name": "roles/os10_qos/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8741408dd745230550168b627a77abfbf367803b5a0e73edf56a337e2a11477", + "format": 1 + }, + { + "name": "roles/os10_qos/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_qos/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_qos/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba51d4193d632320fda2caf2c12cd37c99dd3c2d2b7eb20b802e65caa9c87efa", + "format": 1 + }, + { + "name": "roles/os10_dns", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d7a878dd74478a8e5a90b8f365f0d158ba08b1044984d6ad9d375314cb25f08", + "format": 1 + }, + { + "name": "roles/os10_dns/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94f9aebb9791d2731255777fbefe23c60fb22149d6cb6943a4f6929baf4d9689", + "format": 1 + 
}, + { + "name": "roles/os10_dns/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_dns/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43934ca8dcf90dbae738eee82b489df8e54d4a897f4f0e3aa7b89512cac2be6f", + "format": 1 + }, + { + "name": "roles/os10_dns/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/templates/os10_dns.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4888c3203fcf4d25339f7d1776d13cc143c516e655af0df26e5d139839d3bb09", + "format": 1 + }, + { + "name": "roles/os10_dns/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5adc81357a3738e7a94b9e4a6ecd2c784219131ec203bb1bcd305fb472a03ff", + "format": 1 + }, + { + "name": "roles/os10_dns/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da15b3c7b3ca860fa29c2b18580dad7256546e7fff3d4f60a213050ef38efbd0", + "format": 1 + }, + { + "name": "roles/os10_dns/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04ee6ff105792050e1812070d4cef75c1e18612e5eab0c7847f1c249598efcc0", + "format": 1 + }, + { + "name": "roles/os10_dns/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"864243055c727f0f5935c1f13bb6f44783890d6762922ea90a88e9eb3ad4a384", + "format": 1 + }, + { + "name": "roles/os10_dns/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_dns/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_dns/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e713093e28077901633582e5023d6f6336be1f394f0b07bdfd484d4539077fdf", + "format": 1 + }, + { + "name": "roles/os10_interface", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aeddd44f2c7f6d17211606d02555416c6bb3f3319bbff45ea634c665097715fa", + "format": 1 + }, + { + "name": "roles/os10_interface/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_interface/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13faa3d363b73b65da07280e2d21c0087f8eca07c15491fdacd09c38f2616321", + "format": 1 + }, + { + "name": "roles/os10_interface/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e8c70ae112f0b6eedd6f7df1ada479e1a35ba8d1b584270dfd2bde6e207fe38", + "format": 1 + }, + { + "name": "roles/os10_interface/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/templates/os10_interface.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0a9feed1029f79dadfa6eb1cc9bcbb39c57bc97190dc82ad61e74ef14ab598ae", + "format": 1 + }, + { + "name": "roles/os10_interface/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7ac54c9ccb6409b6e8ffd41ffb96db8384a1ff03addf9e25a34e3565fb1fc6d", + "format": 1 + }, + { + "name": "roles/os10_interface/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecfaae6853318db03899ad122d13731cdb4058ee4e77bb802d64ac0c6ea20824", + "format": 1 + }, + { + "name": "roles/os10_interface/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e5b8ea5bb8a09389a1eadc1eca701b5e9b0f073f5d748b6f28b4cf1bcf0869c", + "format": 1 + }, + { + "name": "roles/os10_interface/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1db1c2b5ac629c9ea3cc9f1c0fc8d56fa230cc1c8a85a7beb975f01a210aee54", + "format": 1 + }, + { + "name": "roles/os10_interface/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_interface/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_interface/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75d2a975b644cd7fef6108087c834dd4c144810a9945876f9b979d87fcdb3639", + "format": 1 + }, + { + "name": 
"roles/os10_prefix_list", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b53872392d389fff12d93f0f89a85893c3a4dff81b7a29cc40072ad487734183", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5efe4259fe1e8fa9a662aca40abf418b0f48eeeaedf7c59eddd06974f5b1179", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6e5429b918bcd489a5a07c21f480989d5eb72304eba5f524552b0a899b45fde", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/templates/os10_prefix_list.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7166b7aa4a5eadf5d7036b25ae7e6718b109f909cb768acb963ef6c5346a46bc", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "683ac64d2b9636f2e1469e8d738b88749d6896649b136c62d3c6321512c5a887", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/handlers/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "476f80dc41f83236cc4af3af174451d89d4588cc42ecc1a4e2a3d5b43f63bb3b", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea5b0afaf9945f59836bedcc4263108995086fd6962443f29658bd8245996c7f", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9f0a7fc55900b2dd1b724ef42637c401bab31748414f4db0c99c5bc73a1274e", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_prefix_list/meta/.galaxy_install_info", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fff6a8db965bb26187afecc326a56281f74bcde1d5759bedf9874c72aba696fc", + "format": 1 + }, + { + "name": "roles/os10_prefix_list/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a592727b6802b1e47956477d630a6d0f02ac3f38e9ff42e67ff4b00ce553abca", + "format": 1 + }, + { + "name": "roles/os10_system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6f9fa3aebc2738e4c3f5f3a49fedfa92575553a0bf93a101a76967bc63954bb", + "format": 1 + }, + { + "name": "roles/os10_system/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { 
+ "name": "roles/os10_system/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_system/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c174145b3348883c81c119c7e941ad4da952b0d02001f04b80a942108939867", + "format": 1 + }, + { + "name": "roles/os10_system/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "520ed65df1e5f3526bba7d6021312a02df0c259019f0667209d142c2cf43ce67", + "format": 1 + }, + { + "name": "roles/os10_system/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/templates/os10_system.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57e1e32fffef8663700e790e71277ad0e3b8e263f10d21965bcff7115e56898d", + "format": 1 + }, + { + "name": "roles/os10_system/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "238dcdae07f217c1e64583262f8fe18a6de757c5959172b49784e4183ac44516", + "format": 1 + }, + { + "name": "roles/os10_system/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dff73f186ab1079802c73fef50e8e7d533fcf7aa8517cebb39425d6e2b8a18e4", + "format": 1 + }, + { + "name": "roles/os10_system/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e1b14922fe9bf4d9ac307185d8dff8eac7d98d2e33c6237a2b08558ced95061", + "format": 1 + }, + { + "name": "roles/os10_system/defaults", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "894b1ec35a60fea55d969c548964de6bb89a50cffbb1ac42cbed481768c4f032", + "format": 1 + }, + { + "name": "roles/os10_system/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_system/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_system/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e43a6cce922b7e77005b4331d27fe3cd205617abe34dcbeb34aaf7f0066e1c89", + "format": 1 + }, + { + "name": "roles/os10_template", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d311ae7470c42f2f31a7d688121e3ba8b240afe5fa917d4ab2b4fe99338055e", + "format": 1 + }, + { + "name": "roles/os10_template/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/tests/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/tests/group_vars/all", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b1e93949330f1a344c787df97038cfa6d122285f204fb3a1b6385603c2d248c", + "format": 1 + }, + { + "name": "roles/os10_template/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c21302855000db5fd5e9f1573fdc03b97a7cbe3392efe3e0013da373dce3596", + "format": 1 + }, + { + "name": "roles/os10_template/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b134978967ac2f8ef1951c1246c196caaba06421ec57bccadb8498514e06c26", + 
"format": 1 + }, + { + "name": "roles/os10_template/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e703d610802949e54787230fc461f04ad2a8af5e9e65533c6f53d8ffa19c345", + "format": 1 + }, + { + "name": "roles/os10_template/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_ip_bgp_summary.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f8b1427d59f64f9f23c9ef60b44e3adc2ad5cdb6af65d169ee3542e3df7a226", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_port-channel_summary.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4be2ecbcaeaf7c87edc4dc1c317b50bc90a11e42f3b06178bd58cf8370f15205", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "495588aea4ab674ccef67c158e0688d212af552ae2be73c3133a88cbe138ba1f", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_lldp_neighbors.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48f9d390a818cb19307f186e715ca09ca0c477666c45e4f95bdc9f00c32d01d7", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_ip_vrf.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb3661256bf365afd209f1b3cfe63acff96ed605870795452166f0d50359dd3e", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_vlan.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7b08a9326f7dd8d9a0512cae918f1903a44a55f24e2cf62b8478316fbbc8254", + "format": 1 + }, + { + "name": "roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb8d12baa8d65787ec6bfe263d8cd67b2b7f8c65332ad49d0ada87e4e7ee1792", + "format": 1 + }, 
+ { + "name": "roles/os10_template/templates/os10_show_ip_interface_brief.template", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09c504a2eb0e35fe79e85c2d5b9b417b1c5545583a122bd353563fcac6631fc8", + "format": 1 + }, + { + "name": "roles/os10_template/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "943d291b756e2ac2a075a2004a2ddb8caa148f704a632ef75634818e179a7fc6", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_lldp_neighbors.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65b5436254aaf10a5d77b354cc322a5405cc4c44d7a16fa39aa670b0fabe6afa", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_ip_bgp_summary.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b51a689d0b09d992ad8b492a754d5e7d6af449f9e801c241738362b211438753", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cfbc4e32774b6e1303a25e1a3a56b9eee8bc54daa50bc511d12af3c54985b11", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_ip_vrf.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72b260949bcfc2535794d50deec940eb04f38f8209a7d965f779d81e582c3ade", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_vlan.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9118d65024264927e657487606ac92a4eaac1a29d14286886f40194053d6d486", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/textfsm.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a76b1f05772eafab2d95e4bc644a314d97a6a727d7ccb85cce45372081e3c98f", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_ip_interface_brief.yaml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "3138ce2648d4507a62425dbb76d035083c5bb5310799e619f394b3cc19ff7676", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a25dd113f8bef549d87f0369acf0b80f1dd9dbcb396b1300237f90ba6dcb27fb", + "format": 1 + }, + { + "name": "roles/os10_template/tasks/show_port-channel_summary.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fc9a394046acaec5d9819cb11311d1d39b11c699313e734748fc802a108b4e4", + "format": 1 + }, + { + "name": "roles/os10_template/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_template/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_template/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be70f3f8008d9eb8f8f6012880e576ab29edd59e1c92698f93109d88d2ecff93", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d07249dc4347118c841ed0740b1c8eea0351d593f943fdb1922306173842f7e", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2754f0e1d88508b9378acc6c770239fe810d7149765ccde298e9ec2d15359f2e", + "format": 1 + }, + { + "name": 
"roles/os10_fabric_summary/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fd54e1550ea6b41b6272e12f8584779ce043ac0e6a4ef2ee44de2136855bbb6", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/host_vars/site1-spine2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/host_vars/site2-spine1", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/host_vars/site2-spine2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tests/host_vars/site1-spine1", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9a710654050c8ec0ff24355c999cc7d6b472d30059eafacba139853c5e97b6d", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_fabric_summary/meta/main.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "72f2a96b9a15847e7e948bae69d8318983d42a7e1e14718484d5f060042206cd", + "format": 1 + }, + { + "name": "roles/os10_bfd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9155aa75d45da527a150711f44a5d46195a07761fdc2326da9ce389c4f6bac6", + "format": 1 + }, + { + "name": "roles/os10_bfd/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9209c2609e24006f7a95935b3a9c342d776dbe31f537a5f445b9f39c5a7ba8b0", + "format": 1 + }, + { + "name": "roles/os10_bfd/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_bfd/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bae4876627be0da4ae45c11f4e3d14686c98c15ad6ff7c65c95f22a93cbc3998", + "format": 1 + }, + { + "name": "roles/os10_bfd/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/templates/os10_bfd.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8686cc925b9c907512f582c0af0a5e5daff0ea1a9f3818d7f210c2cc3d8b3bc0", + "format": 1 + }, + { + "name": "roles/os10_bfd/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "814fd791e0988bf3397ef69f8f8bd4bf97910fa390f6e9697c2bd8b5cd326457", + "format": 1 + }, + { + "name": "roles/os10_bfd/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/handlers/main.yml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37924af5558f6ca24c1399f212fd0c7de703cbbbe0814cb253cebf09ec03b43c", + "format": 1 + }, + { + "name": "roles/os10_bfd/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dbac54437ce53f90811f0472a8e9f242e17c5cab85fcad5faf818b5e3dc61f8e", + "format": 1 + }, + { + "name": "roles/os10_bfd/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9893b6a378c9bab4d39407e7b9ef878ae7d25ae68134bbc6f4156ed49c682d2c", + "format": 1 + }, + { + "name": "roles/os10_bfd/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_bfd/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bfd/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f535c91de0a7152f3e8b34e50696e9c5c17b3e0f303e5a0500a7dcf1ad07f1b", + "format": 1 + }, + { + "name": "roles/os10_bgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c0330969b4488def090626fd3ca345b0d5fd7d13024b8f7bc5d099a4d61bf5f", + "format": 1 + }, + { + "name": "roles/os10_bgp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": 
"roles/os10_bgp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11381f54e8ad097f39125218c22a2289cb177f2633a1fd4fe60d877d14075d5a", + "format": 1 + }, + { + "name": "roles/os10_bgp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "829f2f19e06879ef781c9f42fbdac798699b92b0aa4f1fa03c20230707ae63ea", + "format": 1 + }, + { + "name": "roles/os10_bgp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/templates/os10_bgp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83f1f39f35731290e0c16f9fb4b5f40effc06a6c7148b08d7a18041baa3bc96e", + "format": 1 + }, + { + "name": "roles/os10_bgp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ce824753ae5e51d5096c4d0330d02c7ba7ee2a4d1e9950557457ede4d51ea79", + "format": 1 + }, + { + "name": "roles/os10_bgp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "554908747a3a38554d8d546ac6df4d952dff072db5e5a6887a0e51d0fe92ffee", + "format": 1 + }, + { + "name": "roles/os10_bgp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2410d1e75d200951a1a323149e6a4b95cfc6fe198a3f068a20ca9049c353bb0", + "format": 1 + }, + { + "name": "roles/os10_bgp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"18e3284fe021e3ccd315d1ee37fb1e50fff6db3dfb39600d22dd13da0339cad3", + "format": 1 + }, + { + "name": "roles/os10_bgp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_bgp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_bgp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b2e840c2981c38c49f6c8c729312b412b66b6ca2622bc94e38540d446ccc977", + "format": 1 + }, + { + "name": "roles/os10_vlan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba6599c1a2ac34d0361638276f081621d8b31823c3fa4d01fe952a469c3a5692", + "format": 1 + }, + { + "name": "roles/os10_vlan/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_vlan/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90d75c30cf368c94b15f0e67f5dd42bf3be7079727ce68799344fd7fc8f09fed", + "format": 1 + }, + { + "name": "roles/os10_vlan/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7e6522987551969174daba5973aef288aa6ed91b59855a3406bf2e31ffd6632", + "format": 1 + }, + { + "name": "roles/os10_vlan/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/templates/os10_vlan.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69a1923fe2d15d97b33d28f5c9dbbe230fc3cf3af741cd98a3ca40410f49c6db", + "format": 1 + }, + { + "name": 
"roles/os10_vlan/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6464d917fb54d60e7c03c4f437e12795452246458aa27ea7f674afcdf744c2c", + "format": 1 + }, + { + "name": "roles/os10_vlan/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e2317f1dad320074b0d71bd2416faefc5007ba40dcd99c196ee79e23cd5ae6c", + "format": 1 + }, + { + "name": "roles/os10_vlan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e05831587de755162befd688ae927a8b92504f7d7c4e5cde6ce67cf35f7f980", + "format": 1 + }, + { + "name": "roles/os10_vlan/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe8c107015a54e155ce06dace6919d89837d025d667cee3dc293ac602b7e9b46", + "format": 1 + }, + { + "name": "roles/os10_vlan/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_vlan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlan/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8970684379fb0f71b0752e70e3b09791dde1e20117403ff98a06333fce205b3a", + "format": 1 + }, + { + "name": "roles/os10_lag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/README.md", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "38b04b2c00265af0b32b43f8e0057565e818e088401545600b1476ab6f090fb8", + "format": 1 + }, + { + "name": "roles/os10_lag/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_lag/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdd9b082fe66bcf825e22a7a2bdb09b5ee000bfca5631a68f97f34bf0dc5e5a9", + "format": 1 + }, + { + "name": "roles/os10_lag/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42e8ff164312c486ba66a307d8d43c11940ab66a73fda890a316dad2fb49edf9", + "format": 1 + }, + { + "name": "roles/os10_lag/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/templates/os10_lag.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80d8472ab536a0f16455a593f63ebda55772ee6c9c55ba0f0086f90b79627949", + "format": 1 + }, + { + "name": "roles/os10_lag/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06492dafca5b9e2d876862aa59bed0855b2a42016f1cf0199846e78d308c90d0", + "format": 1 + }, + { + "name": "roles/os10_lag/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6081a52e02ebdba4b6d50bcf7b939ed072437e0cdbdbf26dbe806e824a03a79", + "format": 1 + }, + { + "name": "roles/os10_lag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/os10_lag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8727a9d0a9b2dfc2123c386c937fa24f137c6f0bad31a0278df0af3eae4b9351", + "format": 1 + }, + { + "name": "roles/os10_lag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f80595af9a680584548d1c63a9ec37db0e5d9dc9330824a77640170772ce43c", + "format": 1 + }, + { + "name": "roles/os10_lag/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_lag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lag/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "596b282692b0c8b54557287c0cac0a30077a34ee1a5f3443cf4e84397750945a", + "format": 1 + }, + { + "name": "roles/os10_ecmp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48f5c4db66fc2f0b993ee1f4fbd40997a402a2dc43760becb694bee7af60e02e", + "format": 1 + }, + { + "name": "roles/os10_ecmp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_ecmp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc8b26d113e94f68344b0a73022ec1b6642bee0fdabbd049400860da31d9b36d", + "format": 1 + }, + { + "name": "roles/os10_ecmp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f5263eac7a34b93e945d4a7703a9fe87f41654453f3b2929583c254c3565967b", + "format": 1 + }, + { + "name": "roles/os10_ecmp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/templates/os10_ecmp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e8acfc826d6090844b4928084c3710b27fac5075987d2a7647a1bc8210794e3", + "format": 1 + }, + { + "name": "roles/os10_ecmp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad97aa8409e0c4cf2cc23d2e313e208abf6b21e5eadb23d64f424f64b8307ff0", + "format": 1 + }, + { + "name": "roles/os10_ecmp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "763d31c712a86816eff6be981731088c42da2a5b0852f3f1fe57da56b57d7dc8", + "format": 1 + }, + { + "name": "roles/os10_ecmp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dac42611a9fa57fa6a0a83cd60637d602055c4ae194635d550fb76f7ebacbb2", + "format": 1 + }, + { + "name": "roles/os10_ecmp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0321e026416b7fd9c2f88159362e8d1f2b65c41beedb3f168fb92ca45f1b2d0", + "format": 1 + }, + { + "name": "roles/os10_ecmp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_ecmp/meta", + "ftype": "dir", + "chksum_type": 
null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ecmp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e9c053d2bf170109e3fa13aea5ffb03a9db7441584555c8e2c1459027838dc3", + "format": 1 + }, + { + "name": "roles/os10_snmp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ebf6b632207048f02fb33090cd122da7b91f64a0dda9e9ca8d28303dc972c", + "format": 1 + }, + { + "name": "roles/os10_snmp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_snmp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5d59a283ccc11966a31af2db9d3183d83b78266a28e26b15f976b04c66c3a38", + "format": 1 + }, + { + "name": "roles/os10_snmp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d56b04e4466345436a0cc5a245f7f275ff94faf68fcdad220014a6f92aaffb63", + "format": 1 + }, + { + "name": "roles/os10_snmp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/templates/os10_snmp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "562362465cd1503d084678d505e51c99c039399087f9f5933c9fb1dff5b87a0b", + "format": 1 + }, + { + "name": "roles/os10_snmp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2ec9577c90e062a77e6462626fbfb81baa3305f5f8626c7361bb8216b02f47c", + "format": 1 + }, + { + "name": "roles/os10_snmp/handlers", 
+ "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c920ee69d360a4c5a573e78c544024df542232ccea968027262ad7158c641ba", + "format": 1 + }, + { + "name": "roles/os10_snmp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5eb25123570c203b04de08fc2f50a67e1f447b0f91de636915d22948e194732", + "format": 1 + }, + { + "name": "roles/os10_snmp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49b9ff32e4872ec4027678a862f12bd9f6a78180ba6ad2b5255e60c9521d5beb", + "format": 1 + }, + { + "name": "roles/os10_snmp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_snmp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_snmp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f3be0e02ccdca4f08555041789a3a607c9fbfefcca1de808ff929d9388bdf10", + "format": 1 + }, + { + "name": "roles/os10_aaa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7874fbb6486c3ce0f7e13cef3426f5a9941b0017674cc085cef979d511bb31ff", + "format": 1 + }, + { + "name": "roles/os10_aaa/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_aaa/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66751106dbf88e567300e2b0c835f27fdc1015a2f9da04bb1f22a0c86f4aa41e", + "format": 1 + }, + { + "name": "roles/os10_aaa/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff5f1b96c5b4ec4a3f9e611de4e239780e7f88b93293fca09ad027129957b22c", + "format": 1 + }, + { + "name": "roles/os10_aaa/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/templates/os10_aaa.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88e02f764a50d9d5131f21745d8b272717d8036f24b5aba516cee99d60b8138d", + "format": 1 + }, + { + "name": "roles/os10_aaa/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65d59ffb82e5ebc61e867465058e9d6ae1669f9650498d5d27723dd3b4ae6a62", + "format": 1 + }, + { + "name": "roles/os10_aaa/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e673f73970ee3ae369f22daa57f0a98574c0068cb8e6c3523dfe633e8be4dd8", + "format": 1 + }, + { + "name": "roles/os10_aaa/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89cbb0d6f26de9476b2a98ac433755f227dbd7cf1d4458b26f402faafcbb3044", + "format": 1 + }, + { + "name": "roles/os10_aaa/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/defaults/main.yml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "873deeb7c99414c615310861f37813e789beb42962a6967c74256cf3a33f15d0", + "format": 1 + }, + { + "name": "roles/os10_aaa/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_aaa/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_aaa/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0370bc97b76c060488b25ad1265ffcad188037ceac6825a22554d9a65cafbb6a", + "format": 1 + }, + { + "name": "roles/os10_logging", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67358b37eda847a70eca21ede16c1f078989a6ef26bdcb167828592620c06a01", + "format": 1 + }, + { + "name": "roles/os10_logging/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59b2a3a4d5a95dd0991c773d92efb07e68bdd61da98b63dff97cc50ee751dd6b", + "format": 1 + }, + { + "name": "roles/os10_logging/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4959bb39ef5381cb61c508070e175d9a29649865be616e2557a35591d422a578", + "format": 1 + }, + { + "name": "roles/os10_logging/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9600c0f56aab9908ce0299c93e83503a959564caaf3479c0f28c08323061f511", + "format": 1 + }, + { + "name": "roles/os10_logging/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/templates/os10_logging.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6a49077c07327ca5ce6fe16c4c507b807089af3503aeceb980c7e35ba3e18a18", + "format": 1 + }, + { + "name": "roles/os10_logging/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf4eba7564bf3cf20b2a11f28b0829615f2a1b8853ef5f10432e5e9b50b22fe0", + "format": 1 + }, + { + "name": "roles/os10_logging/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b685be74eefc0378442ad4dcd4be13eb1d1549f360ef93d05e2dcd642e6a7fa1", + "format": 1 + }, + { + "name": "roles/os10_logging/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac88e43708cc564df91c1e8a2ef9f0ee476f891e9a9999cef9896e92bccf3827", + "format": 1 + }, + { + "name": "roles/os10_logging/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dc2d0ca8bead76638828a9b3b224a4263c4e5d0391990ac876a00d6da638ec2", + "format": 1 + }, + { + "name": "roles/os10_logging/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_logging/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_logging/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "935018e6dd62d4f57fc7e8be1320db76ef26bbb3ab5008bc861c44e50703dc41", + "format": 1 + }, + { + "name": "roles/os10_copy_config", + "ftype": "dir", 
+ "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b7b814ce1af3563e014db3e85d1395bc767682a69f33a6504009cd604d65af5", + "format": 1 + }, + { + "name": "roles/os10_copy_config/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_copy_config/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16f179b8972b382463445d13f20888fcb5f5980e8b75fea8510d358e7d31355b", + "format": 1 + }, + { + "name": "roles/os10_copy_config/tests/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "roles/os10_copy_config/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/templates/leaf1.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8eb520d7a9795fe75d13de190e17201d77da82d4f04674e185dd7dfd66ff0d8", + "format": 1 + }, + { + "name": "roles/os10_copy_config/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8f7b3839a275bb1dda6cc658b7818f060276dbb1297d7ea5927fd33201ef64f", + "format": 1 + }, + { + "name": "roles/os10_copy_config/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"24b3fb3075b12ef503f3c3df760955b01f8fd64d175ca9435c4cff4fc542421d", + "format": 1 + }, + { + "name": "roles/os10_copy_config/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "652608725cb8231e6e56806713e1cd1613fba1240f53702b9bddf31a1aaf1155", + "format": 1 + }, + { + "name": "roles/os10_copy_config/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee2c451634d02a007d669a1ffaafc9715f4013fcf34efb55c391f1aa76d83b41", + "format": 1 + }, + { + "name": "roles/os10_copy_config/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_copy_config/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_copy_config/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17a4f025b9e7eda4c229c8b319b2f5e342d00d2314b53fae442b4773ba83f844", + "format": 1 + }, + { + "name": "roles/os10_uplink", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af218f5633d9ad8970bc83ea4d0112996ea810232a74d35813d31a378d183357", + "format": 1 + }, + { + "name": "roles/os10_uplink/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "174f8113fac66d0c5cb0ea967a222087bb87cbdd53d157ba82224194ed89ebf7", + "format": 1 + }, + { + "name": 
"roles/os10_uplink/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_uplink/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "606b5622997bdf5ac5a149016a56a7d09d2e1a06ffac37650e683898958e186b", + "format": 1 + }, + { + "name": "roles/os10_uplink/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/templates/os10_uplink.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a5ef1d9820fe2a91e4bd4b4ae33b436f9e920a5c2354cc38c1c7514cdce7e7f", + "format": 1 + }, + { + "name": "roles/os10_uplink/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c18562de4e1361b781112f1d25b7085081988ed1c4ef81d4bfc3f7e0de97879", + "format": 1 + }, + { + "name": "roles/os10_uplink/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70bb18eb36b126dd6d0e08692b8ac60c823da65b388684241d263e966a521adf", + "format": 1 + }, + { + "name": "roles/os10_uplink/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28fa2ba1a0d18a45d80a2fe56bef42892b653a49c1a9e273a32c66db4b259b10", + "format": 1 + }, + { + "name": "roles/os10_uplink/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3a08ced67e2d18f14c3f98a7861e7c1e90676cb98403ff9a194b18841b288632", + "format": 1 + }, + { + "name": "roles/os10_uplink/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_uplink/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_uplink/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e48425fe0a4c069ee2bcd1b326515d3d883156c8c6427ec3acc9145cd194aee0", + "format": 1 + }, + { + "name": "roles/os10_acl", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4902c3d89e393cc4678bbe45f3201578a785441d2e57b71798a4b9169a1035ec", + "format": 1 + }, + { + "name": "roles/os10_acl/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_acl/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15d359c83967d4c8065ec202d71f08aa9cfb9cb2ea7f8515ab938298cc7176ea", + "format": 1 + }, + { + "name": "roles/os10_acl/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d1499e88988365d216eea4383d3771a7da6c7808491aedd366a64a2c26b5bd2", + "format": 1 + }, + { + "name": "roles/os10_acl/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/templates/os10_acl.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82f512798eafa8b85f27f3caeded8da7e66d80c228d9694c9fcbdbb2056b4e87", + "format": 1 + }, + { + "name": 
"roles/os10_acl/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32411f8c331dd03641c3843c4e633b9a186917210b3512149a6d67d8bf0ca3ee", + "format": 1 + }, + { + "name": "roles/os10_acl/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7aa31675d890ccb8f2cba267762146a856aa260fa76793226ad6d1018d767706", + "format": 1 + }, + { + "name": "roles/os10_acl/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "62b435c90d0cd1f418a44a14ea7ba9a0e3d6cce0237c404855a09899abbdde78", + "format": 1 + }, + { + "name": "roles/os10_acl/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "600f3b1f85b778b3a28616390bf94bb4cdb87fbf6bd7372680313c6465205450", + "format": 1 + }, + { + "name": "roles/os10_acl/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_acl/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_acl/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21cc5b51e05746b675b5a608ae85f7995013476dec3e92c2db809665a4aa5441", + "format": 1 + }, + { + "name": "roles/os10_route_map", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/README.md", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "6818269013ecdd151788b3dd06bba9bc06123b904efc39f89c9738c0315e14c2", + "format": 1 + }, + { + "name": "roles/os10_route_map/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e94645bfd865f3f6ea64b8444a2a6123d65d6ffe7822eff765531ddf59fe5385", + "format": 1 + }, + { + "name": "roles/os10_route_map/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_route_map/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f11741f11a0f817750f2fcf95e5be2131344ee420991fedc314cea9a1e8a044", + "format": 1 + }, + { + "name": "roles/os10_route_map/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/templates/os10_route_map.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44398b74953c02cc8b8d52f41c6a6d8a00ef3aa5e3a6a4f0944f3eb489570678", + "format": 1 + }, + { + "name": "roles/os10_route_map/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5202f1d5be26d843f185ad282b2a9c79219f29c3e25bb9142e1c3b6aab7c0d3", + "format": 1 + }, + { + "name": "roles/os10_route_map/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b5223df0608509782741a0b1b1d620489bfee39a22271879a33be226d563e75", + "format": 1 + }, + { + "name": "roles/os10_route_map/tasks", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc5a3a60138702f492afe0615ad779ada4907a0d291ca6f9cd0db99727d5a835", + "format": 1 + }, + { + "name": "roles/os10_route_map/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "feecce89b8551e332335a6ab8f776b01bbc9a28a1646402ea4df83a78a2e2986", + "format": 1 + }, + { + "name": "roles/os10_route_map/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_route_map/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_route_map/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d27725f7db1d7be5e903da8556375be380be2e292991c95d938dcf4de4dc15fb", + "format": 1 + }, + { + "name": "roles/os10_vlt", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7edcaa0e40c1497e1cec3310108fd578cab638f4d170b06e1c7c94f279e85943", + "format": 1 + }, + { + "name": "roles/os10_vlt/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_vlt/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0a27dd63de43769b735d5b49bb2c38ff6ceee6044829802eedb26e876c6764d", + "format": 1 + }, + { + "name": 
"roles/os10_vlt/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f7c495a659acb9a65c70f99751b8c8a93fff3634bab273a9f5fe4a173372884b", + "format": 1 + }, + { + "name": "roles/os10_vlt/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/templates/os10_vlt.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25e140862387419273a5c85f3c2c28ef342b13d22554027854e6694ab7f676db", + "format": 1 + }, + { + "name": "roles/os10_vlt/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c826942d39bbbe07afa7ce5ec6b25dd1bee3c45973217a9a8f914c61bc15dd1d", + "format": 1 + }, + { + "name": "roles/os10_vlt/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c99502dbf1431a5fa3b9f6c33bd479eca09dc7c718747b50745904118afff4b", + "format": 1 + }, + { + "name": "roles/os10_vlt/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7084e3f87a0be344172701e0fa030f9739ebb915b6c29e6782044f64b7742348", + "format": 1 + }, + { + "name": "roles/os10_vlt/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "637efe3f2b6ae54f972d9d684751267071fef192150794156b1cf900eb9dd4ba", + "format": 1 + }, + { + "name": "roles/os10_vlt/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + 
"format": 1 + }, + { + "name": "roles/os10_vlt/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vlt/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c086a03ccf6da3e70cb9405ba5fe964a211bcbfc2ebd711ec88fa1de71d76339", + "format": 1 + }, + { + "name": "roles/os10_lldp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a7b3ecc8fb44b6654f82f849ca81d35f554aae9c9d1a668d795b0b697bd1181", + "format": 1 + }, + { + "name": "roles/os10_lldp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_lldp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67910b8f7cdd4a708e661f8e9a91ef97535ee5bd2361770a356b87a2a441af36", + "format": 1 + }, + { + "name": "roles/os10_lldp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84a67763842eb34ce0679295153beed4257949d0a4a2b3591eea13e1b640be41", + "format": 1 + }, + { + "name": "roles/os10_lldp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/templates/os10_lldp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1dd15229ab1883772f6bd6e65a9fa564d88b54d3bfd979c937b11ef9e3b22190", + "format": 1 + }, + { + "name": "roles/os10_lldp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c93bc619c8c0fecf9be10e8738df37455d98ce6ea8f99fb5b4866be24075f5b0", + "format": 1 + }, + { + "name": "roles/os10_lldp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0555b321eefc8953cf1654c41798524e402ad24bfe4e55a3ae5f93a6cca95889", + "format": 1 + }, + { + "name": "roles/os10_lldp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a914d650108a7d69cfba2c389021498b44a53e9c963e0d4e2e4f8863f5477a3c", + "format": 1 + }, + { + "name": "roles/os10_lldp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c931d17e150116110599ed116b9e981dcdeaa25c836b8efa7f45f084b770fecb", + "format": 1 + }, + { + "name": "roles/os10_lldp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_lldp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_lldp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "096051fa88a0f1150e45bee7378a41b58710ee89aec049e7368282325db779b1", + "format": 1 + }, + { + "name": "roles/os10_vrrp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a7753705b354037b6b52c76d086328a60cf18b120f54d509e01a9c7ea4f2431", + "format": 1 + }, + { + "name": "roles/os10_vrrp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_vrrp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37bcf358ae65745173b3e91dd31f63b92e3180bd680546149c50a24ccfefdfb9", + "format": 1 + }, + { + "name": "roles/os10_vrrp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e0c9f92417513f3c21cb535c11e39a91156a37b676e9ae02639b54f5bcb9d3b", + "format": 1 + }, + { + "name": "roles/os10_vrrp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/templates/os10_vrrp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea569e3efbe5591cee54852a1c4e93bdaa54e9affd4126a2c37dcdd228584e48", + "format": 1 + }, + { + "name": "roles/os10_vrrp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f89da38e4c4c6dc6f4fdc65d71e0c326b53e53ab28a342e6d0c4838ecb5a0e14", + "format": 1 + }, + { + "name": "roles/os10_vrrp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ab26d1ff9cb52daa1da7d36ab72fbd9031c0cee28a4a9f2cd402e3ac61ff9f3", + "format": 1 + }, + { + "name": "roles/os10_vrrp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c31a893d2de4391bab63960327186e0030eb3b50d0d454a202b9ad32bf8c1605", + "format": 1 + }, + { + "name": "roles/os10_vrrp/defaults", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7c3d38e722d0113e681e5bf451fdbf787b772038dab654419eab455874e6490", + "format": 1 + }, + { + "name": "roles/os10_vrrp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_vrrp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vrrp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc6092e9bba6056bce1edf31b3b1d8ddd50562b4595a05b4714364a289fc3f54", + "format": 1 + }, + { + "name": "roles/os10_network_validation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de7a336064a9cc726f30e55aa0c8f93426a7a30976ec9f8993268ad433c815d1", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/group_vars/all", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9783ff2c833041285b826cdd8818ff86e2dd4a036936057c2b786f90c5d94cfd", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"495543a435f037536ce83135c1c057b29dad18496dcec544d3d0d575412f8e57", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42f12cce1d2096bd6704d0bded92f0f424f20f6f8fb6bfd43f512ea167a3d535", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/host_vars/site1-spine2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/host_vars/site2-spine1", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/host_vars/site2-spine2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tests/host_vars/site1-spine1", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/tasks/wiring_validation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30189cda8538ba54cce476840c54dbd7b65bae20b81fbe38b5cdd490d267121a", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c93acda8436ce1b6b8ae69b6e0856172c1d4442c95ad26510f6b091511463030", + "format": 1 + }, + { + "name": 
"roles/os10_network_validation/tasks/mtu_validation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e88ffb1bdbc1d5111fd4dbcaef9d05122d130f3d92686bc1476cfe8b92d6ed88", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tasks/bgp_validation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f231237fccf6b555cb5d603fa5ecb9a2520046052102546d0a49745c322516f", + "format": 1 + }, + { + "name": "roles/os10_network_validation/tasks/vlt_validation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc2caadd4b8c67806553d9a09767879e2705cb87b14561821761170b04aae114", + "format": 1 + }, + { + "name": "roles/os10_network_validation/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_network_validation/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_network_validation/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15dd45f3c6b3ef0b9a18068409f2fa06e7658ffa36fc536c61272f60cee19e6a", + "format": 1 + }, + { + "name": "roles/os10_xstp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1aa9f3f4225408e0224d825d03529cbf75a5aa95cdb9e2d48fff649122a4d488", + "format": 1 + }, + { + "name": "roles/os10_xstp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11dc4d3a7f27c9457cb1926ffb4b6941a5698b00693270702b83007f9e6be475", + "format": 1 + }, + { + "name": "roles/os10_xstp/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_xstp/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbb0ce314389f1a1c62bd36eed69284476da8bdc35ca9f166e524da51a81bab1", + "format": 1 + }, + { + "name": "roles/os10_xstp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/templates/os10_xstp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91106569ca0f82c69b3987666f2c0988e065ba8c72e6619f1eea38024d7bb459", + "format": 1 + }, + { + "name": "roles/os10_xstp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d104c9a45496afdc6fc6b1b6d0598bc7ba3fa9490bbbb0729aea0721ad6d9ed1", + "format": 1 + }, + { + "name": "roles/os10_xstp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93faab9ab389ce95078149d446f7393e2f51b15ae26e4bc9e375b18e8f785ae3", + "format": 1 + }, + { + "name": "roles/os10_xstp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de9efdcd059596d2c0022a19a60bda4e148339b8903195327c22898b690851bf", + "format": 1 + }, + { + "name": "roles/os10_xstp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bdd9427e1adf15da19c43299ee9e0a80cb614aa975bda7d26c45d87f3853bfc", + "format": 1 + }, + { + "name": "roles/os10_xstp/LICENSE", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_xstp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_xstp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5ac22166270a0ff74c299d9fc58db9a5775da1c050b2750e21db38e7688f573", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9fc5caa466c30af8e20e94c890166472ad45e241e5a6c5e5cd8fb8e91789ab5", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6acee5a1485b2936d50918c4eeeaf19e935b16a71eb313966613d5f7474af1d6", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tests/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5cc592c390f2fdb1b4d2522bd9d3ea1ef44c70f3c93b8f21dcccda6dacf8886", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e87e5811ae377b12e4ab55e05748fd9d633d451d50ddb27b944d35b3391030f", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fae119a8a8b829c96158bb18b129995bbd81bb1f4ce2816a56ebab6e5046dd7b", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f0fa971f5e8f6afc6354c45a243da50223bc28a3bd23544da6029d0ccf379b0", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4a1807cbb9bfb1bf7d5236ead8df960f56b308a96c7d427bf9c751f2d59189d", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_image_upgrade/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "600ddfdb6ea8958552d6f3c4883ff14e03a03c5baa74fed28c824c69853a630a", + "format": 1 + }, + { + "name": "roles/os10_vxlan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3215dc7ae719682ff7edbfe9ee0dfcabf348cd9a47d59303abb23fa15ac48db", + "format": 1 + }, + { + "name": "roles/os10_vxlan/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/tests/main.yaml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "93ecaaa721e06f9dfe6714065578aab69faf978ec3d0a052a65dd42bbe45375d", + "format": 1 + }, + { + "name": "roles/os10_vxlan/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f488468fc00c1ebbcd411bccfb79c4c3f969cb4798f545ff04bed1ff9f060295", + "format": 1 + }, + { + "name": "roles/os10_vxlan/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_vxlan/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/templates/os10_vxlan.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8c620c88dad78855c1d75470657add6b6945ae5033e7e365f6ca61eabf85cfe", + "format": 1 + }, + { + "name": "roles/os10_vxlan/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b18d0a5d4f4eb08afb13a90bef216e62f14084a7c15c6fd57b2571d2202c11c", + "format": 1 + }, + { + "name": "roles/os10_vxlan/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86f729a523490d39e8de987c937683b579d9fc5e2c02aad682fd5144d8da243f", + "format": 1 + }, + { + "name": "roles/os10_vxlan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b05d46988f0fe03752627f491afbfaf43beb317b498dbfcc186fd51cf9f116b", + "format": 1 + }, + { + "name": "roles/os10_vxlan/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/os10_vxlan/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04323331ccbbbd6f0ed18972b31c11feef04c2fc82f73c276cf49b4c52bba17d", + "format": 1 + }, + { + "name": "roles/os10_vxlan/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_vxlan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_vxlan/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08bd15c1af2e5b93adb0c287cd8dbfb89a99c104330532596905cefd633378a7", + "format": 1 + }, + { + "name": "roles/os10_users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bdd33c3a3d4970f86670be7d55ae6e36afe1940c2bede9f7a3c3d64f16fb84ff", + "format": 1 + }, + { + "name": "roles/os10_users/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_users/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cad1a913d91373fb343a45090749edccff3e614d29fc20b2be5dd62f1416839", + "format": 1 + }, + { + "name": "roles/os10_users/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45cb5340cf0daeaa88ce3ef96d6b2f729a9587614b4f2da6d018c01c7007ed05", + "format": 1 + }, + { + "name": "roles/os10_users/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/templates/os10_users.j2", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2a49fd0f145f2397a5320999f4fd2653d123355bd09b7f7f9fc7b08491530d7f", + "format": 1 + }, + { + "name": "roles/os10_users/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54960fc84fea4a3ca7b680da71f0caf5673ed6cbfe2848b6896e1e09d80daf78", + "format": 1 + }, + { + "name": "roles/os10_users/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5143d119661a1b293a965db175a3e495924ba56ae668293c393f79803987a9ad", + "format": 1 + }, + { + "name": "roles/os10_users/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7ab0972fa0d373e24e87fd2a37bf2d364a35d608c968a8ba3855775c96f7025", + "format": 1 + }, + { + "name": "roles/os10_users/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e757c050d438aa41fd3489ca030b2505af98aa7fa58efcb2af15af9e49f86679", + "format": 1 + }, + { + "name": "roles/os10_users/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_users/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_users/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51e8bbd65c9e3c0392c9cb365413149869fae597d2ed0d2d1f12b87e39f6ed91", + "format": 1 + }, + { + "name": "roles/os10_ntp", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2cbcf9c9aa3c627804600ca4c6c09b75ab55111fac01ecbc3b108690f0f9d8e", + "format": 1 + }, + { + "name": "roles/os10_ntp/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_ntp/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a8f0e26fd497d4af28b1f97246262a24e8e890e0b37c6e5045a7e59a75b1027", + "format": 1 + }, + { + "name": "roles/os10_ntp/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bca5f97127de4c4fc5386d8a971e8ce5942b6e1e615eb074a85f84ae3922e2fa", + "format": 1 + }, + { + "name": "roles/os10_ntp/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/templates/os10_ntp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b01e6bab24ab07001d0041aec39b960adb7f5ae3380576cbdb4d8c5ead17615", + "format": 1 + }, + { + "name": "roles/os10_ntp/templates/os10_ntp_vrf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02bacaed5bb5d10ec65bf0ea092aa908d390c2aed8f33965f3ba5aa534fae551", + "format": 1 + }, + { + "name": "roles/os10_ntp/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6968045572c03408c0ae667b3e3d4bd618d7e44ef3569dc6afbb2f64c8aed3b", + "format": 1 + }, + { + "name": "roles/os10_ntp/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/os10_ntp/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e30177d599fcae1cd2a5ae7dbe5edb7ae61b54fe8abc8a4a291f7cb2e99cb04", + "format": 1 + }, + { + "name": "roles/os10_ntp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e8c0b69d2ba07f1e7036c37c35e9450cb1a1713f5bf06e67ae55049a47f498e", + "format": 1 + }, + { + "name": "roles/os10_ntp/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba28cab4f617eea38d6a9572d6600b035bf3b0aa6a9ea1f16b89c3bca0bd5fef", + "format": 1 + }, + { + "name": "roles/os10_ntp/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_ntp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_ntp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "119e95d884e66b8699ce7cfb245ef9a295d0ad74f611e91bde10b8de1bf6b6c6", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30e96ae0283581b1dc3af2c83de6d1f3ef986c352b0f1aa30b3bcd84f3e0e82f", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tests/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f9521ae72a98e24c998e5ca8b57b37b6387fb46ea98bce475c300f7a998f5713", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f522718931e7b47c3e6d7f02a4c31981aa93f71729415a5a9226b098211fd167", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/templates/os10_flow_monitor.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce701f3f7435a76af19d25fb2c0aef0629baea31f100fca4ffb2d715fa330cdc", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da543209c815eaf6a292b7e3ba6b251d12218c7ac85973eeb5f347fc23f38d82", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c69bd67ade82f83cb462931176842e1c15d45d254ed2867937539fde9240056", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c4fb0eb22d434434842ef2e9f7a66cd480485a13ddedb7cbe4736776ee2660d", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/defaults", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d75f9ff91119f5b2c00a015c32634aef88d63f56446b8a49611baa94950a7109", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_flow_monitor/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262699aa3cc3f5ca73c44807c385bafe6ace198d295ae0f4d8174c8432d27a30", + "format": 1 + }, + { + "name": "roles/os10_raguard", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6083750b9289d4ab598aef8b07e4e2460b7c5c1c17e15196fa1389a8346e33a8", + "format": 1 + }, + { + "name": "roles/os10_raguard/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/tests/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f", + "format": 1 + }, + { + "name": "roles/os10_raguard/tests/main.os10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4249ee5e0837a3608674e0ddf5fe786819ff406046561c36d37faf53835a3c15", + "format": 1 + }, + { + "name": "roles/os10_raguard/tests/test.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "897b50f07fd83fdc9d4af3d1f95736df7de6587ecee7eb99656e896f11e57673", + "format": 1 + }, + { + "name": "roles/os10_raguard/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "roles/os10_raguard/templates/os10_raguard.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45b2a163a3a6f4ed91a04eaf1eaf163744dc521e68bcc652f5b9204c8680ccff", + "format": 1 + }, + { + "name": "roles/os10_raguard/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d6921abe30008182bc51398fb297b2e3513e74192f97e5425270f274ee10a72", + "format": 1 + }, + { + "name": "roles/os10_raguard/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f107b00d4f91ef0488f02bad76bcabf32e37d36ac7dc4b3e13eacd93d7a2ddf", + "format": 1 + }, + { + "name": "roles/os10_raguard/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5076cf62b3808abcd72b0591f6dd79a0edfa2e24506c3c24a54ebeac0b52fc46", + "format": 1 + }, + { + "name": "roles/os10_raguard/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba3c9242ffb63fbdee8e94263256c4607963147bc30edb716dbbd19760e3a000", + "format": 1 + }, + { + "name": "roles/os10_raguard/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "roles/os10_raguard/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/os10_raguard/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a865bdbfcce0522277b74b9312d4264b3ed3b692d22a6bffd740a8c8cc021fa4", + "format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/os10_bgp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c0330969b4488def090626fd3ca345b0d5fd7d13024b8f7bc5d099a4d61bf5f", + "format": 1 + }, + { + "name": "docs/os10_image_upgrade.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9fc5caa466c30af8e20e94c890166472ad45e241e5a6c5e5cd8fb8e91789ab5", + "format": 1 + }, + { + "name": "docs/os10_vrrp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a7753705b354037b6b52c76d086328a60cf18b120f54d509e01a9c7ea4f2431", + "format": 1 + }, + { + "name": "docs/os10_qos.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47508b2209d8933ede8aa881d3e42507baf52088fdcf1682dff4cb3dbacd2409", + "format": 1 + }, + { + "name": "docs/os10_logging.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67358b37eda847a70eca21ede16c1f078989a6ef26bdcb167828592620c06a01", + "format": 1 + }, + { + "name": "docs/os10_vxlan.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3215dc7ae719682ff7edbfe9ee0dfcabf348cd9a47d59303abb23fa15ac48db", + "format": 1 + }, + { + "name": "docs/os10_uplink.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af218f5633d9ad8970bc83ea4d0112996ea810232a74d35813d31a378d183357", + "format": 1 + }, + { + "name": "docs/roles.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f91d993dbc32b3691c9e2f79d51d67eda1493d0f0cea841a3cc2816fe1ca724", + "format": 1 + }, + { + "name": "docs/os10_vlan.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba6599c1a2ac34d0361638276f081621d8b31823c3fa4d01fe952a469c3a5692", + "format": 1 + }, + { + "name": "docs/os10_prefix_list.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b53872392d389fff12d93f0f89a85893c3a4dff81b7a29cc40072ad487734183", + "format": 1 + }, + { + "name": "docs/os10_system.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6f9fa3aebc2738e4c3f5f3a49fedfa92575553a0bf93a101a76967bc63954bb", + "format": 1 + }, + { + "name": "docs/os10_interface.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aeddd44f2c7f6d17211606d02555416c6bb3f3319bbff45ea634c665097715fa", + "format": 1 + }, + { + "name": "docs/os10_ntp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2cbcf9c9aa3c627804600ca4c6c09b75ab55111fac01ecbc3b108690f0f9d8e", + "format": 1 + }, + { + "name": "docs/os10_ecmp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48f5c4db66fc2f0b993ee1f4fbd40997a402a2dc43760becb694bee7af60e02e", + "format": 1 + }, + { + "name": "docs/os10_bfd.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9155aa75d45da527a150711f44a5d46195a07761fdc2326da9ce389c4f6bac6", + "format": 1 + }, + { + "name": "docs/os10_fabric_summary.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d07249dc4347118c841ed0740b1c8eea0351d593f943fdb1922306173842f7e", + "format": 1 + }, + { + "name": "docs/os10_vlt.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7edcaa0e40c1497e1cec3310108fd578cab638f4d170b06e1c7c94f279e85943", + "format": 1 + }, + { + "name": "docs/os10_flow_monitor.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30e96ae0283581b1dc3af2c83de6d1f3ef986c352b0f1aa30b3bcd84f3e0e82f", + "format": 1 + }, + { + "name": "docs/os10_vrf.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "563a9cadb1c7ce0ecb1f62f033c3959342805be145dc20f6b1bf81c23b2ed412", + "format": 1 + }, + { + "name": "docs/os10_xstp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1aa9f3f4225408e0224d825d03529cbf75a5aa95cdb9e2d48fff649122a4d488", + "format": 1 + }, + { + "name": 
"docs/os10_template.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d311ae7470c42f2f31a7d688121e3ba8b240afe5fa917d4ab2b4fe99338055e", + "format": 1 + }, + { + "name": "docs/os10_lag.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38b04b2c00265af0b32b43f8e0057565e818e088401545600b1476ab6f090fb8", + "format": 1 + }, + { + "name": "docs/os10_dns.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d7a878dd74478a8e5a90b8f365f0d158ba08b1044984d6ad9d375314cb25f08", + "format": 1 + }, + { + "name": "docs/dellemc.os10.os10_config_module.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6267acb8f9cfb15edcc55f99507216be10e2e434a03c3427f4a5e1aad2b522c", + "format": 1 + }, + { + "name": "docs/os10_snmp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ebf6b632207048f02fb33090cd122da7b91f64a0dda9e9ca8d28303dc972c", + "format": 1 + }, + { + "name": "docs/os10_acl.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4902c3d89e393cc4678bbe45f3201578a785441d2e57b71798a4b9169a1035ec", + "format": 1 + }, + { + "name": "docs/os10_users.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bdd33c3a3d4970f86670be7d55ae6e36afe1940c2bede9f7a3c3d64f16fb84ff", + "format": 1 + }, + { + "name": "docs/os10_network_validation.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de7a336064a9cc726f30e55aa0c8f93426a7a30976ec9f8993268ad433c815d1", + "format": 1 + }, + { + "name": "docs/os10_copy_config.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b7b814ce1af3563e014db3e85d1395bc767682a69f33a6504009cd604d65af5", + "format": 1 + }, + { + "name": "docs/os10_route_map.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6818269013ecdd151788b3dd06bba9bc06123b904efc39f89c9738c0315e14c2", + "format": 1 + }, + { + "name": "docs/dellemc.os10.os10_command_module.rst", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "29c5d8db29a5ff5c8008dc598adca8e4c9d242fddfd9c94ac20bf1c166174dea", + "format": 1 + }, + { + "name": "docs/os10_aaa.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7874fbb6486c3ce0f7e13cef3426f5a9941b0017674cc085cef979d511bb31ff", + "format": 1 + }, + { + "name": "docs/dellemc.os10.os10_facts_module.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c5627d7d019b5a96cd19c3b494538b54c8571a30678dbe29b1baa53040326bb", + "format": 1 + }, + { + "name": "docs/os10_lldp.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a7b3ecc8fb44b6654f82f849ca81d35f554aae9c9d1a668d795b0b697bd1181", + "format": 1 + }, + { + "name": "docs/os10_raguard.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6083750b9289d4ab598aef8b07e4e2460b7c5c1c17e15196fa1389a8346e33a8", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96780e8d019276e8fa9c7d937c88a841028c21adb67a02e0e0b239f26c870ace", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5dbc7f223c6b79feee444ffa052b620913771b52077a69bbe78a93c002ccf67", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "573c974da3352bb7318029687993489295ce119ca08138a99d7c1add75f698f4", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ansible-test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"372cc6f489143cc505aa1b52c38cd04bc4b6487c3ce5bf613e4b3d4e32a053fc", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/os10.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cd210947b85bbb3c3270a239d73c3e6fe6a5f0e16bc77326eb179a9486593e8", + "format": 1 + }, + { + "name": "plugins/action", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action/os10.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d13cba843ee5563bec94849b9255cab43db98ee3369f553bc360ec3d9070513", + "format": 1 + }, + { + "name": "plugins/action/textfsm_parser.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e229a5a0eac4493c04425e825ab50945d7eda4a6bee56a3485dea640f7d209b", + "format": 1 + }, + { + "name": "plugins/cliconf", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/cliconf/os10.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fde5ba9d659d487322a300ecb3a433a5a522c4ec2a5383be1c6a661797ea033", + "format": 1 + }, + { + "name": "plugins/terminal", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/terminal/os10.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9633af38e10311856b00de1aee3be7c95c986fb25e6d20cd7827eaa2614d4974", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/vlt_validate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a31a1b1341a6cfb695af05233b5d29ef1cef8cc35c945e24b716a7d1445101a2", + "format": 1 + }, + { + "name": 
"plugins/modules/base_xml_to_dict.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ce7d403e2dc3e0090faed6853efcf0c3a7de4ff866d140e51839cf54b80efcf", + "format": 1 + }, + { + "name": "plugins/modules/wiring_validate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60306438476482dbf7df288c76a2c5a61a478c4f8e0b99ee2b6218f775153648", + "format": 1 + }, + { + "name": "plugins/modules/os10_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ee1bda2f8719396ced20d4fae9d3c15536ed0467190260e8870f256926c0454", + "format": 1 + }, + { + "name": "plugins/modules/os10_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5cd81545aafbd4dc8eb8c08f6067c64059385f14521fad36000b755ff54c9089", + "format": 1 + }, + { + "name": "plugins/modules/mtu_validate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e8d0d7bceb4b16c36793c20304cb828274c34e82dcbc00eea80feeabb16fc73", + "format": 1 + }, + { + "name": "plugins/modules/show_system_network_summary.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb4c713221503a9a726f74a8d2b54b248b345f554bbe271ef513a32c38b4f56a", + "format": 1 + }, + { + "name": "plugins/modules/bgp_validate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cae91228b6226f4c6fa5e00b9dba14942075250061f8011ee144a207bb9bddd4", + "format": 1 + }, + { + "name": "plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/modules/os10_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0693b1edece924d6c8d055acc94e6cc5cb65fcd8b9ea96175fab3257fbbc9c99", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/network/os10.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0827b94850fda014c103f0c9cd3e2e80bab71bce2dfdef7c2224d62f2a1d60d", + "format": 1 + }, + { + "name": "plugins/module_utils/network/base_network_show.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f0291c93d3829d2e1c78e1bca681691332e48ca6fb0fabcf1ae35ad378ea89e", + "format": 1 + }, + { + "name": "plugins/module_utils/network/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/group_vars/all", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "602987ea665844bebae77f58df9682a1e015edb5d18b7ea9259a8209dd08994f", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/group_vars/spine.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9410c561579c34b35fc5239aab55350e3472d93aa0d0cef5dd4438b4b5ced91d", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/datacenter.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2627623980960381195863f4fee460eec8d778be76a096b54857a06e97df23bd", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8d7d7db2043af3d64bc829dc8fdb9b6c59826ccf0d75c1de55efc48260c89bb", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/inventory.yaml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "1dd91b341f01a3a4f397ef739ca40c87c824d07a39038d8ecf293bed2a2d9b8c", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50e9c43b204e412be5cba64f520bbb8bc1d6badfd418729149992494b78cb2ad", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f749957a827032a0c6077a83d1f709af736596e68ee9e6e33c5ce2abd928db91", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8696e7b5d612c1f16cc7a0bdbed960a6258baaade83ff7e6cfa55f2fc100e49", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/spine2.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f795b650bb0a1b6360bef9a63be4ff553cf0ca4496ccfc5d2783247853ee218a", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/spine1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86b4b5ae09553480001d5c459bbbc0c8f188029dfb5f88afdf3e3be6c7466304", + "format": 1 + }, + { + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acb008b56cdc437f69f9f95c4cfee68999e573fa6238f91d28597725f7f6f5be", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/datacenter.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01512d9b68a28d1f437b669edeaa3139209fc605f137b80d9b70c38eda29bf8d", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/README.md", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "a4171c46c9d8296384d9f2d944195e4bea20be0eb9d84c759490bf4dfe40ce3c", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/vxlan_evpn_topology.png", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1eb1121f60229ce9d7ffde2625a6d10b3d5bae314f888172bef932a8f1a1713a", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/inventory.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d7aa907659dffc2df8f9788f353f04081c1853ff959378c4facc618ac1f686c", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e581cc10cd5569e8e188acbdee765b32a143f3c517540cbe314c1f99092f8d5", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e86083141c836c8ea7bc9643cd7f47c5e7241fff2288efa9bac68f24b9d9a53f", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/spine2.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4cb8ed1a4f629eac44ae9577abcf284b918d531b958338fd04a2a42f40488339", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1193c59b99f6954c1fe0098d2c31191802bf4ee697d1b3b4c0e240a45e42d9d", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/spine1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6516ef2547d0aaced10ec03e730bebe67f2432f43797d4c2037dbc912544b96", + "format": 1 + }, + { + "name": "playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe94c0a6f1b5657c1a24373f117a6d71300dfdd2b3b2fcc513df934318e1ff4a", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e89e29376e2d83040beda97f813eadab01df7697a7e9594ca89537d54f95214c", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/LICENSE b/ansible_collections/dellemc/os10/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/MANIFEST.json b/ansible_collections/dellemc/os10/MANIFEST.json new file mode 100644 index 00000000..6bed5d7a --- /dev/null +++ b/ansible_collections/dellemc/os10/MANIFEST.json @@ -0,0 +1,38 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "os10", + "version": "1.1.1", + "authors": [ + "Parameswaran Krishnamurthy ", + "Senthil Ganesan Ganesan ", + "Shreeja R " + ], + "readme": "README.md", + "tags": [ + "dell", + "dellemc", + "os10", + "emc", + "networking" + ], + "description": "Ansible Network Collection for Dell EMC SmartFabric OS10", + "license": [], + "license_file": "LICENSE", + "dependencies": { + "ansible.netcommon": ">=1.0.0" + }, + "repository": "https://github.com/ansible-collections/dellemc.os10", + "documentation": "https://github.com/ansible-collections/dellemc.os10/tree/master/docs", + "homepage": "https://github.com/ansible-collections/dellemc.os10", + "issues": "https://github.com/ansible-collections/dellemc.os10/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afa56fa2200f773fcafd6a3cd52df1affca39c600c356d3bccb09fcc1b92bdfd", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/README.md b/ansible_collections/dellemc/os10/README.md new file mode 100644 index 00000000..942dda9c --- /dev/null +++ b/ansible_collections/dellemc/os10/README.md @@ -0,0 +1,93 @@ +# Ansible Network Collection for Dell EMC SmartFabric OS10 + +### Collection contents +This collection includes Ansible modules, plugins and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. Sample playbooks and documentation are also included to show how the collection can be used. 
+ +### Collection core modules + +Name | Description +--- | --- +[os10_command](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_command_module.rst)| Run commands on devices running OS10 +[os10_config](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_config_module.rst)| Manage configuration on devices running OS10 +[os10_facts](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_facts_module.rst)| Collect facts from devices running OS10 + +### Collection roles +These roles facilitate the provisioning and administration of devices running SmartFabric OS10. There are over 30 roles available that provide a comprehensive coverage of most OS10 resources, including *os10_interface*, *os10_acl*, *os10_bgp*, and *os10_vxlan*. The documentation for each role is located at [OS10 roles](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/roles.rst). + +### Sample use case playbooks +This collection includes these sample playbooks that illustrate end-to-end use cases. + +- [CLOS fabric](https://github.com/ansible-collections/dellemc.os10/blob/master/playbooks/clos_fabric_ebgp/README.md) — Example playbook to build a Layer 3 leaf-spine fabric + +- [VXLAN EVPN](https://github.com/ansible-collections/dellemc.os10/blob/master/playbooks/vxlan_evpn/README.md) — Example playbook to build a scale-out leaf-spine topology using VxLAN, BGP EVPN, and symmetric-IRB + +## Installation +Use this command to install the latest version of the OS10 collection from Ansible Galaxy. + + ansible-galaxy collection install dellemc.os10 + +To install a specific version, a version range identifier must be specified. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0. + + ansible-galaxy collection install 'dellemc.os10:>=1.0.0,<2.0.0' + +## Version compatibility +* Ansible version 2.9 or later. 
+* Python 3.5 or higher, or Python 2.7 + +### Using in Ansible version 2.9 +> **NOTE**: This step is not required for Ansible version 2.10 or later. + +To use this collection in Ansible version 2.9 it is required to set the below environment variable while running the playbook. + + ANSIBLE_NETWORK_GROUP_MODULES=os10 + +It can be set permanently in *ansible.cfg* with variable *network_group_modules* under the *defaults* section. + +**ansible.cfg** + + [defaults] + network_group_modules=os10 + +> **NOTE**: For Ansible versions lower than 2.9, use the legacy [dellos10 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os10-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html). + +## Sample playbook + +**playbook.yaml** + + - hosts: os10_switches + connection: network_cli + collections: + - dellemc.os10 + roles: + - os10_vlan + +**host_vars/os10_sw1.yaml** + + hostname: os10_sw1 + # Parameters for connection type network_cli + ansible_ssh_user: xxxx + ansible_ssh_pass: xxxx + ansible_network_os: dellemc.os10.os10 + + # Create vlan100 and delete vlan888 + os10_vlan: + vlan 100: + description: "Blue" + state: present + vlan 888: + state: absent + +**inventory.yaml** + + [os10_sw1] + os10_sw1 ansible_host=100.104.28.119 + + [os10_sw2] + os10_sw2 ansible_host=100.104.28.120 + + [os10_switches:children] + os10_sw1 + os10_sw2 + + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst new file mode 100644 index 00000000..367cb585 --- /dev/null +++ b/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst @@ -0,0 +1,116 @@ +====================================================================== +Ansible Network Collection for Dell EMC SmartFabric OS10 Release Notes +====================================================================== + +.. 
contents:: Topics + + +v1.1.1 +====== + +Minor Changes +------------- + +- Adding support for Ansible version 2.9 (https://github.com/ansible-collections/dellemc.os10/pull/58) + +v1.1.0 +====== + +Major Changes +------------- + +- os10_bgp - Enhanced router bgp keyword support for non-default vrf which are supported for default vrf and additional keyword to support both default and non-default vrf +- os10_snmp role - Added support for snmp V3 features in community, group, host, engineID + +Minor Changes +------------- + +- Enhanced os10_bgp role to support internal BGP redistribution under address-family for V4 and V6 +- Enhanced os10_bgp role to support maximum-prefix configuration under BGP peer and peer-group. +- os10_ntp role - Added support for vrf and sha1 and sha2-256 authentication-key types +- os10_snmp role - Added support for source-interface and vrf +- os10_template - add template for show spanning tree compatibility mode +- os10_template - add template for show vlt error disabled ports +- os10_uplink role - Added support for downstream disable-links and auto-recover + +Breaking Changes / Porting Guide +-------------------------------- + +- os10_bgp - Changed "subnet" key as list format instead of dictionary format under "listen" key to support multiple neighbor prefix for listen command +- os10_bgp - Changed "vrf" key as list format instead of dictionary format to support multiple VRF in router BGP and changed the "vrf" key name to "vrfs" + +Bugfixes +-------- + +- Fixed issue in using interface range in os10_vlan members. 
(https://github.com/ansible-collections/dellemc.os10/issues/53) + +v1.0.2 +====== + +Bugfixes +-------- + +- Fix issue in using ip_and_mask along with members in os10_vlan role (https://github.com/ansible-collections/dellemc.os10/issues/42) +- Fix issue in using list of strings for `commands` argument for `os10_command` module (https://github.com/ansible-collections/dellemc.os10/issues/43) +- Fixed os10_vlan role idempotency issue with description and members (https://github.com/ansible-collections/dellemc.os10/issues/46) + +v1.0.1 +====== + +Release Summary +--------------- + +Added changelog. + +v1.0.0 +====== + +Major Changes +------------- + +- New role os10_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server. +- New role os10_acl - Facilitates the configuration of Access Control lists. +- New role os10_bfd - Facilitates the configuration of BFD global attributes. +- New role os10_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes. +- New role os10_copy_config - This role pushes the backup running configuration into a OS10 device. +- New role os10_dns - Facilitates the configuration of domain name service (DNS). +- New role os10_ecmp - Facilitates the configuration of equal cost multi-path (ECMP) for IPv4. +- New role os10_fabric_summary Facilitates to get show system information of all the OS10 switches in the fabric. +- New role os10_flow_monitor Facilitates the configuration of ACL flow-based monitoring attributes. +- New role os10_image_upgrade Facilitates installation of OS10 software images. +- New role os10_interface Facilitates the configuration of interface attributes. +- New role os10_lag Facilitates the configuration of link aggregation group (LAG) attributes. +- New role os10_lldp Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. 
+- New role os10_logging Facilitates the configuration of global logging attributes and logging servers. +- New role os10_network_validation Facilitates validation of wiring connection, BGP neighbors, MTU between neighbors and VLT pair. +- New role os10_ntp Facilitates the configuration of network time protocol (NTP) attributes. +- New role os10_prefix_list Facilitates the configuration of IP prefix-list. +- New role os10_qos Facilitates the configuration of quality of service attributes including policy-map and class-map. +- New role os10_raguard Facilitates the configuration of IPv6 RA Guard attributes. +- New role os10_route_map Facilitates the configuration of route-map attributes. +- New role os10_snmp Facilitates the configuration of global SNMP attributes. +- New role os10_system Facilitates the configuration of hostname and hashing algorithm. +- New role os10_template The role takes the raw string input from the CLI of OS10 device, and returns a structured text in the form of a Python dictionary. +- New role os10_uplink Facilitates the configuration of uplink attributes like uplink-state group. +- New role os10_users Facilitates the configuration of global system user attributes. +- New role os10_vlan Facilitates the configuration of virtual LAN (VLAN) attributes. +- New role os10_vlt Facilitates the configuration of virtual link trunking (VLT). +- New role os10_vrf Facilitates the configuration of virtual routing and forwarding (VRF). +- New role os10_vrrp Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes. +- New role os10_vxlan Facilitates the configuration of virtual extensible LAN (VXLAN) attributes. +- New role os10_xstp Facilitates the configuration of xSTP attributes. + +New Plugins +----------- + +Cliconf +~~~~~~~ + +- os10 - Use OS10 cliconf to run commands on Dell EMC PowerSwitch devices. + +New Modules +----------- + +- os10_command - Run commands on devices running Dell EMC SmartFabric OS10. 
+- os10_config - Manage configuration on devices running OS10. +- os10_facts - Collect facts from devices running OS10. diff --git a/ansible_collections/dellemc/os10/changelogs/changelog.yaml b/ansible_collections/dellemc/os10/changelogs/changelog.yaml new file mode 100644 index 00000000..334a1e5f --- /dev/null +++ b/ansible_collections/dellemc/os10/changelogs/changelog.yaml @@ -0,0 +1,124 @@ +ancestor: null +releases: + 1.0.0: + changes: + major_changes: + - New role os10_aaa - Facilitates the configuration of Authentication Authorization + and Accounting (AAA), TACACS and RADIUS server. + - New role os10_acl - Facilitates the configuration of Access Control lists. + - New role os10_bfd - Facilitates the configuration of BFD global attributes. + - New role os10_bgp - Facilitates the configuration of border gateway protocol + (BGP) attributes. + - New role os10_copy_config - This role pushes the backup running configuration + into a OS10 device. + - New role os10_dns - Facilitates the configuration of domain name service (DNS). + - New role os10_ecmp - Facilitates the configuration of equal cost multi-path + (ECMP) for IPv4. + - New role os10_fabric_summary Facilitates to get show system information of + all the OS10 switches in the fabric. + - New role os10_flow_monitor Facilitates the configuration of ACL flow-based + monitoring attributes. + - New role os10_image_upgrade Facilitates installation of OS10 software images. + - New role os10_interface Facilitates the configuration of interface attributes. + - New role os10_lag Facilitates the configuration of link aggregation group + (LAG) attributes. + - New role os10_lldp Facilitates the configuration of link layer discovery protocol + (LLDP) attributes at global and interface level. + - New role os10_logging Facilitates the configuration of global logging attributes + and logging servers. 
+ - New role os10_network_validation Facilitates validation of wiring connection, + BGP neighbors, MTU between neighbors and VLT pair. + - New role os10_ntp Facilitates the configuration of network time protocol (NTP) + attributes. + - New role os10_prefix_list Facilitates the configuration of IP prefix-list. + - New role os10_qos Facilitates the configuration of quality of service attributes + including policy-map and class-map. + - New role os10_raguard Facilitates the configuration of IPv6 RA Guard attributes. + - New role os10_route_map Facilitates the configuration of route-map attributes. + - New role os10_snmp Facilitates the configuration of global SNMP attributes. + - New role os10_system Facilitates the configuration of hostname and hashing + algorithm. + - New role os10_template The role takes the raw string input from the CLI of + OS10 device, and returns a structured text in the form of a Python dictionary. + - New role os10_uplink Facilitates the configuration of uplink attributes like + uplink-state group. + - New role os10_users Facilitates the configuration of global system user attributes. + - New role os10_vlan Facilitates the configuration of virtual LAN (VLAN) attributes. + - New role os10_vlt Facilitates the configuration of virtual link trunking (VLT). + - New role os10_vrf Facilitates the configuration of virtual routing and forwarding + (VRF). + - New role os10_vrrp Facilitates the configuration of virtual router redundancy + protocol (VRRP) attributes. + - New role os10_vxlan Facilitates the configuration of virtual extensible LAN + (VXLAN) attributes. + - New role os10_xstp Facilitates the configuration of xSTP attributes. + modules: + - description: Run commands on devices running Dell EMC SmartFabric OS10. + name: os10_command + namespace: '' + - description: Manage configuration on devices running OS10. + name: os10_config + namespace: '' + - description: Collect facts from devices running OS10. 
+ name: os10_facts + namespace: '' + plugins: + cliconf: + - description: Use OS10 cliconf to run commands on Dell EMC PowerSwitch devices. + name: os10 + namespace: null + release_date: '2020-07-31' + 1.0.1: + changes: + release_summary: Added changelog. + fragments: + - 1.0.1.yaml + release_date: '2020-08-14' + 1.0.2: + changes: + bugfixes: + - Fix issue in using ip_and_mask along with members in os10_vlan role (https://github.com/ansible-collections/dellemc.os10/issues/42) + - Fix issue in using list of strings for `commands` argument for `os10_command` + module (https://github.com/ansible-collections/dellemc.os10/issues/43) + - Fixed os10_vlan role idempotency issue with description and members (https://github.com/ansible-collections/dellemc.os10/issues/46) + fragments: + - 1.0.2.yaml + - 46-vlan-idempotency-desc-and-members.yaml + release_date: '2020-10-28' + 1.1.0: + changes: + breaking_changes: + - os10_bgp - Changed "subnet" key as list format instead of dictionary format + under "listen" key to support multiple neighbor prefix for listen command + - os10_bgp - Changed "vrf" key as list format instead of dictionary format to + support multiple VRF in router BGP and changed the "vrf" key name to "vrfs" + bugfixes: + - Fixed issue in using interface range in os10_vlan members. (https://github.com/ansible-collections/dellemc.os10/issues/53) + major_changes: + - os10_bgp - Enhanced router bgp keyword support for non-default vrf which are + supported for default vrf and additional keyword to support both default and + non-default vrf + - os10_snmp role - Added support for snmp V3 features in community, group, host, + engineID + minor_changes: + - Enhanced os10_bgp role to support internal BGP redistribution under address-family + for V4 and V6 + - Enhanced os10_bgp role to support maximum-prefix configuration under BGP peer + and peer-group. 
+ - os10_ntp role - Added support for vrf and sha1 and sha2-256 authentication-key + types + - os10_snmp role - Added support for source-interface and vrf + - os10_template - add template for show spanning tree compatibility mode + - os10_template - add template for show vlt error disabled ports + - os10_uplink role - Added support for downstream disable-links and auto-recover + fragments: + - 53-vlan-member-port-range.yaml + - R9.yaml + release_date: '2020-12-02' + 1.1.1: + changes: + minor_changes: + - Adding support for Ansible version 2.9 (https://github.com/ansible-collections/dellemc.os10/pull/58) + fragments: + - 58-support-for-ansible-version-2.9.yaml + release_date: '2020-12-11' diff --git a/ansible_collections/dellemc/os10/changelogs/config.yaml b/ansible_collections/dellemc/os10/changelogs/config.yaml new file mode 100644 index 00000000..953ad20a --- /dev/null +++ b/ansible_collections/dellemc/os10/changelogs/config.yaml @@ -0,0 +1,30 @@ +changelog_filename_template: CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +flatmap: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Ansible Network Collection for Dell EMC SmartFabric OS10 +trivial_section_name: trivial diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst new file mode 100644 index 00000000..4bec5b23 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst @@ -0,0 +1,446 @@ +.. Document meta + + +.. Anchors + +.. _ansible_collections.dellemc.os10.os10_command_module: + +.. Anchors: short name for ansible.builtin + +.. Anchors: aliases + + + +.. Title + +dellemc.os10.os10_command -- Run commands on devices running Dell EMC SmartFabric OS10 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. Collection note + +.. note:: + This plugin is part of the `dellemc.os10 collection `_. + + To install it use: :code:`ansible-galaxy collection install dellemc.os10`. + + To use it in a playbook, specify: :code:`dellemc.os10.os10_command`. + +.. version_added + + +.. contents:: + :local: + :depth: 1 + +.. Deprecated + + +Synopsis +-------- + +.. Description + +- Sends arbitrary commands to a OS10 device and returns the results read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. +- This module does not support running commands in configuration mode. Please use `dellemc.os10.os10_config `_ to configure OS10 devices. + + +.. Aliases + + +.. Requirements + + +.. Options + +Parameters +---------- + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterChoices/DefaultsComments
+
+ commands + +
+ list + / elements=string / required
+
+ +
List of commands to send to the remote OS10 device over the configured provider. The resulting output from the command is returned. If the wait_for argument is provided, the module is not returned until the condition is satisfied or the number of retries has expired.
+
+
+ interval + +
+ integer +
+
+ Default:
1
+
+
Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again.
+
+
+ match + +
+ string +
+
+
    Choices: +
  • all ←
  • +
  • any
  • +
+
+
The match argument is used in conjunction with the wait_for argument to specify the match policy. Valid values are all or any. If the value is set to all then all conditionals in the wait_for must be satisfied. If the value is set to any then only one of the values must be satisfied.
+
+
+ provider + +
+ dictionary +
+
+ +
A dict object containing connection details.
+
+
+ auth_pass + +
+ string +
+
+ +
Specifies the password to use if required to enter privileged mode on the remote device. If authorize is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTH_PASS will be used instead.
+
+
+ authorize + +
+ boolean +
+
+
    Choices: +
  • no ←
  • +
  • yes
  • +
+
+
Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTHORIZE will be used instead.
+
+
+ host + +
+ string +
+
+ +
Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.
+
+
+ password + +
+ string +
+
+ +
Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_PASSWORD will be used instead.
+
+
+ port + +
+ integer +
+
+ +
Specifies the port to use when building the connection to the remote device.
+
+
+ ssh_keyfile + +
+ path +
+
+ +
Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_SSH_KEYFILE will be used instead.
+
+
+ timeout + +
+ integer +
+
+ +
Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.
+
+
+ username + +
+ string +
+
+ +
User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_USERNAME will be used instead.
+
+
+ retries + +
+ integer +
+
+ Default:
10
+
+
Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the wait_for conditions.
+
+
+ wait_for + +
+ list + / elements=string
+
+ +
List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples.
+
+
+ +.. Notes + +Notes +----- + +.. note:: + - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking. + +.. Seealso + + +.. Examples + +Examples +-------- + +.. code-block:: yaml+jinja + + + tasks: + - name: run show version on remote devices + os10_command: + commands: show version + + - name: run show version and check to see if output contains OS10 + os10_command: + commands: show version + wait_for: result[0] contains OS10 + + - name: run multiple commands on remote nodes + os10_command: + commands: + - show version + - show interface + + - name: run multiple commands and evaluate the output + os10_command: + commands: + - show version + - show interface + wait_for: + - result[0] contains OS10 + - result[1] contains Ethernet + + + + +.. Facts + + +.. Return values + +Return Values +------------- +Common return values are documented `here `_, the following are the fields unique to this module: + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyReturnedDescription
+
+ failed_conditions + +
+ list + / elements=string
+
failed +
The list of conditionals that have failed
+
+
Sample:
+
['...', '...']
+
+
+ stdout + +
+ list + / elements=string
+
always apart from low level errors (such as action plugin) +
The set of responses from the commands
+
+
Sample:
+
['...', '...']
+
+
+ stdout_lines + +
+ list + / elements=string
+
always apart from low level errors (such as action plugin) +
The value of stdout split into a list
+
+
Sample:
+
[['...', '...'], ['...'], ['...']]
+
+
+ warnings + +
+ list + / elements=string
+
always +
The list of warnings (if any) generated by module based on arguments
+
+
Sample:
+
['...', '...']
+
+

+ +.. Status (Presently only deprecated) + + +.. Authors + +Authors +~~~~~~~ + +- Senthil Kumar Ganesan (@skg-net) + + + +.. Parsing errors + diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst new file mode 100644 index 00000000..73747808 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst @@ -0,0 +1,606 @@ +.. Document meta + + +.. Anchors + +.. _ansible_collections.dellemc.os10.os10_config_module: + +.. Anchors: short name for ansible.builtin + +.. Anchors: aliases + + + +.. Title + +dellemc.os10.os10_config -- Manage Dell EMC SmartFabric OS10 configuration sections ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. Collection note + +.. note:: + This plugin is part of the `dellemc.os10 collection `_. + + To install it use: :code:`ansible-galaxy collection install dellemc.os10`. + + To use it in a playbook, specify: :code:`dellemc.os10.os10_config`. + +.. version_added + + +.. contents:: + :local: + :depth: 1 + +.. Deprecated + + +Synopsis +-------- + +.. Description + +- OS10 configurations use a simple block indent file syntax for segmenting configuration into sections. This module provides an implementation for working with OS10 configuration sections in a deterministic way. + + +.. Aliases + + +.. Requirements + + +.. Options + +Parameters +---------- + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterChoices/DefaultsComments
+
+ after + +
+ list + / elements=string
+
+ +
The ordered set of commands to append to the end of the command stack if a change needs to be made. Just like with before this allows the playbook designer to append a set of commands to be executed after the command set.
+
+
+ backup + +
+ boolean +
+
+
    Choices: +
  • no ←
  • +
  • yes
  • +
+
+
This argument will cause the module to create a full backup of the current running-config from the remote device before any changes are made. If the backup_options value is not given, the backup file is written to the backup folder in the playbook root directory. If the directory does not exist, it is created.
+
+
+ backup_options + +
+ dictionary +
+
+ +
This is a dict object containing configurable options related to backup file path. The value of this option is read only when backup is set to yes, if backup is set to no this option will be silently ignored.
+
+
+ dir_path + +
+ path +
+
+ +
This option provides the path ending with directory name in which the backup configuration file will be stored. If the directory does not exist it will be first created and the filename is either the value of filename or default filename as described in filename options description. If the path value is not given in that case a backup directory will be created in the current working directory and backup configuration will be copied in filename within backup directory.
+
+
+ filename + +
+ string +
+
+ +
The filename to be used to store the backup configuration. If the filename is not given it will be generated based on the hostname, current time and date in format defined by <hostname>_config.<current-date>@<current-time>
+
+
+ before + +
+ list + / elements=string
+
+ +
The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system.
+
+
+ config + +
+ string +
+
+ +
The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The config argument allows the implementer to pass in the configuration to use as the base config for comparison.
+
+
+ lines + +
+ list + / elements=string
+
+ +
The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. This argument is mutually exclusive with src.
+

aliases: commands
+
+
+ match + +
+ string +
+
+
    Choices: +
  • line ←
  • +
  • strict
  • +
  • exact
  • +
  • none
  • +
+
+
Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to line, commands are matched line by line. If match is set to strict, command lines are matched with respect to position. If match is set to exact, command lines must be an equal match. Finally, if match is set to none, the module will not attempt to compare the source configuration with the running configuration on the remote device.
+
+
+ parents + +
+ list + / elements=string
+
+ +
The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands.
+
+
+ provider + +
+ dictionary +
+
+ +
A dict object containing connection details.
+
+
+ auth_pass + +
+ string +
+
+ +
Specifies the password to use if required to enter privileged mode on the remote device. If authorize is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTH_PASS will be used instead.
+
+
+ authorize + +
+ boolean +
+
+
    Choices: +
  • no ←
  • +
  • yes
  • +
+
+
Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTHORIZE will be used instead.
+
+
+ host + +
+ string +
+
+ +
Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.
+
+
+ password + +
+ string +
+
+ +
Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_PASSWORD will be used instead.
+
+
+ port + +
+ integer +
+
+ +
Specifies the port to use when building the connection to the remote device.
+
+
+ ssh_keyfile + +
+ path +
+
+ +
Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_SSH_KEYFILE will be used instead.
+
+
+ timeout + +
+ integer +
+
+ +
Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.
+
+
+ username + +
+ string +
+
+ +
User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_USERNAME will be used instead.
+
+
+ replace + +
+ string +
+
+
    Choices: +
  • line ←
  • +
  • block
  • +
+
+
Instructs the module on the way to perform the configuration on the device. If the replace argument is set to line then the modified lines are pushed to the device in configuration mode. If the replace argument is set to block then the entire command block is pushed to the device in configuration mode if any line is not correct.
+
+
+ save + +
+ boolean +
+
+
    Choices: +
  • no ←
  • +
  • yes
  • +
+
+
The save argument instructs the module to save the running- config to the startup-config at the conclusion of the module running. If check mode is specified, this argument is ignored.
+
+
+ src + +
+ path +
+
+ +
Specifies the source path to the file that contains the configuration or configuration template to load. The path to the source file can either be the full path on the Ansible control host or a relative path from the playbook or role root directory. This argument is mutually exclusive with lines.
+
+
+ update + +
+ string +
+
+
    Choices: +
  • merge ←
  • +
  • check
  • +
+
+
The update argument controls how the configuration statements are processed on the remote device. Valid choices for the update argument are merge and check. When you set this argument to merge, the configuration changes merge with the current device running configuration. When you set this argument to check the configuration updates are determined but not actually configured on the remote device.
+
+
+ +.. Notes + +Notes +----- + +.. note:: + - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking. + +.. Seealso + + +.. Examples + +Examples +-------- + +.. code-block:: yaml+jinja + + + - os10_config: + lines: ['hostname {{ inventory_hostname }}'] + + - os10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + - 50 permit ip host 5.5.5.5 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + match: exact + + - os10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + replace: block + + - os10_config: + lines: ['hostname {{ inventory_hostname }}'] + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user + + + + +.. Facts + + +.. Return values + +Return Values +------------- +Common return values are documented `here `_, the following are the fields unique to this module: + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyReturnedDescription
+
+ backup_path + +
+ string +
+
when backup is yes +
The full path to the backup file
+
+
Sample:
+
/playbooks/ansible/backup/os10_config.2016-07-16@22:28:34
+
+
+ commands + +
+ list + / elements=string
+
always +
The set of commands that will be pushed to the remote device
+
+
Sample:
+
['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
+
+
+ saved + +
+ boolean +
+
When not check_mode. +
Returns whether the configuration is saved to the startup configuration or not.
+
+
Sample:
+
True
+
+
+ updates + +
+ list + / elements=string
+
always +
The set of commands that will be pushed to the remote device.
+
+
Sample:
+
['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
+
+

+ +.. Status (Presently only deprecated) + + +.. Authors + +Authors +~~~~~~~ + +- Senthil Kumar Ganesan (@skg-net) + + + +.. Parsing errors + diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst new file mode 100644 index 00000000..658a3d9d --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst @@ -0,0 +1,511 @@ +.. Document meta + + +.. Anchors + +.. _ansible_collections.dellemc.os10.os10_facts_module: + +.. Anchors: short name for ansible.builtin + +.. Anchors: aliases + + + +.. Title + +dellemc.os10.os10_facts -- Collect facts from devices running Dell EMC SmartFabric OS10 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. Collection note + +.. note:: + This plugin is part of the `dellemc.os10 collection `_. + + To install it use: :code:`ansible-galaxy collection install dellemc.os10`. + + To use it in a playbook, specify: :code:`dellemc.os10.os10_facts`. + +.. version_added + + +.. contents:: + :local: + :depth: 1 + +.. Deprecated + + +Synopsis +-------- + +.. Description + +- Collects a base set of device facts from a remote device that is running OS10. This module prepends all of the base network fact keys with ``ansible_net_``. The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts. + + +.. Aliases + + +.. Requirements + + +.. Options + +Parameters +---------- + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterChoices/DefaultsComments
+
+ gather_subset + +
+ list + / elements=string
+
+ Default:
["!config"]
+
+
When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial ! to specify that a specific subset should not be collected.
+
+
+ provider + +
+ dictionary +
+
+ +
A dict object containing connection details.
+
+
+ auth_pass + +
+ string +
+
+ +
Specifies the password to use if required to enter privileged mode on the remote device. If authorize is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTH_PASS will be used instead.
+
+
+ authorize + +
+ boolean +
+
+
    Choices: +
  • no ←
  • +
  • yes
  • +
+
+
Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_AUTHORIZE will be used instead.
+
+
+ host + +
+ string +
+
+ +
Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.
+
+
+ password + +
+ string +
+
+ +
Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_PASSWORD will be used instead.
+
+
+ port + +
+ integer +
+
+ +
Specifies the port to use when building the connection to the remote device.
+
+
+ ssh_keyfile + +
+ path +
+
+ +
Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_SSH_KEYFILE will be used instead.
+
+
+ timeout + +
+ integer +
+
+ +
Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.
+
+
+ username + +
+ string +
+
+ +
User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable ANSIBLE_NET_USERNAME will be used instead.
+
+
+ +.. Notes + +Notes +----- + +.. note:: + - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking. + +.. Seealso + + +.. Examples + +Examples +-------- + +.. code-block:: yaml+jinja + + + # Collect all facts from the device + - os10_facts: + gather_subset: all + + # Collect only the config and default facts + - os10_facts: + gather_subset: + - config + + # Do not collect hardware facts + - os10_facts: + gather_subset: + - "!hardware" + + + + +.. Facts + + +.. Return values + +Return Values +------------- +Common return values are documented `here `_, the following are the fields unique to this module: + +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyReturnedDescription
+
+ ansible_net_all_ipv4_addresses + +
+ list + / elements=string
+
when interfaces is configured +
All IPv4 addresses configured on the device
+
+
+
+ ansible_net_all_ipv6_addresses + +
+ list + / elements=string
+
when interfaces is configured +
All IPv6 addresses configured on the device
+
+
+
+ ansible_net_config + +
+ string +
+
when config is configured +
The current active config from the device
+
+
+
+ ansible_net_cpu_arch + +
+ string +
+
when hardware is configured +
CPU Architecture of the remote device.
+
+
+
+ ansible_net_gather_subset + +
+ list + / elements=string
+
always +
The list of fact subsets collected from the device
+
+
+
+ ansible_net_hostname + +
+ string +
+
always +
The configured hostname of the device
+
+
+
+ ansible_net_interfaces + +
+ dictionary +
+
when interfaces is configured +
A hash of all interfaces running on the system
+
+
+
+ ansible_net_memfree_mb + +
+ integer +
+
when hardware is configured +
The available free memory on the remote device in MB
+
+
+
+ ansible_net_memtotal_mb + +
+ integer +
+
when hardware is configured +
The total memory on the remote device in MB
+
+
+
+ ansible_net_model + +
+ string +
+
always +
The model name returned from the device.
+
+
+
+ ansible_net_name + +
+ string +
+
always +
The name of the OS that is running.
+
+
+
+ ansible_net_neighbors + +
+ dictionary +
+
when interfaces is configured +
The list of LLDP neighbors from the remote device
+
+
+
+ ansible_net_servicetag + +
+ string +
+
always +
The service tag number of the remote device.
+
+
+
+ ansible_net_version + +
+ string +
+
always +
The operating system version running on the remote device
+
+
+

+ +.. Status (Presently only deprecated) + + +.. Authors + +Authors +~~~~~~~ + +- Senthil Kumar Ganesan (@skg-net) + + + +.. Parsing errors + diff --git a/ansible_collections/dellemc/os10/docs/os10_aaa.md b/ansible_collections/dellemc/os10/docs/os10_aaa.md new file mode 100644 index 00000000..cabee7ea --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_aaa.md @@ -0,0 +1,136 @@ +AAA role +======== + +This role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of RADIUS server, TACACS server, and AAA. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The AAA role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_aaa keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os10 | +| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os10 | +| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions | os10 | +| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os10 | +| ``host.ip`` | string | Configures the RADIUS server host address | os10 | +| ``host.key`` | 
string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 | +| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os10 | +| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``) | os10 | +| ``tacacs_server.timeout`` | integer | Configures the timeout for retransmissions | os10 | +| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os10 | +| ``host.ip`` | string | Configures the TACACS server host address | os10 | +| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 | +| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os10 | +| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os10 | +| ``aaa_accounting.accounting.accounting_type`` | dictionary | Configures accounting type | os10 | +| ``aaa_accounting.accounting.connection_type`` | dictionary | Configures accounting connection type | os10 | +| ``aaa_accounting.accounting.account_mode`` | dictionary | Configures accounting mode | os10 | +| ``aaa_accounting.accounting.server_group`` | dictionary | Configures accounting server group | os10 | +| ``aaa_accounting.accounting.state`` | string: present,absent | Configures/unconfigures accounting parameters | os10 | +| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os10 | +| ``aaa_authentication.login`` | dictionary | Configures authentication login (see ``aaa_authentication.login.*``)| os10 | +| 
``aaa_authentication.login.console`` | dictionary | Configures authentication method for console login | os10 | +| ``aaa_authentication.login.state`` | string: present,absent | Unconfigures authentication login if set to absent | os10 | +| ``aaa_authentication.login.type`` | dictionary | Configures authentication type | os10 | +| ``aaa_authentication.re_authenticate`` | boolean | Configures re-authenticate by enable if set to true | os10 | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to 
be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_aaa* role to configure AAA for radius and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false and it writes a simple playbook that only references the *os10_aaa* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_aaa: + radius_server: + retransmit: 5 + timeout: 10 + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + tacacs_server: + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + timeout: 6 + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: true + aaa_accounting: + accounting: + - accounting_type: commands + connection_type: console + account_mode: start-stop + server_group: group tacacs+ + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_aaa + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_acl.md b/ansible_collections/dellemc/os10/docs/os10_acl.md new file mode 100644 index 00000000..14a1fe2a --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_acl.md @@ -0,0 +1,130 @@ +ACL role +======== + +This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The ACL role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_acl keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os10 | +| ``name`` | string (required) | Configures the name of the access-control list | os10 | +| ``description`` | string | Configures the description about the access-control list | os10 | +| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os10| +| ``remark.number`` | integer (required) | Configures the remark sequence number | os10 | +| ``remark.description`` | string | Configures the remark description | os10 | +| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os10 | +| ``entries`` | list | Configures ACL rules (see ``seqlist.*``) | os10 | +| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os10 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os10 | +| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os10 | +| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os10 | +| ``entries.src_condition`` | string | Specifies the condition to filter 
packets from the source address; ignored if MAC | os10 | +| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os10 | +| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os10 | +| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os10 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os10 | +| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os10 | +| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os10 | +| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os10 | +| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os10 | +| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os10 | +| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os10 | +| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os10 | +| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. 
The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os10_acl* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_acl: + - name: ssh + type: ipv4 + description: acl + remark: + - description: 1 + number: 3 + state: absent + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + destination: any + dest_condition: eq 4 + other_options: count + state: present + stage_ingress: + - name: ethernet 1/1/1 + state: absent + - name: ethernet 1/1/2 + state: absent + stage_egress: + - name: ethernet 1/1/3 + state: absent + lineterminal: + state: absent + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_acl + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_bfd.md b/ansible_collections/dellemc/os10/docs/os10_bfd.md new file mode 100644 index 00000000..c6907992 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_bfd.md @@ -0,0 +1,89 @@ +BFD role +=========== + +This role facilitates the configuration of bidirectional forwarding detection (BFD) global attributes. It specifically enables configuration of BFD interval, min_rx, multiplier, and role. 
This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The BFD role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_bfd keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``bfd`` | dictionary | Configures global BFD parameters (see ``bfd.*``) | os10 | +| ``bfd.interval`` | integer | Configures the time interval in ms (100 to 1000) | os10 | +| ``bfd.min_rx`` | integer | Configures maximum waiting time for receiving control packets from BFD peers in ms (100 to 1000)| os10 | +| ``bfd.multiplier`` | integer | Configures the maximum number of consecutive packets that are not received from BFD peers before session state changes to Down (3 to 50) | os10 | +| ``bfd.role`` | string: passive,active\* | Configures the BFD role | os10 | +| ``bfd.state`` | string: absent,present\* | Removes the global BFD if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +******************** + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_bfd role* to completely set the global BFD attributes. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The bfd role writes a simple playbook that only references the *os10_bfd* role. By including the role, you automatically get access to all of the tasks to configure BFD feature. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_bfd: + bfd: + interval: 100 + min_rx: 100 + multiplier: 3 + role: "active" + state: "present" + +**Simple playbook to setup bfd — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_bfd + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_bgp.md b/ansible_collections/dellemc/os10/docs/os10_bgp.md new file mode 100644 index 00000000..e4e7c94e --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_bgp.md @@ -0,0 +1,729 @@ +BGP role +======== + +This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum paths. This role is abstracted for Dell EMC PowerSwitch platforms running SmartFabric OS10. + +The BGP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_bgp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os10 | +| ``router_id`` | string | Configures the IP address of the local BGP router instance | os10 | +| ``graceful_restart`` | boolean | Configures graceful restart capability | os10 | +| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os10 | +| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os10 | +| ``log_neighbor_changes`` | boolean | Configures log neighbors up/down | os10 | +| ``fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down | os10 | +| ``always_compare_med`` | boolean | Configures comparing MED from different neighbors | os10 | +| ``default_loc_pref`` | integer | Configures the default local preference value | os10 | +| ``as_notation`` | string | Configures AS number notation format | os10 | +| ``enforce_first_as`` | boolean | Configures the first AS for eBGP routes | os10 | +| ``non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm | os10 | +| ``outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group 
members | os10 | +| ``confederation`` | dictionary | Configures AS confederation parameters (see ``confederation.*``) | os10 | +| ``confederation.identifier`` | integer | Configures the routing domain confederation AS | os10 | +| ``confederation.peers`` | string | Configures the peer AS in BGP confederation | os10 | +| ``confederation.peers_state`` | string: absent,present\* | Deletes the peer AS in BGP confederation if set to absent | os10 | +| ``route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) | os10 | +| ``route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection | os10 | +| ``route_reflector.cluster_id`` | string | Configures the route reflector cluster-id | os10 | +| ``address_family_ipv4`` | dictionary | Configures IPv4 address family parameters (see ``address_family_ipv4.*``) | os10 | +| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| 
``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures internal BGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the internal BGP redistribution for an IPv4 address family | os10 | +| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 | +| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``address_family_ipv6`` | dictionary | Configures IPv6 address family parameters (see ``address_family_ipv6.*``) | os10 | +| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| 
``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 | +| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 | +| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for an IPv6 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os10 | +| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 | +| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 | +| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 | +| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 | +| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 | +| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 | +| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 | +| 
``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 | +| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 | +| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 | +| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os10 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 | +| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 | +| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 | +| ``neighbor.auto_peer`` |string: unnumbered-auto | Enables auto discovery of neighbors | os10 | +| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 | +| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 | +| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 | +| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 | +| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 | +| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 | +| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 | +| ``neighbor.local_as`` | integer | Configures the 
local AS for the BGP peer | os10 | +| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 | +| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 | +| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 | +| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 | +| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 | +| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 | +| ``address_family.activate`` | boolean | Configures activation/deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 | +| ``address_family.sender_loop_detect`` | boolean | Enables/disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 | +| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 | +| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 | +| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 | +| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both ', 'send ', 'receive')| os10 | +| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 | +| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 | +| ``route_map.filter`` | string | Configures the filter for routing updates | os10 | +| ``route_map.state`` | string, choices: absent,present\* | Deletes the route-map of the BGP 
neighbor if set to absent | os10 | +| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.\*``) | os10 | +| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 | +| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 | +| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 | +| ``max_prefix.warning``| boolean | Configures a warning without dropping the session when maximum limit exceeds if set to true | os10| +| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 | +| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 | +| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 | +| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer 
group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 | +| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 | +| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 | +| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 | +| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 | +| ``neighbor.adv_start`` | integer | Set the advertisement start of the neighbor | os10 | +| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of a neighbor | os10 | +| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 | +| ``neighbor.remove_pri_as`` | string: absent,present | Configures the remove private AS number from outbound updates | os10 | +| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 | +| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 | +| 
``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 | +| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 | +| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 | +| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 | +| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 | +| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 | +| ``neighbor.description`` | string | Configures neighbor description | os10 | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 | +| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os10 | +| ``redistribute.route_type`` | string (required): static,connected,imported_bgp,l2vpn,ospf | Configures the name of the routing protocol to redistribute | os10 | +| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 | +| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 | +| ``redistribute.ospf_id`` | string | Configures the redistribute OSPF | os10 | +| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 | +| ``bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors | os10 | +| ``bfd_all_neighbors.interval`` | integer: 100 to 
1000 | Configures time interval for sending control packets to BFD peers in ms| os10 | +| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 | +| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 | +| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 | +| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 | +| ``vrfs`` | list | Enables VRF under BGP | os10 | +| ``vrf.name`` | string (Required)| Configures VRF name | os10 | +| ``vrf.router_id`` | string | Configures the IP address of the local BGP router instance in VRF | os10 | +| ``vrf.graceful_restart`` | boolean | Configures graceful restart capability in VRF | os10 | +| ``vrf.maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) in VRF | os10 | +| ``vrf.maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) in VRF | os10 | +| ``vrf.log_neighbor_changes`` | boolean | Configures log neighbors up/down in VRF | os10 | +| ``vrf.fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down in VRF | os10 | +| ``vrf.always_compare_med`` | boolean | Configures comparing MED from different neighbors in VRF | os10 | +| ``vrf.default_loc_pref`` | integer | Configures the default local preference value in VRF | os10 | +| ``vrf.as_notation`` | string | Changes the AS number notation format in VRF | os10 | +| ``vrf.enforce_first_as`` | boolean | Configures the first AS for eBGP routes in VRF | os10 | +| ``vrf.non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm in VRF | os10 | +| 
``vrf.outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members in VRF | os10 | +| ``vrf.route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) in VRF | os10 | +| ``vrf.route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection in VRF | os10 | +| ``vrf.route_reflector.cluster_id`` | string | Configures the route-reflector cluster-id in VRF | os10 | +| ``vrf.address_family_ipv4`` | dictionary | Configures IPv4 address family parameters in VRF (see ``address_family_ipv4.*``) | os10 | +| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) in VRF | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address in VRF | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true in VRF | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent in VRF | os10 | +| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| 
``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv4 address family | os10 | +| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 | +| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``vrf.address_family_ipv6`` | dictionary | Configures IPv6 address family parameters in VRF (see ``address_family_ipv6.*``) | os10 | +| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv6.ibgp_redist_internal`` | dictionary 
| Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 | +| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 | +| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv6 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``vrf.best_path`` | list | Configures the default best-path selection in VRF (see ``best_path.*``) | os10 | +| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 | +| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 | +| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 | +| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 | +| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 | +| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 | +| ``vrf.ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 | +| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 | +| ``vrf.ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 | +| ``ipv6_network.address`` | string (required) | 
Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 | +| ``vrf.neighbor`` | list | Configures IPv4 BGP neighbors in VRF (see ``neighbor.*``) | os10 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 | +| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 | +| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 | +| ``neighbor.auto_peer`` |string: unnumbered-auto | Enable auto-discovery of neighbors | os10 | +| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 | +| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 | +| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 | +| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 | +| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 | +| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 | +| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 | +| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 | +| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 | +| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP 
neighbor (see ``send_community.*``) | os10 | +| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 | +| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 | +| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 | +| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6 EVPN address family command mode on the BGP neighbor | os10 | +| ``address_family.activate`` | boolean | Configures activation or deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 | +| ``address_family.sender_loop_detect`` | boolean | Enables or disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6 l2vpn address family | os10 | +| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 | +| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 | +| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 | +| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both ', 'send ', 'receive')| os10 | +| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 | +| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 | +| ``route_map.filter`` | string | Configures the filter for routing updates | os10 | +| ``route_map.state`` | string, choices: absent,present* | Deletes the route-map of the BGP neighbor if set to absent | os10 | +| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.*``) | os10 | +| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from 
the peer | os10 | +| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 | +| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 | +| ``max_prefix.warning``| boolean | Configures a warning without dropping session when maximum limit exceeds if set to true | os10| +| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 | +| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 | +| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 | +| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer-group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 | +| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 | +| ``neighbor.peergroup`` | 
string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 | +| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 | +| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 | +| ``neighbor.adv_start`` | integer | Set the advertisement start of the neighbor | os10 | +| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of the neighbor | os10 | +| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 | +| ``neighbor.remove_pri_as`` | string: absent,present | Removes private AS number from outbound updates | os10 | +| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 | +| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 | +| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | 
os10 | +| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 | +| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 | +| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 | +| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 | +| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 | +| ``neighbor.description`` | string | Configures neighbor description | os10 | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 | +| ``vrf.redistribute`` | list | Configures the redistribute list to get information from other routing protocols in VRF (see ``redistribute.*``) | os10 | +| ``redistribute.route_type`` | string (required): static,connected,imported_bgp | Configures the name of the routing protocol to redistribute | os10 | +| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 | +| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 | +| ``redistribute.ospf_id`` | string | Configures the redistribute OSPF | os10 | +| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 | +| ``vrf.bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors in VRF (see ``bfd_all_neighbors.*``)| os10 | +| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 | +| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers 
in ms| os10 | +| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 | +| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 | +| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 | +| ``vrf.state`` | string: absent,present\* | Deletes the VRF instance under router BGP if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is 
used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_bgp* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + as_notation: asdot + enforce_first_as: false + non_deterministic_med: true + outbound_optimization: true + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + client_to_client: false + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + address_family: + - type: ipv4 + activate: false + state: present + max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 20::/64 + limit: 4 + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: 
+ - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + src_loopback: 0 + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 10 + threshold: 40 + warning: true + state: present + default_originate: + route_map: aa + state: present + distribute_list: + in: XX + in_state: present + out: YY + out_state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: absent + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + sender_loop_detect: false + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs: + - name: "GREEN" + router_id: 50.1.1.1 + maxpath_ibgp: 2 + 
maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: false + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + client_to_client: false + cluster_id: 1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + description: U_site2 vlan + send_community: + - type: extended + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + src_loopback: 0 + address_family: + - type: ipv4 + activate: false + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + activate: false + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 
2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: present + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + activate: false + sender_loop_detect: false + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + - route_type: imported_bgp + imported_bgp_vrf_name: test6 + route_map_name: dd + address_type: ipv4 + state: present + - route_type: ospf + ospf_id: 12 + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present + state: present + + +**Simple playbook to configure BGP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_bgp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_copy_config.md b/ansible_collections/dellemc/os10/docs/os10_copy_config.md new file mode 100644 index 00000000..eadefecb --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_copy_config.md @@ -0,0 +1,131 @@ +Copy-config role +================ + +This role is used to push the backup running configuration into a Dell EMC PowerSwitch platform running Dell EMC SmartFabric OS10, and merges the configuration in the template file with the running configuration of the device. + +The copy-config role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. 
You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- No predefined variables are part of this role +- Use *host_vars* or *group_vars* as part of the template file +- Configuration file is host-specific +- Copy the host-specific configuration to the respective file under the template directory in *.j2* format +- Variables and values are case-sensitive + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used 
| +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_copy_config* role to push the configuration file into the device. It creates a *hosts* file with the switch details and corresponding variables. It writes a simple playbook that only references the *os10_copy_config* role. By including the role, you automatically get access to all of the tasks to push configuration file. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + + # This variable shall be applied in the below jinja template for each host by defining here + os10_bgp + asn: 64801 + +**Sample roles/os10_copy_config/templates/leaf1.j2** + + ! Leaf1 BGP profile on Dell OS10 switch + snmp-server community public ro + hash-algorithm ecmp crc + ! + interface ethernet1/1/1:1 + no switchport + ip address 100.1.1.2/24 + ipv6 address 2001:100:1:1::2/64 + mtu 9216 + no shutdown + ! + interface ethernet1/1/9:1 + no switchport + ip address 100.2.1.2/24 + ipv6 address 2001:100:2:1::2/64 + mtu 9216 + no shutdown + ! + router bgp {{ os10_bgp.asn }} + bestpath as-path multipath-relax + bestpath med missing-as-worst + router-id 100.0.2.1 + ! + address-family ipv4 unicast + ! + address-family ipv6 unicast + ! + neighbor 100.1.1.1 + remote-as 64901 + no shutdown + ! + neighbor 100.2.1.1 + remote-as 64901 + no shutdown + ! + neighbor 2001:100:1:1::1 + remote-as 64901 + no shutdown + ! 
+ address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + neighbor 2001:100:2:1::1 + remote-as 64901 + no shutdown + ! + address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + +**Simple playbook to push the configuration file into the device — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_copy_config + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_dns.md b/ansible_collections/dellemc/os10/docs/os10_dns.md new file mode 100644 index 00000000..b65d7622 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_dns.md @@ -0,0 +1,125 @@ +DNS role +======== + +This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The DNS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_dns keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``name_server`` | list | Configures DNS (see ``name_server.*``) | os10 | +| ``name_server.ip`` | list | Configures the name server IP | os10 | +| ``name_server.vrf`` | list | Configures VRF for each IP | os10 | +| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os10 | +| ``domain_list`` | list | Configures domain-list (see ``domain_list.*``) | os10 | +| ``domain_list.name`` | list | Configures the domain-list name | os10 | +| ``domain_list.vrf`` | list | Configures VRF for each domain-list name | os10 | +| ``domain_list.state`` | string: absent,present\* | Deletes the domain-list if set to absent | os10 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_dns* role to completely set up the DNS server configuration. The example creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os10_dns* role. By including the role, you automatically get access to all of the tasks to configure DNS. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_dns: + domain_lookup: true + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - ip: + - 3.1.1.2 + vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + domain_list: + - name: + - dname7 + - dname8 + vrf: + - test + - test1 + - name: + - dname7 + vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent + +> **NOTE**: vrf should be present which can be configured using os10_vrf role + +**Simple playbook to setup DNS — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_dns + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_ecmp.md b/ansible_collections/dellemc/os10/docs/os10_ecmp.md new file mode 100644 index 00000000..6932fdf6 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_ecmp.md @@ -0,0 +1,78 @@ +ECMP role +========= + +This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. 
+ +The ECMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_ecmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``ecmp_group_max_paths`` | integer | Configures the number of maximum paths per ECMP group | os10 | +| ``trigger_threshold`` | integer | Configures the number of link bundle utilization trigger threshold | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_ecmp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_ecmp: + ecmp_group_max_paths: 3 + trigger_threshold: 50 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_ecmp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md b/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md new file mode 100644 index 00000000..0ff99bf2 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md @@ -0,0 +1,119 @@ +os10_fabric_summary +===================================== +This role is used to get show system information of all devices in the fabric. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Fabric summary role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used | +| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+ +Dependencies +------------ + +- *xmltodict* library should be installed to get show command output in dict format from XML +- To install the package use the *pip install xmltodict* command + +Example playbook +---------------- + +This example uses the *os10_fabric_summary* role to completely get the show attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the ansible_network_os variable with the corresponding Dell EMC OS10 name. + +The *os10_fabric_summary* role has a simple playbook that only references the *os10_fabric_summary* role. + +**Sample hosts file** + + site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + [spine] + site1-spine1 + site1-spine2 + site2-spine1 + site2-spine2 + [LeafAndSpineSwitch:children] + spine + +**Sample host_vars/site1-spine1** + + + cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + os10_cli_user: xxxx + os10_cli_pass: xxxx + ansible_network_os: dellemc.os10.os10 + +**Simple playbook to set up fabric summary — provision.yaml** + + --- + - name: show system summary command + hosts: localhost + gather_facts: False + connection: local + roles: + - os10_fabric_summary + +**Run** + + ansible-playbook -i hosts provision.yaml + +**Sample Output** + + "results": [ + { + "device type": "S6010-ON", + "host": "10.11.180.21", + "hostname": "host3", + "inv_name": "site1-spine1", + "node-mac": "e4:f0:04:9b:e5:dc", + "service-tag": "D33FXC2", + "software-version": 
"10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.22", + "hostname": "host22", + "inv_name": "site1-spine2", + "node-mac": "e4:f0:04:9b:eb:dc", + "service-tag": "J33FXC2", + "software-version": "10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.24", + "hostname": "site2-spine1", + "inv_name": "site2-spine1", + "node-mac": "e4:f0:04:9b:ee:dc", + "service-tag": "343FXC2", + "software-version": "10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.23", + "hostname": "site2-spine2", + "inv_name": "site2-spine2", + "node-mac": "e4:f0:04:9b:f1:dc", + "service-tag": "543FXC2", + "software-version": "10.4.9999EX" + } + ] + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md b/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md new file mode 100644 index 00000000..dd98aa95 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md @@ -0,0 +1,152 @@ +ACL flow-based monitor role +=========================== + +This role facilitates configuring ACL flow-based monitoring attributes. Flow-based mirroring is a mirroring session in which traffic matches specified policies that are mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions (including port monitor sessions). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The ACL flow-based role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_flow_monitor` (dictionary) with session ID key (in *session * format; 1 to 18) +- Variables and values are case-sensitive + +**session ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``session_type`` | string: local_*_,rspan-source,erspan-source | Configures the monitoring session type | os10 | +| ``description`` | string | Configures the monitor session description | os10 | +| ``port_match`` | list | Displays a list of interfaces with location source and destination | os10 | +| ``port_match.interface_name`` | string | Configures the interface | os10 | +| ``port_match.location`` | string: source,destination | Configures the source/destination of an interface | os10 | +| ``port_match.state`` | string: absent,present\* | Deletes the interface if set to absent | os10 | +| ``flow_based`` | boolean | Enables flow-based monitoring | os10 | +| ``shutdown`` | string: up,down\* | Enable/disables the monitoring session | os10 | +| ``state`` | string: absent,present\* | Deletes the monitoring session corresponding to the session ID if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_flow_monitor* role to configure session monitor configuration. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file defines the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. +It writes a simple playbook that only references the *os10_flow_monitor* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + os10_flow_monitor: + session 1: + session_type: local + description: "Description goes here" + port_match: + - interface_name: ethernet 1/1/4 + location: source + state: present + - interface_name: ethernet 1/1/3 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 2: + session_type: local + description: "Description of session goes here" + port_match: + - interface_name: ethernet 1/1/6 + location: source + state: present + - interface_name: ethernet 1/1/7 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 3: + state: absent + os10_acl: + - name: testflow + type: ipv4 + description: testflow description + extended: true + entries: + - number: 5 + permit: true + protocol: icmp + source: any + destination: any + other_options: capture session 1 count + state: present + - number: 10 + permit: true + protocol: ip + source: 102.1.1.0/24 + destination: any + other_options: capture session 1 count byte + state: present + - number: 15 + permit: false + protocol: udp + source: any + destination: any + other_options: capture session 2 count byte + state: present + - number: 20 + permit: false + protocol: tcp + source: any + destination: any + other_options: capture session 2 count byte + state: present + stage_ingress: + - name: ethernet 
1/1/1 + state: present + +> **NOTE**: Destination port should not be an L2/L3 port which can be configured using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_flow_monitor + - dellemc.os10.os10_acl + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md b/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md new file mode 100644 index 00000000..9ae8f731 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md @@ -0,0 +1,73 @@ +Image upgrade role +=================================== + +This role facilitates upgrades or installation of a software image. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Image upgrade role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_image_upgrade keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``operation_type`` | string: cancel,install | Displays the type of image operation | os10 | +| ``software_image_url`` | string | Configures the URL path to the image file | os10 | +| ``software_version`` | string | Displays the software version of the image file | os10 | +| ``number_of_retries`` | int | Configures the number of retries to check the status of image install process | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_image_upgrade* role to upgrade/install software image. 
It creates a *hosts* file with the switch details, corresponding *host_vars* file, and a simple playbook that references the *os10_image_upgrade* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + os10_image_upgrade: + operation_type: install + software_image_url: tftp://10.16.148.8/PKGS_OS10-Enterprise-10.2.9999E.5790-installer-x86_64.bin + software_version: 10.2.9999E + number_of_retries: 50 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_image_upgrade + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_interface.md b/ansible_collections/dellemc/os10/docs/os10_interface.md new file mode 100644 index 00000000..bbb4f8ee --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_interface.md @@ -0,0 +1,178 @@ +Interface role +============== + +This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Interface role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name +- For physical interfaces, the interface name must be in * * format; for logical interfaces, the interface must be in * * format; physical interface name can be *ethernet 1/1/32* +- For interface ranges, the interface name must be in *range * format; *range ethernet 1/1/1-1/1/4* +- Logical interface names can be *vlan 1* or *port-channel 1* +- Variables and values are case-sensitive + +> **NOTE**: Only define supported variables for the interface type, and do not define the *switchport* variable for a logical interface. 
+ +**interface name keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``desc`` | string | Configures a single line interface description | os10 | +| ``portmode`` | string | Configures port-mode according to the device type | os10 | +| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os10 | +| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os10 | +| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (1280 to 65535) | os10 | +| ``fanout`` | string:dual, single; string:10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x (os10) | Configures fanout to the appropriate value | os10 | +| ``suppress_ra`` | string; present,absent | Configures IPv6 router advertisements if set to present | os10 | +| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os10 | +| ``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address for DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | os10 | +| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | os10 | +| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os10 | +| ``min_ra`` | string | Configures RA minimum interval time period | os10 | +| ``max_ra`` | string | Configures RA maximum interval time period | os10 | +| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 | +| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface | os10 | +| ``virtual_gateway_ip`` | string | Configures an anycast 
gateway IP address for a VxLAN virtual network as well as VLAN interfaces| os10 | +| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 | +| ``state_ipv6`` | string: absent,present\* | Deletes the IPV6 address if set to absent | os10 | +| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os10 | +| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os10 | +| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os10 | +| ``flowcontrol`` | dictionary | Configures the flowcontrol attribute (see ``flowcontrol.*``) | os10 | +| ``flowcontrol.mode`` | string: receive,transmit | Configures the flowcontrol mode | os10 | +| ``flowcontrol.enable`` | string: on,off | Configures the flowcontrol mode on | os10 | +| ``flowcontrol.state`` | string: absent,present\* | Deletes the flowcontrol if set to absent | os10 | +| ``ipv6_bgp_unnum`` | dictionary | Configures the IPv6 BGP unnum attributes (see ``ipv6_bgp_unnum.*``) below | os10 | +| ``ipv6_bgp_unnum.state`` | string: absent,present\* | Disables auto discovery of BGP unnumbered peer if set to absent | os10 | +| ``ipv6_bgp_unnum.peergroup_type`` | string: ebgp,ibgp | Specifies the type of template to inherit from | os10 | + +| ``stp_rpvst_default_behaviour`` | boolean: false,true | Configures RPVST default behaviour of BPDU's when set to True which is default | os10 | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_interface* role to set up description, MTU, admin status, port mode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os10_interface* role. + +**Sample hosts file** + + leaf3 ansible_host= + +**Sample host_vars/leaf3** + + hostname: "leaf3" + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_interface: + ethernet 1/1/32: + desc: "Connected to Core 2" + mtu: 2500 + stp_rpvst_default_behaviour: False + portmode: + admin: up + switchport: False + ip_and_mask: + ip_type_dynamic: True + ipv6_type_dynamic: True + ethernet 1/1/12: + desc: "ipv6 auto config" + switchport: False + mtu: 2500 + admin: up + ipv6_autoconfig: True + ethernet 1/1/14: + fanout: 10g-4x + ethernet 1/1/13: + desc: "set ipv6 address" + switchport: False + admin: up + ipv6_and_mask: 2001:4898:5809:faa2::10/126 + state_ipv6: present + ethernet 1/1/1: + desc: "Connected to Leaf1" + portmode: "trunk" + switchport: True + suppress_ra: present + admin: up + stp_rpvst_default_behaviour: False + ethernet 1/1/3: + desc: site2-spine2 + ip_and_mask: 10.9.0.4/31 + mtu: 9216 + switchport: False + admin: up + flowcontrol: + mode: "receive" + enable: "on" + state: "present" + + vlan 100: + ip_and_mask: + ipv6_and_mask: 2001:4898:5808:ffaf::1/64 + state_ipv6: present + ip_helper: + - ip: 10.0.0.33 + state: present + admin: up + range ethernet 1/1/1-1/1/32: + mtu: 2500 + port-channel 10: + admin: up + switchport: False + suppress_ra: + stp_rpvst_default_behaviour: True + ipv6_bgp_unnum: + state: present + peergroup_type: ebgp + vlan 10: + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + 
admin: up + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf3 + roles: + - dellemc.os10.os10_interface + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_lag.md b/ansible_collections/dellemc/os10/docs/os10_lag.md new file mode 100644 index 00000000..eb679dcf --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_lag.md @@ -0,0 +1,103 @@ +LAG role +======== + +This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG and minimum required link. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The LAG role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Object drives the tasks in this role +- `os10_lag` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- `os10_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po ` format (1 to 128) +- Variables and values are case-sensitive + +**port-channel ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os10 | +| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 32) | os10 | +| ``max_bundle_size`` | integer | Configures the maximum bundle size for the port channel | os10 | +| ``lacp_system_priority`` | integer | Configures the LACP system-priority value | os10 | +| ``lacp_fallback_enable`` | boolean | Configures LACP fallback | os10 | +| ``channel_members`` | list | Specifies the list of port members to be associated to the port-channel (see ``channel_members.*``) | os10 | +| ``channel_members.port`` | string | Specifies valid interface names to be configured as port-channel members | os10 | +| ``channel_members.mode`` | string: active,passive,on | Configures mode of channel members | os10 | +| ``channel_members.port_priority`` | integer | Configures port priority on devices for channel members | os10 | +| 
``channel_members.lacp_rate_fast`` | boolean | Configures the LACP rate as fast if set to true | os10 | +| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required 
to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_lag* role to setup port channel ID and description, and configures hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel either in static or dynamic mode. You can also delete the LAG with the port-channel ID or delete the members associated to it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_lag* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: ethernet 1/1/31 + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_lag + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/docs/os10_lldp.md b/ansible_collections/dellemc/os10/docs/os10_lldp.md new file mode 100644 index 00000000..0c08af4d --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_lldp.md @@ -0,0 +1,149 @@ +LLDP role +========= + +This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at a global and interface level. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at global and interface level. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The LLDP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_lldp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``enable`` | boolean | Enables or disables LLDP at a global level | os10 | +| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os10 | +| ``reinit`` | integer | Configures the reinit value (1 to 10) | os10 | +| ``timer`` | integer | Configures the timer value (5 to 254) | os10 | +| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os10 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see 
``med_tlv.*``) | os10 | +| ``med.fast_start_repeat_count`` | integer | Configures med fast start repeat count value (1 to 10) | os10 | +| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os10 | +| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os10 | +| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os10 | +| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os10 | +| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os10 | +| ``application.vlan_type`` | string: tag, untag | Configures the VLAN type for the application MED TLvs advertisement | os10 | +| ``application.network_policy_id`` | integer | Configures network policy ID for the application MED TLVs advertisement | os10 | +| ``application.state`` | string: present\*,absent | Deletes the application if set to absent | os10 | +| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os10 | +| ``local_interface.`` | dictionary | Configures LLDP at the interface level (see ``.*``) | os10 | +| ``.mode`` | string: rx,tx | Configures LLDP mode configuration at the interface level | os10 | +| ``.mode_state`` | string: absent,present | Configures transmit/receive at the interface level| os10 | +| ``.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os10 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os10 | +| ``med.enable`` | boolean | Enables interface level MED capabilities | os10 | +| ``med.tlv`` | string | Configures MED TLV advertisement at interface level | os10 | +| ``med.tlv_state`` | string: 
present\*,absent | Deletes the interface level MED configuration if set to absent | os10 | +| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os10 | +| ``application.network_policy_id`` | integer | Configures the *network_policy_id* for the application of MED | os10 | +| ``application.state`` | string: present\*,absent | Deletes the associated network policy ID for the application if set to absent.| os10 | +| ``advertise.tlv`` | list | Configures TLVs advertisement at interface level (see `.tlv.*`) | os10 | +| ``tlv.name`` | string: basic-tlv,dcbxp,dcbxp-appln,dot1-tlv,dot3-tlv | Configures corresponding to the TLV name specified at the interface | os10 | +| ``tlv.value`` | string | Specifies corresponding TLV value according to the name as a string | os10 | +| ``tlv.state`` | string: present\*,absent | Deletes the interface level TLVs advertisement if set to absent | os10 | + + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_lldp* role to configure protocol lldp. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_lldp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_lldp: + enable: false + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + ethernet 1/1/1: + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: present + application: + - network_policy_id: 4 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_lldp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/docs/os10_logging.md b/ansible_collections/dellemc/os10/docs/os10_logging.md new file mode 100644 index 00000000..c8a2dbf2 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_logging.md @@ -0,0 +1,97 @@ +Logging role +============ + +This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Logging role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_logging keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``logging`` | list | Configures the logging server (see ``logging.*``) | os10 | +| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os10 | +| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os10 | +| ``console`` | dictionary | Configures logging to the console (see ``console.*``) | os10 | +| ``console.enable`` | boolean | Enables/disables logging to the console | os10 | +| ``console.severity`` | string | Configures the minimum severity level for logging to the console | os10 | +| ``log_file`` | dictionary | Configures logging to a log file 
(see ``log_file.*``) | os10 | +| ``log_file.enable`` | boolean | Enables/disables logging to a log file | os10 | +| ``log_file.severity`` | string | Configures the minimum severity level for logging to a log file | os10 | +| ``source_interface`` | string | Configures the source interface for logging | os10 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is 
used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_logging: + logging: + - ip: 1.1.1.1 + state: absent + console: + enable: True + severity: log-err + log_file: + enable: True + severity: log-err + source_interface: "ethernet1/1/30" + +**Simple playbook to setup logging — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_logging + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/docs/os10_network_validation.md b/ansible_collections/dellemc/os10/docs/os10_network_validation.md new file mode 100644 index 00000000..e9014c42 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_network_validation.md @@ -0,0 +1,304 @@ +Network validation role +========================= + +This role is used to perform network validation. It validates network features of wiring connections, BGP neighbors, MTU between neighbors, and VLT pairing. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The Network validation role requires an SSH connection for connectivity to a Dell EMC OS10 device. You can use any of the built-in OS connection variables. + +- **Wiring validation** — Based on the LLDP neighbor establishment, the intended neighbor input model is defined by the _group_var/all_ user which is compared with the actual LLDP neighbor; report is generated if there is any mismatch with the intended neighbors + +- **BGP validation** — Based on the BGP neighbor state establishment, report is generated if the BGP neighbor state is not in an established state + +- **MTU validation** — Based on the interface MTU, the report is generated if there is an MTU mismatch between LLDP neighbors + +- **VLT validation** — Based on the VLT information, the report is generated if the backup VLT link is down or not present + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- Variables and values are case-sensitive + +**wiring_validation keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``intended_neighbors`` | list | Defines topology details planned | os10 | +| ``source_switch`` | string | Defines the source switch inventory name planned | os10 | +| ``source_port`` 
| string | Defines the source port planned | os10 | +| ``dest_switch`` | string | Defines the destination switch inventory name planned | os10 | +| ``dest_port`` | string | Defines the destination port planned | os10 | + +**bgp_validation keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``intended_bgp_neighbors`` | list | Defines topology details planned | os10 | +| ``source_switch`` | string | Defines the source switch inventory name planned | os10 | + +**vlt_validation keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``intended_vlt_pairs`` | list | Defines topology details planned | os10 | +| ``primary`` | string | Defines the primary role of switch inventory name planned | os10 | +| ``secondary`` | string | Defines the secondary role of switch inventory name planned | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible _group_vars_ or _host_vars_ directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if the value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; defaults to 22 | +| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used | +| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the _become_ method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use, if required, to enter privileged mode on the remote device; if `ansible_become` is set to no, this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+ +Dependencies +------------ + +- The _xmltodict_ library should be installed to convert show command output in dictionary format from XML +- To install the package, use the pip install xmltodict command +- The *os10_fabric_summary* role must be included to query system network summary information + +Example playbook +---------------- + +This example uses the *os10_network_validation* role to verify network validations. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + + +**Sample hosts file** + + site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + [spine] + site1-spine1 + site1-spine2 + site2-spine1 + site2-spine2 + [LeafAndSpineSwitch:children] + spine + + +**Sample host_vars/site1-spine1** + + cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + + os10_cli_user: xxxx + os10_cli_pass: xxxx + ansible_network_os: dellemc.os10.os10 + + +#### Sample ``group_var/all`` + +**Sample input for wiring validation** + + + intended_neighbors: + - source_switch: site1-spine2 + source_port: ethernet1/1/5 + dest_port: ethernet1/1/29 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/6 + dest_port: ethernet1/1/30 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/7 + dest_port: ethernet1/1/31 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: 
ethernet1/1/8 + dest_port: ethernet1/1/32 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/9 + dest_port: ethernet1/1/21 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/7 + dest_port: ethernet1/1/29 + dest_switch: site1-spine3 + +**Sample input for BGP validation** + + intended_bgp_neighbors: + - source_switch: site1-spine1 + neighbor_ip: ["10.11.0.1","10.9.0.1","10.9.0.3","10.9.0.5","1.1.1.1"] + - source_switch: site1-spine2 + neighbor_ip: ["10.11.0.0","10.9.0.9","10.9.0.11","10.9.0.15"] + +**Sample input for VLT validation** + + intended_vlt_pairs: + - primary: site1-spine1 + secondary: site2-spine2 + - primary: site2-spine1 + secondary: site2-spine2 + + +#### Simple playbook to setup network validation + +**Sample playbook of ``validation.yaml`` to run complete validation** + + --- + - name: setup network validation + hosts: localhost + gather_facts: no + connection: local + roles: + - os10_network_validation + +**Sample playbook to run wiring validation** + + --- + - name: setup wiring validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: wiring_validation.yaml + +**Sample playbook to run BGP validation** + + --- + - name: setup bgp validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: bgp_validation.yaml + +**Sample playbook to run VLT validation** + + --- + - name: setup vlt validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: vlt_validation.yaml + +**Sample playbook to run MTU validation** + + --- + - name: setup mtu validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + 
tasks: + - import_role: + name: os10_network_validation + tasks_from: mtu_validation.yaml + + +**Run** + +Execute the playbook and examine the results. + + ansible-playbook -i inventory.yaml validation.yaml + +**sample output of wiring validation** + + "results": [ + { + "dest_port": "ethernet1/1/1", + "dest_switch": "site2-spine2", + "error_type": "link-missing", + "reason": "link is not found for source switch: site2-spine1,port: ethernet1/1/1", + "source_port": "ethernet1/1/1", + "source_switch": "site2-spine1" + }, + { + "dest_port": "ethernet1/1/2", + "dest_switch": "site2-spine1", + "error_type": "link-mismatch", + "reason": "Destination switch is not an expected value, expected switch: site2-spine1,port: ethernet1/1/2; actual switch: site1-spine2(svc-tag:J33FXC2, node_mac:e4:f0:04:9b:eb:dc), port: ethernet1/1/1", + "source_port": "ethernet1/1/1", + "source_switch": "site1-spine1" + } + ] + +**sample output of BGP validation** + + "results": [ + { + "bgp_neighbor": "10.9.0.1", + "bgp_state": "idle", + "error_type": "remote_port_down", + "possible_reason": "remote port site2-spine1 ethernet1/1/2 is down", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "-", + "bgp_state": "idle", + "error_type": "not_an_intended_neighbor", + "possible_reason": "neighbor 10.9.0.7 is not an intended, please add this neighbor in the intended_bgp_neighbors", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "1.1.1.1", + "error_type": "config_missing", + "possible_reason": "neighbor config missing", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "10.9.0.9", + "bgp_state": "idle", + "error_type": "remote_port_down", + "possible_reason": "remote port site2-spine1 ethernet1/1/3 is down", + "source_switch": "site1-spine2" + } + ] + +**sample output of VLT validation** + + "results": [ + { + "error_type": "secondary_mismatch", + "intended_primary": "site1-spine1", + "intended_secondary": "site2-spine2", + "possible_reason": "config mismatch as 
site2-spine2 is expected, but the actual secondary is site1-spine2 ", + "secondary": "site1-spine2" + }, + { + "error_type": "peer_missing", + "intended_primary": "site2-spine1", + "intended_secondary": "site2-spine2", + "possible_reason": "peer info is not configured or peer interface is down" + } + ] + +**sample output of MTU validation** + + "msg": { + "results": "There is no MTU mistmatch between neighbors" + } + + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/docs/os10_ntp.md b/ansible_collections/dellemc/os10/docs/os10_ntp.md new file mode 100644 index 00000000..17e879c6 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_ntp.md @@ -0,0 +1,124 @@ +NTP role +======== + +This role facilitates the configuration of network time protocol (NTP) attributes. It specifically enables configuration of NTP server, NTP source, authentication, and broadcast service. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The NTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_ntp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``server`` | list | Configures the NTP server (see ``server.*``) | os10 | +| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os10 | +| ``server.key`` | integer | Configures the peer authentication key for the NTP server | os10 | +| ``server.prefer`` | boolean | Configures the peer preference | os10 | +| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os10 | +| ``source`` | string | Configures the interface for the source address | os10 | +| ``master`` | integer | Configures the local clock to act as the server | os10 | +| ``authenticate`` | boolean | Configures authenticate time sources | os10 | +| ``authentication_key`` | list | Configures authentication key for trusted time sources (see ``authentication_key.*``) | os10 | +| ``authentication_key.key_num`` | integer | Configures authentication key number | os10 | +| ``authentication_key.key_string_type`` | integer: 0,9 | Configures hidden authentication key string if the value is 9, and configures unencrypted authentication key string if the value is 0 | os10 | +| ``authentication_key.key_string`` | string | Configures the authentication key string | os10 | +| ``authentication_key.type`` | string: md5,sha1,sha2-256 | Configures the authentication type | 
os10 | +| ``authentication_key.state`` | string: absent,present\* | Deletes the authentication key if set to absent | os10 | +| ``trusted_key`` | list | Configures key numbers for trusted time sources (see ``trusted_key.*``) | os10 | +| ``trusted_key.key_num`` | integer | Configures the key number | os10 | +| ``trusted_key.state`` | string: absent,present\* | Deletes the trusted key if set to absent | os10 | +| ``intf`` | dictionary | Configures NTP on the interface (see ``intf.*``) | os10 | +| ``intf.`` | dictionary | Configures NTP on the interface (see ``.*``) | os10 | +| ``.disable`` | boolean | Configures NTP disable on the interface | os10 | +| ``.broadcast`` | boolean | Configures NTP broadcast client service on the interface | os10 | +| ``vrf`` | dictionary | Enables NTP on VRF (see ``vrf.*``) | os10 | +| ``vrf.name`` | string | Name of the VRF to enable NTP | os10 | +| ``vrf.state`` | string: absent,present\* | Disables NTP on the VRF if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_ntp* role to set the NTP server, source ip, authentication and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When the `os10_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os10_ntp* role. + +By including the role, you automatically get access to all of the tasks to configure NTP attributes. The sample *host_vars* is for os10. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + host: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_ntp: + source: ethernet 1/1/2 + master: 5 + authenticate: true + authentication_key: + - key_num: 123 + key_string_type: 9 + key_string: test + type: md5 + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + ethernet 1/1/2: + disable: true + broadcast: true + vrf: + name: red + state: present + +**Simple playbook to setup NTP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_ntp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_prefix_list.md b/ansible_collections/dellemc/os10/docs/os10_prefix_list.md new file mode 100644 index 00000000..dce141e8 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_prefix_list.md @@ -0,0 +1,104 @@ +Prefix-list role +================ + +This role facilitates the configuration of a prefix-list. It supports the configuration of an IP prefix-list, and assigns the prefix-list to line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The prefix-list role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. 
You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_prefix_list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os10 | +| ``name`` | string (required) | Configures the prefix-list name | os10 | +| ``description`` | string | Configures the prefix-list description | os10 | +| ``entries`` | list | Configures rules in the prefix-list (see ``entries.*``) | os10 | +| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os10 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to reject packets if set to false | os10 | +| ``entries.net_num`` | string (required) | Specifies the network number | os10 | +| ``entries.mask`` | string (required) | Specifies the mask | os10 | +| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os10 | +| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os10 | +| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os10 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the 
prefix-list if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal 
and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_prefix_list* role to configure prefix-list for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_prefix_list* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_prefix_list + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_qos.md b/ansible_collections/dellemc/os10/docs/os10_qos.md new file mode 100644 index 00000000..58415970 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_qos.md @@ -0,0 +1,90 @@ +QoS role +======== + +This role facilitates the configuration quality of service (QoS) attributes like policy-map and class-map. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. 
+ +The QoS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_qos keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os10 | +| ``policy_map.name`` | string (required) | Configures the policy-map name | os10 | +| ``policy_map.type`` | string: qos\*, application, control-plane, network-qos, queuing in os10 | Configures the policy-map type | os10 | +| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os10 | +| ``class_map`` | list | Configures the class-map (see ``class_map.*``) | os10 | +| ``class_map.name`` | string (required) | Configures the class-map name | os10 | +| ``class_map.type`` | string: qos\*,application,control-plane,network-qos,queuing | Configures the class-map type | os10 | +| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_qos* role to configure the policy-map class-map. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_qos* role. By including the role, you automatically get access to all of the tasks to configure QoS features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_qos: + policy_map: + - name: testpolicy + type: qos + state: present + class_map: + - name: testclass + type: application + state: present + +**Simple playbook to setup qos — leaf.yaml** + + - hosts: leaf1 + roles: + - Dell-Networking.os10.os10_qos + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_raguard.md b/ansible_collections/dellemc/os10/docs/os10_raguard.md new file mode 100644 index 00000000..abf7cf4a --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_raguard.md @@ -0,0 +1,126 @@ +IPv6 RA uard role +=================== + +This role facilitates the configuration of IPv6 RA Guard attributes. It specifically enables configuration of IPv6 RA Guard feature enable/disable, IPv6 RA Guard policy definition and policy parameter configuration, and attachment of IPv6 RA Guard policy to an interface. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The IPv6 RA Guard role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_raguard keys** + + +| Key | Type | Description |Support | +|--------------------------------------|-------------------------|----------------------------------------------------------|---------| +| ``enable`` | boolean | Enables IPv6 RA-Guard feature | os10 | +| ``policy`` | list | Configures the IPv6 RA Guard policy (see ``policy.*``) | os10 | +| ``policy.state`` | string: absent/present\*| Deletes the policy if set to absent | os10 | +| ``policy.name`` | string (required) | Configures the IPv6 RA Guard policy name | os10 | +| ``policy.device_role.value`` | string (required) | Configures the device role for a policy | os10 | +| ``policy.device_role.state`` | string: absent,present\*| Deletes the device role if set to absent | os10 | +| ``policy.managed_config_flag.value`` | string | Configures the managed config flag param for a policy | os10 | +| ``policy.managed_config_flag.state`` | string: absent,present\*| Deletes the managed config flag if set to absent | os10 | +| ``policy.other_config_flag.value`` | string | Configures the other config flag param for a policy | os10 | +| ``policy.other_config_flag.state`` | string: absent,present\*| Deletes the other config flag if set to absent | os10 | +| ``policy.mtu.value`` | integer | Configures the MTU param for a policy | os10 | +| ``policy.mtu.state`` | string: absent,present\*| Deletes the MTU if set to absent | os10 | +| ``policy.reachable_time.value`` | integer | Configures the reachable time param for a policy | os10 | 
+| ``policy.reachable_time.state`` | string: absent,present\*| Deletes the reachable time if set to absent | os10 | +| ``policy.retrans_timer.value`` | integer | Configures the retransmit timer param for a policy | os10 | +| ``policy.retrans_timer.state`` | string: absent,present\*| Deletes the retransmit timer if set to absent | os10 | +| ``policy.router_lifetime.value`` | integer | Configures the router lifetime param for a policy | os10 | +| ``policy.router_lifetime.state`` | string: absent,present\*| Deletes the router lifetime if set to absent | os10 | +| ``policy.router_preference.value`` | string | Configures the router preference param for a policy | os10 | +| ``policy.router_preference.state`` | string: absent,present\*| Deletes the router preference if set to absent | os10 | +| ``policy.match`` | list | Configures the prefix/ACL/MAC list param for a policy | os10 | +| ``policy.match.type`` | string | Configures the prefix/ACL/MAC type for a policy | os10 | +| ``policy.match.name`` | string | Configures the prefix/ACL/MAC name for a policy | os10 | +| ``policy.match.state`` | string: absent,present\*| Deletes the prefix/ACL/MAC if set to absent | os10 | +| ``intf`` | dictionary | Configures IPv6 RA Guard on the interface (see``intf.*``) | os10 | +| ``intf.`` | dictionary | Configures RA Guard on the interface (see``.*``)| os10 | +| ``.policy_name`` | String | Configures RA Guard policy name to be attached on an interface | os10 | +| ``.vlan`` | String | Configures VLAN name to which policy to be attached on an interface| os10| +| ``.state`` | String: absent,present\*| Deletes the policy if set to absent an interface | os10| + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ + +Example playbook +---------------- + +This example uses the *os10_raguard* role to configure the IPv6 RA Guard feature enable/disable, IPv6 RA Guard Policy defination and policy parameter configuration, Attachment of IPv6 RA Guard policy to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os10_raguard* role. By including the role, you automatically get access to all of the tasks to configure IPv6 RA Guard attributes. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + host: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_raguard: + enable: true + policy: + - policy_name: test + device_role: + value: router + state: present + managed_config_flag: + value: "on" + state: present + mtu: + value: 1280 + state: present + match: + - type: prefix_list + name: test_prefix + state: present + state: present + intf: + ethernet 1/1/2: + policy_name: test + vlan: 10 + state: present + +**Simple playbook to setup IPv6 RA Guard — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_raguard + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_route_map.md b/ansible_collections/dellemc/os10/docs/os10_route_map.md new file mode 100644 index 00000000..1160ca48 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_route_map.md @@ -0,0 +1,190 @@ +Route-map role +============== + +This role facilitates the configuration of route-map attributes. 
This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The route-map role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_route_map keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``route_map`` | list | Configures the route-map (see ``route_map.*``) | os10 | +| ``route_map.name`` | string (required) | Configures the route-map name | os10 | +| ``route_map.permit`` | boolean | Configures permit/deny set operations | os10 | +| ``route_map.seq_num`` | integer | Configures the sequence number | os10 | +| ``route_map.continue`` | integer | Configures the next sequence number | os10 | +| ``route_map.set`` | dictionary | Configures route-map to set values in the destination routing protocol (see ``set.*``) | os10 | +| ``set.local_pref`` | integer | Configures the BGP local preference path attribute | os10 | +| ``set.metric`` | string | Configures a specific value to add or subtract from the existing metric value ("+ ", "- ", format) | os10 | +| ``set.metric_type`` | string: internal,type-1,type-2 | Configures the metric type for the destination routing protocol | os10 | +| ``set.origin`` | string: igp,egp,incomplete | Configures the BGP origin attribute | os10 | +| ``set.weight`` | integer | Configures the weight for the 
BGP route | os10 | +| ``set.comm_list`` | dictionary | Configures the BGP community list (see ``comm_list.*``) | os10 | +| ``comm_list.add`` | string | Adds the community attribute of a BGP update | os10 | +| ``comm_list.delete`` | string | Deletes a community attribute of a BGP update | os10 | +| ``set.community`` | string | Configures the community attribute for a BGP route update | os10 | +| ``set.extcomm_list`` | dictionary | Configures the BGP extcommunity list (see ``extcomm_list.*``) | os10 | +| ``extcomm_list.add`` | string | Adds an extended community attribute of a BGP update | os10 | +| ``extcomm_list.delete`` | string | Deletes the extended community attribute of a BGP update | os10 | +| ``set.extcommunity`` | string | Configures the extended community attribute for a BGP route update | os10 | +| ``set.next_hop`` | list | Configures the next-hop address (see ``next_hop.*``) | os10 | +| ``next_hop.type`` | string: ip,ipv6 | Configures the type of the next-hop address | os10 | +| ``next_hop.address`` | string | Configures the next-hop address | os10 | +| ``next_hop.track_id`` | integer | Configures the object track ID | os10 | +| ``next_hop.state`` | string: present\*,absent | Deletes the next-hop address if set to absent | os10 | +| ``route_map.match`` | list | Configures the route-map to match values from the route table (see ``match.*``) | os10 | +| ``match.ip_type`` | string (required): ipv4,ipv6 | Configures the IPv4/IPv6 address to match | os10 | +| ``match.access_group`` | string | Configures the access-group or list to match | os10 | +| ``match.source_protocol_ebgp`` | string | Configures the source protocol to eBGP to match | os10 | +| ``match.source_protocol_ibgp`` | string | Configures the source protocol to iBGP to match | os10 | +| ``match.source_protocol_evpn`` | string | Configures the source protocol to EVPN to match | os10 | +| ``match.source_protocol_static`` | string | Configures the source protocol to static to match | os10 | +| 
``match.source_protocol_connected`` | string | Configures the source protocol to connected to match | os10 | +| ``match.source_protocol_ospf`` | string | Configures the source protocol to OSPF to match | os10 | +| ``match.prefix_list`` | string | Configures the IP prefix-list to match against | os10 | +| ``route_map.state`` | string, choices: present\*,absent | Deletes the route-map if set to absent | os10 | +| ``as_path`` | list | Configures the BGP AS path filter (see ``as_path.*``) | os10 | +| ``as_path.access_list`` | string (required) | Configures the access-list name | os10 | +| ``as_path.permit`` | boolean (required) | Configures an AS path to accept or reject | os10 | +| ``as_path.regex``| string (required) | Configures a regular expression | os10 | +| ``as_path.state`` | string: absent,present\* | Deletes the BGP as path filter if set to absent | os10 | +| ``community_list`` | list | Configures a community list entry (see ``community_list.*``) | os10 | +| ``community_list.type`` | string (required): standard,expanded | Configures the type of community-list entry | os10 | +| ``community_list.name`` | string (required) | Configures the name of community-list entry | os10 | +| ``community_list.permit`` | boolean(required) | Configures the community to accept or reject | os10 | +| ``community_list.regex`` | string (required) | Configures the regular expression for extended community list; mutually exclusive with *community_list.community* | os10 | +| ``community_list.community`` | string (required) | Configures a well-known community or community number for standard community list; mutually exclusive with *community_list.regex* | os10 | +| ``community_list.state`` | string: absent,present\* | Deletes the community list entry if set to absent | os10 | +| ``extcommunity_list`` | list | Configures extcommunity-list entry (see ``extcommunity_list.*``) | os10 | +| ``extcommunity_list.type`` | string (required): standard,expanded | Configures the type of 
extcommunity-list entry | os10 | +| ``extcommunity_list.name`` | string (required) | Configures the name of extcommunity-list entry | os10 | +| ``extcommunity_list.permit`` | boolean(required) | Configures the extcommunity to accept or reject | os10 | +| ``extcommunity_list.regex`` | string (required) | Configures the regular expression for the extended extcommunity list; mutually exclusive with *extcommunity_list.community* | os10 | +| ``extcommunity_list.community`` | string (required) | Configures the extended community for standard community-list; mutually exclusive with *extcommunity_list.regex* | os10 | +| ``extcommunity_list.state`` | string: absent,present\* | Deletes the extcommunity-list entry if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_route_map* role for the route-map, policy-map, and class-map. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_route_map* role. By including the role, you automatically get access to all of the tasks to configure route-map features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: present + community_list: + - type: expanded + name: qq + permit: true + regex: aaa + state: present + - type: standard + name: qqq + permit: false + community: internet + state: present + extcommunity_list: + - type: expanded + name: qq + permit: true + regex: aaa + state: present + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: present + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + source_protocol_ebgp: present + source_protocol_ibgp: present + source_protocol_evpn: present + source_protocol_static: present + source_protocol_ospf: present + source_protocol_connected: present + set: + local_pref: 1200 + metric_type: internal + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: present + +**Simple playbook to setup QoS —leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_route_map + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. 
All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_snmp.md b/ansible_collections/dellemc/os10/docs/os10_snmp.md new file mode 100644 index 00000000..a875a234 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_snmp.md @@ -0,0 +1,269 @@ +SNMP role +========= + +This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The SNMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_snmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``snmp_contact`` | string | Configures SNMP contact information | os10 | +| ``snmp_location`` | string | Configures SNMP location information | os10 | +| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os10 | +| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os10 | +| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os10 | +| ``snmp_community.access_list`` | dictionary | Configures ACL for the community (see ``snmp_community.access_list.*``) | os10 | +| 
``snmp_community.access_list.name`` | string | Specifies the name of the ACL for the community | os10 | +| ``snmp_community.access_list.state`` | string: absent,present\* | Deletes the ACL from the community if set to absent | os10 | +| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os10 | +| ``snmp_engine_id`` | string | Configures SNMP local EngineID | os10 | +| ``snmp_remote_engine_id`` | list | Configures SNMP remote engine information (see ``snmp_remote_engine_id.*``) | os10 | +| ``snmp_remote_engine_id.ip`` | string | Configures the IP address of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.engine_id`` | string | Configures the EngineID of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.udpport`` | string | Configures the UDP port of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.state`` | string: absent,present\* | Deletes the SNMP remote engine information if set to absent | os10 | +| ``snmp_group`` | list | Configures the SNMP group information (see ``snmp_group.*``) | os10 | +| ``snmp_group.name`` | string | Configures the name of the SNMP group | os10 | +| ``snmp_group.version`` | string: 1,2c,3 | Configures the version of the SNMP group | os10 | +| ``snmp_group.security_level`` | string: auth,noauth,priv | Configures the security level of SNMP group for version 3 | os10 | +| ``snmp_group.access_list`` | dictionary | Configures the access list of the SNMP group (see ``snmp_group.access_list.*``)| os10 | +| ``snmp_group.access_list.name`` | string | Specifies the name of the access list for the SNMP group wtih version 1 or 2c | os10 | +| ``snmp_group.access_list.state`` | string: absent,present\* | Deletes the access list from the SNMP group if set to absent | os10 | +| ``snmp_group.read_view`` | dictionary | Configures the read view of the SNMP group (see ``snmp_group.read_view.*``) | os10 | +| ``snmp_group.read_view.name`` | string | Specifies the name 
of the read view for the SNMP group | os10 | +| ``snmp_group.read_view.state`` | string: absent,present\* | Deletes the read view from the SNMP group if set to absent | os10 | +| ``snmp_group.write_view`` | dictionary | Configures the write view of the SNMP group (see ``snmp_group.write_view``) | os10 | +| ``snmp_group.write_view.name`` | string | Specifies the name of the write view for the SNMP group | os10 | +| ``snmp_group.write_view.state`` | string: absent,present\* | Deletes the write view from the SNMP group if set to absent | os10 | +| ``snmp_group.notify_view`` | dictionary | Configures the notify view of the SNMP group (see ``snmp_group.notify_view.*``) | os10 | +| ``snmp_group.notify_view.name`` | string | Specifies the name of the notify view for the SNMP group | os10 | +| ``snmp_group.notify_view.state`` | string: absent,present\* | Deletes the notify view from the SNMP group if set to absent | os10 | +| ``snmp_group.state`` | string: absent,present\* | Deletes the SNMP group if set to absent | os10 | +| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os10 | +| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os10 | +| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host for version 1 or 2c | os10 | +| ``snmp_host.udpport`` | string | Configures the UDP number of the SNMP trap host (0 to 65535) | os10 | +| ``snmp_host.version`` | string: 1,2c,3 (required) | Specifies the SNMP version of the host (1 or 2c or 3 in os10) | os10 | +| ``snmp_host.security_level`` | string: auth,noauth,priv | Configures the security level of the SNMP host for version 3 | os10 | +| ``snmp_host.security_name`` | string | Configures the security name of the SNMP host for version 3 | os10 | +| ``snmp_host.notification_type`` | string: traps,informs | Configures the notification type of the SNMP host | os10 | +| ``snmp_host.trap_categories`` | dictionary | Enables or 
disables different trap categories for the SNMP host (see ``snmp_host.trap_categories.*``) | os10 | +| ``snmp_host.trap_categories.dom`` | boolean: true,false | Enables or disables dom category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.entity`` | boolean: true,false | Enables or disables entity category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.envmon`` | boolean: true,false | Enables or disables envmon category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.lldp`` | boolean: true,false | | Enables or disables lldp category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.snmp`` | boolean: true,false | | Enables or disables snmp category traps for the SNMP host | os10 | +| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os10 | +| ``snmp_source_interface`` | string | Configures the source interface for SNMP | os10 | +| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os10 | +| ``snmp_traps.name`` | string | Enables SNMP traps | os10 | +| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os10 | +| ``snmp_user`` | list | Configures the SNMP user information (see ``snmp_user.*``) | os10 | +| ``snmp_user.name`` | string | Specifies the name of the SNMP user | os10 | +| ``snmp_user.group_name`` | string | Specifies the group of the SNMP user | os10 | +| ``snmp_user.version `` | string: 1,2c,3 | Configures the version for the SNMP user | os10 | +| ``snmp_user.access_list`` | string | Configures the access list for the SNMP user with version 1 or 2c | os10 | +| ``snmp_user.authentication`` | dictionary | Configures the authentication information for the SNMP user with version 3 (see ``snmp_user.authentication.*``) | os10 | +| ``snmp_user.authentication.localized`` | boolean: true,false | Configures the password to be in localized key format or not | os10 | +| 
``snmp_user.authentication.algorithm`` | string: md5, sha | Configures the authentication algorithm for the SNMP user | os10 | +| ``snmp_user.authentication.password`` | string | Configures the authentication password for the SNMP user; if localized is true it should be a hexadecimal string prefixed with 0x and quoted | os10 | +| ``snmp_user.authentication.encryption`` | dictionary | Configures the encryption parameters for the SNMP user | os10 | +| ``snmp_user.authentication.encryption.algorithm`` | string: aes,des | Configures the encryption algorithm for the SNMP user | os10 | +| ``snmp_user.authentication.encryption.password`` | string | Configures encryption password for the SNMP user; if localized is true it should be a hexadecimal string prefixed with 0x and quoted | os10 | +| ``snmp_user.remote`` | dictionary | Configures the remote SNMP entity the user belongs to (see ``snmp_user.remote.*``) | os10 | +| ``snmp_user.remote.ip`` | string | Configures the IP address of the remote entity for the SNMP user | os10 | +| ``snmp_user.remote.udpport`` | string | Configures the UDP port of the remote entity for the SNMP user | os10 | +| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os10 | +| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os10 | +| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os10 | +| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os10 | +| ``snmp_view.include`` | boolean: true,false | Specifies if the MIB family should be included or excluded from the view | os10 | +| ``snmp_view.state`` | string: absent,present\* | Deletes the SNMP view if set to absent | os10 | +| ``snmp_vrf`` | string | Configures the VRF for SNMP | os10 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value 
if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_snmp: + snmp_contact: test + snmp_location: Chennai + snmp_source_interface: loopback 10 + snmp_vrf: test + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: present + state: present + snmp_engine_id: 123456789 + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + udpport: 162 + state: present + snmp_traps: + - name: all + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + trap_categories: + dom: true + lldp: true + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: true + snmp: true + state: present + snmp_group: + - name: group_1 + version: "2c" + state: present + access_list: + name: test_acl + state: present + 
read_view: + name: view_1 + state: present + write_view: + name: view_2 + state: present + notify_view: + name: view_3 + state: present + - name: group_2 + version: 3 + security_level: priv + state: present + read_view: + name: view_1 + state: absent + notify_view: + name: view_3 + state: present + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + remote: + ip: 1.1.1.1 + udpport: 200 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: present + - name: user_4 + group_name: group_1 + version: 3 + state: present + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present + +**Simple playbook to setup SNMP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_snmp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_system.md b/ansible_collections/dellemc/os10/docs/os10_system.md new file mode 100644 index 00000000..119138af --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_system.md @@ -0,0 +1,126 @@ +System role +=========== + +This role facilitates the configuration of global system attributes. It specifically enables configuration of hostname and hashing algorithm. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The System role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. 
You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_system keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``hostname`` | string | Configures a hostname to the device (no negate command) | os10 | +| ``hardware_forwarding`` | string: scaled-l2,scaled-l3-routes,scaled-l3-hosts | Configures hardware forwarding mode | os10 | +| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os10 | +| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os10 | +| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os10 | +| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os10 | +| ``algo.state`` | string: absent,present\* | Deletes the hashing algorithm if set to absent | os10 | +| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os10 | +| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os10 | +| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm; | os10 | +| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os10 | +| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os10 | +| 
``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm | os10 | +| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os10 | +| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os10 | +| ``load_balance.mac_selection`` | list | Configures MAC key fields to use in hashing algorithm (see ``mac_selection.*``) | os10 | +| ``mac_selection.field`` | string | Configures MAC key fields to use in hashing algorithm | os10 | +| ``mac_selection.state`` | string: absent,present\* | Deletes the MAC key fields if set to absent | os10 | +| ``load_balance.tcp_udp_selection`` | list | Configures TCP UDP ports for load balancing configurations (see ``tcp_udp_selection.*``) | os10 | +| ``tcp_udp_selection.field`` | string | Configures TCP UDP port fields to use in hashing algorithm | os10 | +| ``tcp_udp_selection.state`` | string: absent,present\* | Deletes the TCP UDP ports if set to absent | os10 | +| ``min_ra`` | string | Configures global RA minimum interval value, applicable to all interfaces across VRFs | os10 | +| ``max_ra`` | string | Configures global RA maximum interval value, applicable to all interfaces across VRFs | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_system role* to completely set the NTP server, hostname, enable password, management route, hash algorithm, clock, line terminal, banner and reload type. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The system role writes a simple playbook that only references the *os10_system* role. By including the role, you automatically get access to all of the tasks to configure system features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_system: + hostname: os10 + hardware_forwarding: scaled-l3-hosts + hash_algo: + algo: + - name: lag + mode: crc + state: present + - name: ecmp + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: source-ip + state: present + ipv6_selection: + - field: source-ip + state: present + mac_selection: + - field: source-mac + state: present + tcp_udp_selection: + - field: l4-source-port + state: present + max_ra: 15 + min_ra: 10 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_system + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_template.md b/ansible_collections/dellemc/os10/docs/os10_template.md new file mode 100644 index 00000000..d7faf013 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_template.md @@ -0,0 +1,75 @@ +Template role +============== + +This role provides access to structured data from show commands. This role facilitates the TEXTFSM parsing engine. 
TextFSM is a template-based state machine. It takes the raw string input from the CLI of network devices, runs it through a TextFSM template, and returns structured text in the form of a Python dictionary. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Template role is highly customizable, and it works with separate template definitions which contain variables and rules with regular expressions. This library is very helpful to parse any text-based CLI output from network devices. The Template role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- Variables and values are case-sensitive + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_template* role to parse any text-based CLI output. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. All the supported CLI commands are imported as tasks in tasks/main.yml. + +For the *os10_template* role plugins to be used, you may need to specify the actual path of role in *ansible.cfg* file. + +**Sample ansible.cfg** + + action_plugins = ../../plugins/modules/ + + +**Sample hosts file** + + leaf1 ansible_host= ansible_network_os=dellemc.os10.os10 ansible_ssh_user=xxxxx ansible_ssh_pass=xxxxx + + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_template + +**Example playbook to run specific show command — leaf.yaml** + + + --- + - name: PARSE SHOW IP INTERFACE BRIEF + hosts: leaf1 + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + + + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_uplink.md b/ansible_collections/dellemc/os10/docs/os10_uplink.md new file mode 100644 index 00000000..8ffeb0e7 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_uplink.md @@ -0,0 +1,109 @@ +Uplink role +=========== + +This role facilitates the configuration of uplink failure detection feature attributes. It specifically enables configuration of association between upstream and downstream interfaces known as uplink-state group. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Uplink role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_uplink keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``uplink_state_group`` | list | Configures the uplink state group (see ``uplink_state_group.*``) | os10 | +| ``uplink_state_group.id`` | integer | Configures the uplink state group instance | os10 | +| ``uplink_state_group.enable`` | boolean: True,False | Enables the uplink state group instance | os10 | +| ``uplink_state_group.defer_time`` | integer | Configures defer timer for the uplink state group | os10 | +| ``uplink_state_group.uplink_type`` | list | Configures the upstream and downstream attribute (see ``uplink_type.*``) | os10 | +| ``uplink_type.type`` | string: upstream,downstream | Configures the uplink type | os10 | +| ``uplink_type.intf`` | string | Configures the uplink interface | os10 | +| ``uplink_type.state`` | string: absent,present\* | Removes the uplink stream if set to absent | os10 | +| ``uplink_state_group.downstream`` | dictionary | Configures downstream information for the uplink state group (see ``downstream.*``) | os10 | +| ``downstream.disable_links`` | integer | Configures number of downstream links to be disabled. 
String 'all' can be used to disable all downstream links | os10 | +| ``downstream.auto_recover`` | boolean: True,False | Enables or disables auto recover for downstream interfaces | os10 | +| ``uplink_state_group.state`` | string: absent,present\* | Removes the uplink state group instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable 
value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_uplink role* to completely set the uplink state group instance, and upstream, downstream interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The uplink role writes a simple playbook that only references the *os10_uplink* role. By including the role, you automatically get access to all of the tasks to configure uplink features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet1/1/2-1/1/5" + state: "present" + state: "present" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + - id: 2 + enable: True + state: "present" + +> **NOTE**: Interfaces should be created using the *os10_interface* role. 
+ +**Simple playbook to setup uplink — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_uplink + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_users.md b/ansible_collections/dellemc/os10/docs/os10_users.md new file mode 100644 index 00000000..09d55f1d --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_users.md @@ -0,0 +1,89 @@ +Users role +========== + +This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Users role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_users list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os10 | +| ``password`` | string | Configures the password set for the username; password length must be at least eight characters | os10 | +| ``role`` | string | Configures the role assigned to the user | os10 | +| 
``state`` | string: absent,present\* | Deletes a user account if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | 
yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_users* role to configure global system user attributes. It creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file. It writes a simple playbook that only references the *os10_users* role. By including the role, you automatically get access to all of the tasks to configure user features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: present + - username: u1 + password: a1a2a3a4!@#$ + role: netadmin + state: present + +**Simple playbook to setup users — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_users + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_vlan.md b/ansible_collections/dellemc/os10/docs/os10_vlan.md new file mode 100644 index 00000000..71a7adf9 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_vlan.md @@ -0,0 +1,123 @@ +VLAN role +========= + +This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. 
+ +The VLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration +- `os10_vlan` (dictionary) holds the key with the VLAN ID key and default-vlan key. +- VLAN ID key should be in format "vlan ID" (1 to 4094) +- Variables and values are case-sensitive + +**os10_vlan** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``default_vlan_id`` | integer | Configures the vlan-id as the default VLAN for an existing VLAN | os10 | + +**VLAN ID keys** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``description`` | string | Configures a single line description for the VLAN | os10 | +| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os10 | +| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os10 | +| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os10 | +| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os10 | +| ``untagged_members.port`` | string | Specifies valid device interface names to be 
untagged for each VLAN | os10 | +| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os10 | +| ``virtual_gateway_ip`` | string | Configures an anycast gateway IPv4 address for VLAN interfaces| os10 | +| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 | +| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars directories* or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all 
commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +## Example playbook + +This example uses the *os10_vlan* role to setup the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated to it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vlan* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_vlan: + default_vlan_id: 2 + vlan 100: + description: "Blue" + tagged_members: + - port: ethernet 1/1/32 + state: present + - port: ethernet 1/1/31 + state: present + untagged_members: + - port: ethernet 1/1/30 + state: present + - port: ethernet 1/1/29 + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" + vlan 10: + description: "vlan with anycast GW" + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + state: "present" + +> **NOTE**: Interfaces should be created using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vlan + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_vlt.md b/ansible_collections/dellemc/os10/docs/os10_vlt.md new file mode 100644 index 00000000..85ed917a --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_vlt.md @@ -0,0 +1,108 @@ +VLT role +======== + +This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VLT role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables . 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vlt keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os10 | +| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os10 | +| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os10 | +| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os10 | +| ``discovery_intf`` | string | Configures the discovery interface for the VLT domain (range of interfaces)| os10 | +| ``discovery_intf_state`` | string: absent,present | Deletes the discovery interfaces for the VLT domain if set to absent | os10 | +| ``peer_routing`` | boolean | Configures VLT peer routing | os10 | +| ``priority`` | integer (default:32768) | Configures VLT priority | os10 | +| ``vlt_mac`` | string | Configures the VLT MAC address | os10 | +| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os10 | +| ``vlt_peers.`` | dictionary | Configures the VLT peer port-channel (`Po value`) | os10 | +| ``vlt_peers..peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os10 
| +| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| 
``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Dependencies +------------ + +The *os10_vlt* role is built on modules included in the core Ansible code. These modules were added in ansible version 2.2.0. + +Example playbook +---------------- + +This example uses the *os10_vlt* role to setup a VLT-domain. It creates a *hosts* file with the switch details and corresponding variables.The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vlt* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + backup_destination_vrf: + discovery_intf: 1/1/12 + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: present + +> **NOTE**: Discovery interface must not be in switchport mode and can be configured using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vlt + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/docs/os10_vrf.md b/ansible_collections/dellemc/os10/docs/os10_vrf.md new file mode 100644 index 00000000..464efc5b --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_vrf.md @@ -0,0 +1,143 @@ +VRF role +======== + +This role facilitates to configure the basics of virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VRF role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the variable `ansible_network_os` that can take the `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vrf keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrfdetails`` | list | Configures the list of VRF instances (see ``instances.*``) | os10 | +| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os10 | +| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os10 | +| ``vrfdetails.ip_route_import`` | string | Configures VRF IP subcommands | os10 | +| ``ip_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ip_route_import.route_map_value`` | string | Configures the route-map value | os10 | +| ``ip_route_import.state`` | string | Deletes the IP configuration 
if set to absent | os10 | +| ``vrfdetails.ip_route_export`` | string | Configures VRF IP subcommands | os10 | +| ``ip_route_export.community_value`` | string | Configures the route community value | os10 | +| ``ip_route_export.route_map_value`` | string | Configures the route-map value | os10 | +| ``ip_route_export.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.ipv6_route_import`` | string | Configures VRF IPv6 subcommands | os10 | +| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ipv6_route_import.route_map_value`` | string | Configures the route-map value | os10 | +| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.ipv6_route_export`` | string | Configures VRF IPv6 subcommands | os10 | +| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ipv6_route_export.route_map_value`` | string | Configures the route-map value | os10 | +| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.map_ip_interface`` | list | Specifies a list of valid interface names | os10 | +| ``map_ip_interface.intf_id`` | string | Specifies a valid interface name | os10 | +| ``map_ip_interface.state`` | string | Deletes VRF association in the interface if set to absent | os10 | +| ``upd_src_ip_loopback_id`` | string | Configures the source IP for any leaked route in VRF from the provided loopback ID, delete if empty string| os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Dependencies +------------ + +The *os10_vrf* role is built on modules included in the core Ansible code. 
These modules were added in ansible version 2.2.0 + +Example playbook +---------------- + +This example uses the *os10_vrf* role to setup a VRF and associate it to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that references the *os10_vrf* role. +*upd_src_ip_loopback_id* has an dependency with association of the interface in a VRF, and the *os10_vrf* role needs to be invoked twice with different input dictionary one for the create and one for *upd_src_ip_loopback_id*. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + os10_vrf: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + ip_route_import: + community_value: "10:20" + state: "present" + route_map_value: "test4" + ip_route_export: + community_value: "30:40" + state: "present" + route_map_value: "test3" + ipv6_route_import: + community_value: "40:50" + state: "absent" + route_map_value: "test2" + ipv6_route_export: + community_value: "60:70" + state: "absent" + route_map_value: "test2" + map_ip_interface: + - intf_id : "loopback11" + state : "present" + + os_vrf_upd_src_loopback: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + upd_src_ip_loopback_id: 11 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vrf + +**Simple playbook with `upd_src_ip_loopback_id` — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vrf + - hosts: leaf1 + vars: + 
os10_vrf: "{{ os_vrf_upd_src_loopback }}" + roles: + - dellemc.os10.os10_vrf + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_vrrp.md b/ansible_collections/dellemc/os10/docs/os10_vrrp.md new file mode 100644 index 00000000..299166bf --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_vrrp.md @@ -0,0 +1,139 @@ +VRRP role +========= + +This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VRRP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_vrrp` (dictionary) holds a dictionary with the interface name key +- Interface name can correspond to any of the valid OS10 interface with a unique interface identifier name +- Physical interfaces names must be in * * format (for example *fortyGigE 1/1*) +- Variables and values are case-sensitive + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os10 | +| ``version`` | dictionary | Configures VRRP version | os10 | +| 
``vrrp_active_active_mode`` | dictionary | Configures VRRP active-active mode | os10 | +| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | os10 | +| ``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os10 | +| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os10 | +| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os10 | +| ``vrrp_group.virtual_address`` | string | Configures a virtual-address to the VRRP group (A.B.C.D format) | os10 | +| ``virtual_address.ip`` | string | Configures a virtual ip address (A.B.C.D format) | os10 | +| ``virtual_address.state`` | string: present\*,absent | Configures/unconfigures a virtual-address (A.B.C.D format) | os10 | +| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os10 | +| ``vrrp_group.priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100) | os10 | +| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075; default 100) and in multiple of 25; centisecs gets converted into seconds in version 2 | os10 | +| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track.*``) | os10 | +| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track.interface* | os10 | +| ``track_interface.interface`` | string | Configures the track interface of the VRRP group ( format) | os10 | +| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os10 | +| ``track_interface.state`` | string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os10 | +| ``vrrp_group.track_interface.state`` | string: 
present*,absent | Deletes all track interfaces from the VRRP group if set to absent | os10 | +| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter 
privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_vrrp* role to configure VRRP commands at the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vrrp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + ethernet1/1/1: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: true + +> **NOTE**: Interface VRRP cannot exist with L2 modes and can be 
configured using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vrrp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_vxlan.md b/ansible_collections/dellemc/os10/docs/os10_vxlan.md new file mode 100644 index 00000000..09b23bb3 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_vxlan.md @@ -0,0 +1,259 @@ +VxLAN role +======== + +This role facilitates the configuration of virtual extensible LAN (VxLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VxLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. 
+ +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vxlan keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``anycast_gateway_mac`` | string | Configures an anycast gateway IP address for a VxLAN virtual network | os10 | +| ``loopback`` | dictionary | Configures the loopback interface (see ``loopback.*``) | os10 | +| ``loopback.loopback_id`` | integer | Configures the loopback interface number (0 to 16383) | os10 | +| ``loopback.description`` | string | Configures the interface description | os10 | +| ``loopback.ip_address`` | string | Configure the IP address | os10 | +| ``loopback.state`` | string: absent,present\* | Removes loopback interface if set to absent | os10 | +| ``nve`` | dictionary | Configures network virtualization edge (see ``nve.*``) | os10 | +| ``nve.source_interface`` | integer | Configures source loopback interface | os10 | +| ``nve.controller`` | dictionary | Configures controller; supports only one controller connection at a time (see ``controller.*``) | os10 | +| ``controller.name`` | string: NSX, ovsdb | Configures the NVE controller | os10 | +| ``controller.max_backoff`` | integer | Configures max_backoff value (setting an empty value negates the corresponding configuration) | os10 | +| ``controller.control_cfg`` | list | Configures the controller IP and port (see ``control_cfg.*``) | os10 | +| ``control_cfg.ip_addr`` | string | Configures the controller IP | os10 | +| 
``control_cfg.port`` | integer | Configures the controller port | os10 | +| ``control_cfg.state`` | string: absent,present\* | Removes the controller IP and port configuration if set to absent | os10 | +| ``controller.state`` | string: absent,present\* | Removes the controller if set to absent | os10 | +| ``nve.state`` | string: absent,present\* | Removes the NVE if set to absent | os10 | +| ``evpn`` | dictionary | Enables EVPN in control plane (see ``evpn.*``) | os10 | +| ``evpn.autoevi`` | boolean: True, False | Configures auto-EVI; no further manual configuration is allowed in auto-EVI mode | os10 | +| ``evpn.rmac`` | string | Configures router MAC address | os10 | +| ``evpn.evi`` | list | Configures EVPN instance (see ``evi.*``)| os10 | +| ``evpn.dis_rt_asn`` | boolean | Enables/disables AS number usage in route target | os10 | +| ``evpn.vrf`` | dictionary | Enables VRF for EVPN| os10 | +| ``vrf.name`` | string | Configures VRF name | os10 | +| ``vrf.state`` | string(present,absent) | Configures/removes VRF for EVPN | os10 | +| ``vrf.vni`` | integer | Configures VNI for the VRF | os10 | +| ``vrf.rd`` | string | Configures RD for the VRF | os10 | +| ``vrf.route_target`` | dictionary | Enables route target for the VRF | os10 | +| ``route_target.type`` | string (manual, auto) | Configures the route target type | os10 | +| ``route_target.asn_value`` | string | Configure AS number | os10 | +| ``route_target.state`` | string (present,absent) | Configures/unconfigures the route target | os10 | +| ``route_target.route_target_type`` | string | Configures the route target type | os10 | +| ``vrf.adv_ipv4`` | dictionary | Enables IPv4 advertisement VRF | os10 | +| ``adv_ipv4.type`` | string | Configures IPv4 advertisement type | os10 | +| ``adv_ipv4.rmap_name`` | string | Configures route-map for advertisement | os10 | +| ``adv_ipv4.unconfig`` | boolean | Configures/unconfigures route-map for advertisement | os10 | +| ``evi.id`` | integer | Configures the EVPN instance ID 
(1 to 65535) | os10 | +| ``evi.rd`` | string | Configures the route distinguisher | os10 | +| ``evi.vni`` | dictionary | Configures VNI value (see ``vni.*``) | os10 | +| ``vni.id`` | integer | Configures VNI value; configure the same VNI value configured for the VxLAN virtual network | os10 | +| ``vni.state`` | string: absent,present\* | Removes the VNI if set to absent | os10 | +| ``evi.route_target`` | list | Configures route target (see ``route_target.*``) | os10 | +| ``route_target.type`` | string: manual,auto | Configures the route target (auto mode auto-configures an import and export value for EVPN routes) | os10 | +| ``route_target.asn_value`` | string | Configures the route target ASN value | os10 | +| ``route_target.route_target_type`` | string: import,export,both | Configures the route target type | os10 | +| ``route_target.state`` | string: absent,present\* | Removes the route target if set to absent | os10 | +| ``evi.state`` | string: absent,present\* | Removes EVPN instance ID if set to absent | os10 | +| ``evpn.state`` | string: absent,present\* | Removes the EVPN configuration if set to absent | os10 | +| ``virtual_network`` | dictionary | Configures the virtual network attributes (see ``virtual_network.*``) | os10 | +| ``virtual_network.untagged_vlan`` | integer | Configures the reserved untagged VLAN ID (1 to 4093) | os10 | +| ``virtual_network.virtual_net`` | list | Configures the virtual network attributes for VxLAN tunneling (see ``virtual_net.*``) | os10 | +| ``virtual_net.id`` | integer | Configures a virtual network ( virtual-network ID, from 1 to 65535) | os10 | +| ``virtual_net.description`` | string | Configures the description for virtual network | os10 | +| ``virtual_net.vlt_vlan_id`` | integer | Configures the VLTi VLAN ID | os10 | +| ``virtual_net.member_interface`` | list | Configures the trunk member interface attributes to the virtual network (see ``member_interface.*``) | os10 | +| ``member_interface.ifname`` | string | Configures 
interface name to provision the virtual network member interface | os10 | +| ``member_interface.type`` | string: tagged,untagged | Configures the type to provision the virtual network member interface | os10 | +| ``member_interface.vlanid`` | integer | Configures the VLAN ID to provision the virtual network member interface | os10 | +| ``member_interface.state`` | string: absent,present\* | Removes the virtual network member interface if set to absent | os10 | +| ``virtual_net.vxlan_vni`` | dictionary | Configures the VxLAN attributes to virtual network (see ``vxlan_vni.*``) | os10 | +| ``vxlan_vni.id`` | integer | Configures the VxLAN ID to a virtual network | os10 | +| ``vxlan_vni.remote_endpoint`` | list | Configures the IP address of a remote tunnel endpoint in a VxLAN network (see ``remote_endpoint.*``) | os10 | +| ``remote_endpoint.ip`` | string | Configures the IP address of a remote tunnel endpoint (1.1.1.1) | os10 | +| ``remote_endpoint.state`` | string: absent,present\* | Removes the remote tunnel endpoint in a VxLAN network if set to absent | os10 | +| ``vxlan_vni.state`` | string: absent,present\* | Removes the VxLAN ID if set to absent | os10 | +| ``virtual_net.state`` | string: absent,present\* | Removes a virtual network if set to absent | os10 | +| ``vlan_association`` | list | Configures the VLAN association with virtual network (see ``vlan_association.*``) | os10 | +| ``vlan_association.vlan_id`` | integer | Specifies the VLAN ID | os10 | +| ``vlan_association.virtual_net`` | integer | Specifies the virtual network ID which is to be associated with VLAN | os10 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_vxlan* role to configure the VxLAN network, source IP address on VxLAN tunnel endpoint and virtual networks. 
It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. The hosts file should define the ansible_network_os variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vxlan* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_vxlan: + anycast_gateway_mac: "00:22:33:44:55:66" + loopback: + loopback_id: 10 + description: "HARDWARE_VXLAN" + ip_address: "10.8.0.1/32" + state: "present" + nve: + source_interface: 10 + controller: + name: "ovsdb" + max_backoff: 2000 + control_cfg: + - ip_addr: "1.2.3.4" + port: 30 + state: "present" + state: "present" + state: "present" + evpn: + autoevi: False + evi: + - id: 111 + rd: "auto" + vni: + id: 111 + state: "present" + route_target: + - type: "manual" + asn_value: "111:111" + route_target_type: "both" + state: "present" + - type: "manual" + asn_value: "11:11" + route_target_type: "export" + state: "present" + state: "present" + - id: 222 + rd: "2.2.2.2:222" + vni: + id: 222 + state: "present" + route_target: + - type: "auto" + asn_value: + route_target_type: + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + - name: "blue" + state: "absent" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + state: "present" + virtual_network: + untagged_vlan: 1001 
+ virtual_net: + - id: 111 + description: "NSX_Cluster_VNI_111" + vlt_vlan_id: 11 + member_interface: + - ifname: "ethernet 1/1/15" + type: "tagged" + vlanid: 15 + state: "present" + - ifname: "port-channel 12" + type: "tagged" + vlanid: 11 + state: "present" + vxlan_vni: + id: 111 + remote_endpoint: + - ip: "1.1.1.1" + state: "present" + - ip: "11.11.11.11" + state: "present" + - ip: "111.111.111.111" + state: "present" + state: "present" + state: "present" + - id: 222 + description: "NSX_Cluster_VNI_222" + vlt_vlan_id: 22 + member_interface: + - ifname: "ethernet 1/1/16" + type: "tagged" + vlanid: 16 + state: "present" + vxlan_vni: + id: 222 + remote_endpoint: + - ip: "2.2.2.2" + state: "present" + - ip: "22.22.22.22" + state: "present" + state: "present" + state: "present" + vlan_association: + - vlan_id: 111 + virtual_net: 111 + +> **NOTE**: Member interfaces should be in switchport trunk mode which can be configured using the *os10_interface* role. + +**Simple playbook to configure VxLAN — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vxlan + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/docs/os10_xstp.md b/ansible_collections/dellemc/os10/docs/os10_xstp.md new file mode 100644 index 00000000..0dd919b2 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/os10_xstp.md @@ -0,0 +1,196 @@ +# xSTP role + +This role facilitates the configuration of xSTP attributes. It supports multiple versions of spanning-tree protocol (STP), rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
+ +The xSTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- `os10_xstp` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the device +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**hostname keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|----------------------| +| ``type`` | string (required) | Configures the type of spanning-tree mode specified that can vary according to the device including RSTP, rapid-PVST, and MST | os10 | +| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os10 | +| ``mac_flush_timer`` | integer | Configures the mac_flush_timer value (0 to 500) | os10 | +| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os10 | +| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os10 | +| ``rstp.max_age`` | integer | Configures the max_age timer for RSTP (6 to 40) | os10 | +| ``rstp.hello_time`` | integer | Configures the hello-time for RSTP (1 to 10) | os10 | +| ``rstp.forward_time`` | integer | Configures the forward-time for RSTP (4 to 30) | os10 | +| ``rstp.force_version`` | string: stp | Configures the force version for the BPDUs transmitted by RSTP | os10 | +| 
``rstp.mac_flush_threshold`` | integer | Configures the MAC flush threshold for RSTP (1 to 65535) | os10 | +| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os10 | +| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os10 | +| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os10 | +| ``vlan.max_age`` | integer | Configures the max_age timer for a VLAN (6 to 40) | os10 | +| ``vlan.hello_time`` | integer | Configures the hello-time for a VLAN (1 to 10) | os10 | +| ``vlan.forward_time`` | integer | Configures the forward-time for a VLAN (4 to 30) | os10 | +| ``vlan.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated VLAN range_or_id | os10 | +| ``vlan.mac_flush_threshold`` | integer | Configures the MAC flush threshold for a VLAN (1 to 65535) | os10 | +| ``vlan.root`` | string: primary,secondary | Designates the primary or secondary root for the associated VLAN range_or_id; mutually exclusive with *vlan.bridge_priority* | os10 | +| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os10 | +| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os10 | +| ``mstp.max_age`` | integer | Configures the max_age timer for MSTP (6 to 40) | os10 | +| ``mstp.max_hops`` | integer | Configures the max-hops for MSTP (6 to 40) | os10 | +| ``mstp.hello_time`` | integer | Configures the hello-time for MSTP (1 to 10) | os10 | +| ``mstp.forward_time`` | integer | Configures the forward-time for MSTP (4 to 30) | os10 | +| ``mstp.force_version`` | string: stp,rstp | Configures the force-version for the BPDUs transmitted by MSTP | os10 | +| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os10 | +| ``mstp_instances.number_or_range`` | integer | Configures 
the multiple spanning-tree instance number| os10 | +| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os10 | +| ``mstp_instances.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated MSTP instance | os10 | +| ``mstp_instances.mac_flush_threshold`` | integer | Configures the MAC flush-threshold for an MSTP instance (1 to 65535) | os10 | +| ``mstp_instances.root`` | string: primary,secondary | Designates the primary or secondary root for the associated MSTP instance; mutually exclusive with *mstp_instances.bridge_priority* | os10 | +| ``mstp.mst_config`` | dictionary | Configures multiple spanning-tree (see ``mstp.mst_config.*``); supported | os10 | +| ``mst_config.name`` | string | Configures the name which is specified for the MSTP | os10 | +| ``mst_config.revision`` | integer | Configures the revision number for MSTP | os10 | +| ``mst_config.cfg_list`` | list | Configures the multiple spanning-tree list (see ``mst_config.cfg_list.*``) | os10 | +| ``cfg_list.number`` | integer | Specifies the MSTP instance number | os10 | +| ``cfg_list.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to an instance number | os10 | +| ``cfg_list.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os10 | +| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os10 | +| ``intf ``| dictionary | Configures the interface name (see ``intf..*``) | os10 | +| ``intf..edge_port`` | boolean: true,false | Configures the EdgePort as dynamic if set to true | os10 | +| ``intf..bpdu_filter``| boolean: true,false | Enables/disables bpdufilter at the interface | os10 | +| ``intf..bpdu_guard``| boolean: true,false | Enables/disables bpduguard at the interface | os10 | +| ``intf..guard``| string: loop,root,none | 
Configures guard on the interface | os10 | +| ``intf..enable`` | boolean: true,false | Enables/disables spanning-tree at the interface level | os10 | +| ``intf..link_type``| string: auto,point-to-point,shared | Configures the link type at the interface | os10 | +| ``intf..rstp`` | dictionary | Configures the RSTP interface name (see ``intf..rstp.*``) | os10 | +| ``rstp.priority``| integer | Configures the RSTP priority value at the interface | os10 | +| ``rstp.cost`` | integer | Configures the RSTP cost value at the interface | os10 | +| ``intf..msti`` | list | Configures the MSTi interface name (see ``intf..msti``) | os10 | +| ``msti.instance_number`` | integer or range | Specifies the MSTP instance number or range | os10 | +| ``msti.priority`` | integer | Specifies the priority value to be configured at the interface | os10 | +| ``msti.cost`` | integer | Specifies the cost value to be configured at the interface | os10 | +| ``intf..vlan`` | list | Configures the VLAN interface name (see ``intf..vlan``) | os10 | +| ``vlan.range_or_id`` | integer or range | Specifies the VLAN ID or range | os10 | +| ``vlan.priority`` | integer | Specifies the priority value to be configured at the interface | os10 | +| ``vlan.cost`` | integer | Specifies the cost value to be configured at the interface | os10 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned.
It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path. + +It writes a simple playbook that only references the *os10_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP. + +**Sample hosts file** + + spine1 ansible_host= + +**Sample host_vars/spine1** + + hostname: spine1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + +**Sample vars/main.yml** + + os10_xstp: + type: rstp + enable: true + path_cost: true + mac_flush_timer: 4 + rstp: + max_age: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + bridge_priority: 4096 + mac_flush_threshold: 5 + pvst: + vlan: + - range_or_id: 10 + max_age: 6 + enable: true + hello_time: 7 + forward_time: 7 + bridge_priority: 4096 + mac_flush_threshold: 9 + mstp: + max_age: 6 + max_hops: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + mstp_instances: + - number_or_range: 1 + enable: true + mac_flush_threshold: 9 + bridge_priority: 4096 + mst_config: + name: cfg1 + revision: 5 + cfg_list: + - number: 1 + vlans: 10,12 + vlans_state: present + intf: + ethernet 1/1/8: + edge_port: true + bpdu_filter: true + bpdu_guard: true + guard: loop + enable: true + link_type: point-to-point + msti: + - instance_number: 1 + priority: 32 + cost: 1 + rstp: + priority: 32 + cost: 7 + vlan: + - range_or_id: 6 + priority: 16 + cost: 8 + + +**Simple playbook to setup system — spine.yml** + + - hosts: spine + roles: + - dellemc.os10.os10_xstp + +**Run** + + ansible-playbook -i hosts spine.yml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/docs/roles.rst b/ansible_collections/dellemc/os10/docs/roles.rst new file mode 100644 index 00000000..12e37212 --- /dev/null +++ b/ansible_collections/dellemc/os10/docs/roles.rst @@ -0,0 +1,193 @@ +############################################################## +Ansible Network Collection Roles for Dell EMC SmartFabric OS10 +############################################################## + +The roles facilitate provisioning of devices running Dell EMC SmartFabric OS10. This document describes each of the roles. + +AAA role +-------- + +The `os10_aaa `_ role facilitates the configuration of Authentication Authorization and Accounting (AAA), and supports the configuration of TACACS and RADIUS server and AAA. + + +ACL role +-------- + +The `os10_acl `_ role facilitates the configuration of Access Control lists. It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals. + + +BFD role +-------- + +The `os10_bfd `_ role facilitates the configuration of BFD global attributes. It specifically enables configuration of BFD interval , min_rx, multiplier, and role. + + +BGP role +-------- + +The `os10_bgp `_ role facilitates the configuration of border gateway protocol (BGP) attributes, and supports router ID, networks, neighbors, and maximum path configurations. + + +Copy configuration role +----------------------- + +The `os10_copy_config `_ role pushes the backup running configuration into a device. This role merges the configuration in the template file with the running configuration of the Dell EMC Networking OS10 device. + + +DNS role +-------- + +The `os10_dns `_ role facilitates the configuration of domain name service (DNS). + + +ECMP role +--------- + +The `os10_ecmp `_ role facilitates the configuration of equal cost multi-path (ECMP). It supports the configuration of ECMP for IPv4. 
+ + +Fabric-summary role +------------------- + +The `os10_fabric_summary `_ role facilitates getting the show system information of all the switches in the fabric. + + +Flow-monitor role +----------------- + +The `os10_flow_monitor `_ role facilitates the configuration of ACL flow-based monitoring attributes. In Flow-based mirroring, the ingress traffic, matching the specified policies are mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions, including port monitor sessions. + + +Image-upgrade role +------------------ + +The `os10_image_upgrade `_ role facilitates installation of OS10 software images. + + +Interface role +-------------- + +The `os10_interface `_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, and port mode. + + +LAG role +-------- + +The `os10_lag `_ role facilitates the configuration of link aggregation group (LAG) attributes. This role supports the creation and deletion of a LAG and its member ports, and supports the configuration of type (static/dynamic), hash scheme, and minimum required link. + + +LLDP role +--------- + +The `os10_lldp `_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. This role supports the configuration of hello, mode, multiplier, advertise tlvs, management interface, fcoe, iscsi at global and interface levels. + + +Logging role +------------ + +The `os10_logging `_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers. + + +Network-Validation role +----------------------- + +The `os10_network_validation `_ role facilitates verifying the networks. It validates networking features of wiring connection, BGP neighbors, MTU between neighbors and VLT pair.
+ + +NTP role +-------- + +The `os10_ntp `_ role facilitates the configuration of network time protocol attributes. + + +Prefix-list role +---------------- + +The `os10_prefix_list `_ role facilitates the configuration of a prefix-list, supports the configuration of IP prefix-list, and assigns the prefix-list to line terminals. + + +QoS role +-------- + +The `os10_qos `_ role facilitates the configuration of quality of service attributes including policy-map and class-map. + + +RA Guard role +------------- + +The `os10_raguard `_ role facilitates the configuration of IPv6 RA Guard attributes. + + +Route-map role +-------------- + +The `os10_route_map `_ role facilitates the configuration of route-map attributes. + + +SNMP role +--------- + +The `os10_snmp `_ role facilitates the configuration of global snmp attributes. It supports the configuration of SNMP server attributes like users, group, community, location, traps, and so on. + + +System role +----------- + +The `os10_system `_ role facilitates the configuration of global system attributes. This role specifically enables configuration of hostname and hashing algorithm for OS10. + + +TEMPLATE role +------------- + +The `os10_template `_ role facilitates the TEXTFSM parsing engine. TextFSM is a template based state machine. It takes the raw string input from the CLI of network devices OS10, runs it through a TEXTFSM template, and returns structured text in the form of a Python dictionary. + + +UPLINK role +----------- + +The `os10_uplink `_ role facilitates the configuration of uplink attributes, and is abstracted for OS10. It specifically enables configuration of association between upstream and downstream interfaces known as uplink-state group. + + +Users role +---------- + +The `os10_users `_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users.
+ + +VLAN role +--------- + +The `os10_vlan `_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports. + + +VLT role +-------- + +The `os10_vlt `_ role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. + + +VRF role +-------- + +The `os10_vrf `_ role facilitates the configuration of basic virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers. + + +VRRP role +--------- + +The `os10_vrrp `_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes. + + +VXLAN role +---------- + +The `os10_vxlan `_ role facilitates the configuration of virtual extensible LAN (VXLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE). + + +xSTP role +--------- + +The `os10_xstp `_ role facilitates the configuration of xSTP attributes. This role supports multiple version of spanning-tree protocol (STP), rapid spanning-tree (RSTP) protocol, multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. + + +\(c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
diff --git a/ansible_collections/dellemc/os10/meta/runtime.yml b/ansible_collections/dellemc/os10/meta/runtime.yml new file mode 100644 index 00000000..e211415c --- /dev/null +++ b/ansible_collections/dellemc/os10/meta/runtime.yml @@ -0,0 +1,8 @@ +plugin_routing: + action: + os10_config: + redirect: dellemc.os10.os10 + os10_command: + redirect: dellemc.os10.os10 + os10_facts: + redirect: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md new file mode 100644 index 00000000..f0affd95 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md @@ -0,0 +1,37 @@ + + +# Provision CLOS fabric using the Ansible collection for Dell EMC SmartFabric OS10 + +This example describes how to use Ansible to build a CLOS fabric using Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The sample topology is a two-tier CLOS fabric with two spines and four leaves connected as mesh. eBGP is running between the two tiers. All switches in spine have the same AS number, and each leaf switch has a unique AS number. All AS numbers used are private. + +For application load-balancing purposes, the same prefix is advertised from multiple leaf switches and uses _BGP multipath relax_ feature. + +![CLOS FABRIC Topology](https://ansible-dellos-docs.readthedocs.io/en/latest/_images/topo.png) + +## Create simple Ansible playbook + +**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address. + +**2**. Create a group variable file called `group_vars/all`, then define credentials and SNMP variables. + +**3**. Create a group variable file called `group_vars/spine.yaml`, then define credentials, hostname, and BGP neighbors of spine group. + +**4**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport. + +**5**. 
Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport. + +**6**. Create a host variable file called `host_vars/leaf1.yaml`, then define the host, credentials, and transport. + +**7**. Create a host variable file called `host_vars/leaf2.yaml`, then define the host, credentials, and transport. + +**8**. Create a host variable file called `host_vars/leaf3.yaml`, then define the host, credentials, and transport. + +**9**. Create a host variable file called `host_vars/leaf4.yaml`, then define the host, credentials, and transport. + +**10**. Create a playbook called `datacenter.yaml`. + +**11**. Run the playbook. + + ansible-playbook -i inventory.yaml datacenter.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml new file mode 100644 index 00000000..7174af84 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml @@ -0,0 +1,11 @@ +--- +- hosts: datacenter + gather_facts: no + connection: network_cli + collections: + - dellemc.os10 + roles: + - os10_interface + - os10_bgp + - os10_snmp + - os10_system diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all new file mode 100644 index 00000000..6985e8ad --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all @@ -0,0 +1,9 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_snmp: + snmp_community: + - name: public + access_mode: ro + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml new 
file mode 100644 index 00000000..3524eaaf --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml @@ -0,0 +1,85 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "{{ spine_hostname }}" + +os10_bgp: + asn: 64901 + router_id: "{{ bgp_router_id }}" + best_path: + as_path: multipath-relax + as_path_state: present + med: + - attribute: missing-as-worst + state: present + neighbor: + - type: ipv4 + remote_asn: "{{ bgp_neigh1_remote_asn }}" + ip: "{{ bgp_neigh1_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh2_remote_asn }}" + ip: "{{ bgp_neigh2_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh3_remote_asn }}" + ip: "{{ bgp_neigh3_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh4_remote_asn }}" + ip: "{{ bgp_neigh4_ip }}" + admin: up + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh5_remote_asn }}" + ip: "{{ bgp_neigh5_ip }}" + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh6_remote_asn }}" + ip: "{{ bgp_neigh6_ip }}" + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh7_remote_asn }}" + ip: "{{ bgp_neigh7_ip }}" + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh8_remote_asn }}" + ip: "{{ bgp_neigh8_ip }}" + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml 
b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml new file mode 100644 index 00000000..38691a5e --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml @@ -0,0 +1,77 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +leaf_hostname: "leaf-1" +os10_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: crc + state: present +os10_interface: + ethernet 1/1/1: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.1.2/24 + ipv6_and_mask: 2001:100:1:1::2/64 + state_ipv6: present + ethernet 1/1/9: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.1.2/24 + ipv6_and_mask: 2001:100:2:1::2/64 + state_ipv6: present +os10_bgp: + asn: 64801 + router_id: 100.0.2.1 + address_family_ipv4: true + address_family_ipv6: true + best_path: + as_path: multipath-relax + as_path_state: present + med: + - attribute: missing-as-worst + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.1.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.1.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:1::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:1::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml new file mode 100644 index 00000000..d760626d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml @@ -0,0 +1,81 @@ +hostname: leaf2 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +leaf_hostname: "leaf-2" +os10_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: crc + state: present +os10_interface: + ethernet 1/1/1: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.17.2/24 + ipv6_and_mask: 2001:100:1:11::2/64 + state_ipv6: present + ethernet 1/1/9: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.17.2/24 + ipv6_and_mask: 2001:100:2:11::2/64 +os10_bgp: + asn: 64802 + router_id: 100.0.2.2 + address_family_ipv4: true + address_family_ipv6: true + best_path: + as_path: multipath-relax + as_path_state: present + med: + - attribute: missing-as-worst + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.18.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.1.17.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.17.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:11::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:11::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml new file mode 100644 index 00000000..7b199125 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml @@ -0,0 +1,81 @@ +hostname: leaf3 +ansible_ssh_user: xxxxx 
+ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +leaf_hostname: "leaf-3" +os10_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: crc + state: present +os10_interface: + ethernet 1/1/1: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.33.2/24 + ipv6_and_mask: 2001:100:1:21::2/64 + state_ipv6: present + ethernet 1/1/9: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.33.2/24 + ipv6_and_mask: 2001:100:2:21::2/64 +os10_bgp: + asn: 64803 + router_id: 100.0.2.3 + address_family_ipv4: true + address_family_ipv6: true + best_path: + as_path: multipath-relax + as_path_state: present + med: + - attribute: missing-as-worst + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.33.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.33.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:21::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:22::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:21::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml new file mode 100644 index 00000000..e06099e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml @@ -0,0 +1,77 @@ +hostname: leaf4 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +leaf_hostname: "leaf-4" +os10_system: + hostname: "{{ leaf_hostname }}" + 
hash_algo: + algo: + - name: ecmp + mode: crc + state: present +os10_interface: + ethernet 1/1/5: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.49.2/24 + ipv6_and_mask: 2001:100:1:31::2/64 + state_ipv6: present + ethernet 1/1/17: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.49.2/24 + ipv6_and_mask: 2001:100:2:31::2/64 + state_ipv6: present +os10_bgp: + asn: 64804 + router_id: 100.0.2.4 + address_family_ipv4: true + address_family_ipv6: true + best_path: + as_path: multipath-relax + as_path_state: present + med: + - attribute: missing-as-worst + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.49.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.49.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:31::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:31::1 + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml new file mode 100644 index 00000000..2d926034 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml @@ -0,0 +1,61 @@ +hostname: spine1 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +spine_hostname: "spine-1" + +os10_interface: + ethernet 1/1/1: + desc: "Connected to leaf 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.1.1/24 + ipv6_and_mask: 2001:100:1:1::1/64 + state_ipv6: present + ethernet 1/1/17: + desc: "Connected 
to leaf 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.33.1/24 + ipv6_and_mask: 2001:100:1:21::1/64 + state_ipv6: present + ethernet 1/1/25: + desc: "Connected to leaf 3" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.17.1/24 + ipv6_and_mask: 2001:100:1:11::1/64 + state_ipv6: present + ethernet 1/1/9: + desc: "Connected to leaf 4" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.49.1/24 + ipv6_and_mask: 2001:100:1:31::1/64 + state_ipv6: present + +bgp_router_id: "100.0.1.1" +bgp_neigh1_remote_asn: 64801 +bgp_neigh1_ip: "100.1.1.2" +bgp_neigh2_remote_asn: 64803 +bgp_neigh2_ip: "100.1.33.2" +bgp_neigh3_remote_asn: 64802 +bgp_neigh3_ip: "100.1.17.2" +bgp_neigh4_remote_asn: 64804 +bgp_neigh4_ip: "100.1.49.2" +bgp_neigh5_remote_asn: 64801 +bgp_neigh5_ip: "2001:100:1:1::2" +bgp_neigh6_remote_asn: 64802 +bgp_neigh6_ip: "2001:100:1:11::2" +bgp_neigh7_remote_asn: 64803 +bgp_neigh7_ip: "2001:100:1:21::2" +bgp_neigh8_remote_asn: 64804 +bgp_neigh8_ip: "2001:100:1:31::2" diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml new file mode 100644 index 00000000..7c616e9f --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml @@ -0,0 +1,60 @@ +hostname: spine2 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 +spine_hostname: "spine-2" +os10_interface: + ethernet 1/1/1: + desc: "Connected to leaf 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.1.1/24 + ipv6_and_mask: 2001:100:2:1::1/64 + state_ipv6: present + ethernet 1/1/25: + desc: "Connected to leaf 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.17.1/24 + ipv6_and_mask: 2001:100:2:11::1/64 + state_ipv6: present + ethernet 1/1/17: + desc: "Connected to leaf 3" + mtu: 
9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.33.1/24 + ipv6_and_mask: 2001:100:2:21::1/64 + state_ipv6: present + ethernet 1/1/9: + desc: "Connected to leaf 4" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.49.1/24 + ipv6_and_mask: 2001:100:2:31::1/64 + state_ipv6: present + +bgp_router_id: "100.0.1.2" +bgp_neigh1_remote_asn: 64801 +bgp_neigh1_ip: "100.2.1.2" +bgp_neigh2_remote_asn: 64802 +bgp_neigh2_ip: "100.2.33.2" +bgp_neigh3_remote_asn: 64803 +bgp_neigh3_ip: "100.2.17.2" +bgp_neigh4_remote_asn: 64804 +bgp_neigh4_ip: "100.2.49.2" +bgp_neigh5_remote_asn: 64801 +bgp_neigh5_ip: "2001:100:2:1::2" +bgp_neigh6_remote_asn: 64802 +bgp_neigh6_ip: "2001:100:2:11::2" +bgp_neigh7_remote_asn: 64803 +bgp_neigh7_ip: "2001:100:2:21::2" +bgp_neigh8_remote_asn: 64804 +bgp_neigh8_ip: "2001:100:2:31::2" diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml new file mode 100644 index 00000000..9516f660 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=10.11.182.25 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md new file mode 100644 index 00000000..6d1af994 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md @@ -0,0 +1,63 @@ +# VxLAN Symmetric-IRB configuration using BGP EVPN using the Ansible collection for Dell EMC SmartFabric OS10 + + +This example describes how to use Ansible to build a Leaf-Spine topology with Dell EMC PowerSwitch platforms 
running Dell EMC SmartFabric OS10, using a VxLAN Symmetric-IRB configuration model. + +VxLAN Symmetric-IRB is configured using BGP EVPN with Leaf-Spine topology. BGP unnumbered is configured between the Leaf and Spine over VLANS for both underlay and overlay. VLT is configured between the pair of Leaf nodes. + +In all Leaf nodes, the L3 VRF VNI "test" is configured to route between different VNs spanned across the topology, and the VTEP router MAC is configured to identify the remote VTEPs. +VN 100 and VN 300 is configured in a pair of Leaf nodes, and VN 200 and VN 300 configured in an other pair of Leaf nodes. + +## Create simple Ansible playbook + +**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address. + +**2**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport. + +**3**. Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport. + +**4**. Use the *os10_interface* and *os10_vlan* roles to configure the required VLANs. + +**5**. Use the *os10_bgp* role to configure BGP unnumbered. + +**Configurations for VTEP-1** + +**1**. Create a host variable file called `host_vars/prim-vtep1.yaml`. + +**2**. Create a host variable file called `host_vars/sec-vtep1.yaml`. + +**3**. Define the host, credentials, and transport. + +**4**. Use the *os10_interface* and *os10_vlan* roles to configure the required VLANs. + +**5**. Use the *os10_bgp* role to configure BGP unnumbered. + +**6**. Use the *os10_vxlan* role to configure VN networks, EVPN and Symmetric IRB functionality. + +**7**. Use *os10_vlt* role to configure VLT between leaves prim-vtep1 and sec-vtep1. + +**Configurations for VTEP-2** + +**1**. Create a host variable file called `host_vars/prim-vtep2.yaml`. + +**2**. Create a host variable file called `host_vars/sec-vtep2.yaml`. + +**3**. Define the host, credentials, and transport. + +**4**. 
Use *os10_interface* and *os10_vlan* roles to configure the required VLANs. + +**5**. Use the *os10_bgp* role to configure BGP unnumbered. + +**6**. Use *os10_vxlan* role to configure VN networks, EVPN and Symmetric IRB functionality. + +**7**. Use the *os10_vlt* role to configure VLT between leaves prim-vtep2 and sec-vtep2. + +**Create and run the playbook** + +**1**. Create a playbook called `datacenter.yaml`. + +**2**. Run the playbook. + + ansible-playbook -i inventory.yaml datacenter.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml new file mode 100644 index 00000000..d8b1d413 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml @@ -0,0 +1,16 @@ +--- +- hosts: datacenter + connection: network_cli + collections: + - dellemc.os10 + vars: + build_dir: "/home/administrator/ansible/debug" + roles: + - os10_vrf + - os10_interface + - os10_system + - os10_bgp + - os10_lag + - os10_vlan + - os10_vxlan + - os10_vlt diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml new file mode 100644 index 00000000..5ad28505 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml @@ -0,0 +1,210 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "VLT1-Primary" + +os10_bgp: + asn: 100 + router_id: 1.1.1.10 + neighbor: + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan10 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + 
state: present + admin: up + state: present + - type: ipv4 + interface: vlan11 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + ipv4_network: 1.1.1.1/32 + redistribute: + - route_type: connected + address_type: ipv4 + state: present + state: "present" + + +os10_interface: + loopback 0: + admin: up + ip_and_mask: 1.1.1.1/32 + ethernet 1/1/6: + switchport: False + admin: up + ethernet 1/1/4: + admin: up + switchport: False + ethernet 1/1/5: + admin: up + switchport: False + ethernet 1/1/1: + admin: up + switchport: False + portmode: "trunk" +# ethernet 1/1/2: + ethernet 1/1/3: + admin: up + switchport: False + portmode: "trunk" + port-channel 10: + portmode: "trunk" + admin: up + vlan 10: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 11: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 20: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + virtual-network 100: + vrf: "test" + ip_and_mask: "15.1.1.1/24" + ip_virtual_gateway_ip: "15.1.1.254" + admin: up + virtual-network 300: + vrf: "test" + ip_and_mask: "25.1.1.1/24" + ip_virtual_gateway_ip: "25.1.1.254" + admin: up + +os10_lag: + port-channel 10: + type: dynamic + channel_members: +# - port: ethernet1/1/5 + - port: ethernet1/1/6 + mode: active + state: present + +os10_vlan: + vlan 10: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 20: + tagged_members: +# - port: ethernet 1/1/2 + - port: ethernet 1/1/3 + state: "present" + access_vlan: "false" + state: "present" + +os10_vrf: + vrfdetails: + - vrf_name: "test" + state: "present" + +os10_vxlan: + anycast_gateway_mac: "00:00:aa:bb:ee:ff" + nve: + source_interface: 0 + state: "present" + evpn: + evi: + - id: 100 + vni: + id: 100 + state: "present" + rd: "auto" + route_target: + - type: "manual" + asn_value: "65530:65532" + route_target_type: "both" + state: 
"present" + state: "present" + - id: 300 + vni: + id: 300 + state: "present" + rd: "auto" + route_target: + - type: "auto" + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + virtual_network: + virtual_net: + - id: 100 + vlt_vlan_id: 100 + member_interface: + - ifname: "port-channel10" + type: "tagged" + vlanid: 100 + state: "present" + vxlan_vni: + id: 100 + state: "present" + state: "present" + - id: 300 + vlt_vlan_id: 300 + member_interface: + - ifname: "port-channel10" + type: "tagged" + vlanid: 300 + state: "present" + vxlan_vni: + id: 300 + state: "present" + state: "present" + +os10_vlt: + domain: 1 + destination_type: "ipv4" + peer_routing: True +# discovery_intf: "1/1/3-1/1/4" + discovery_intf: "1/1/4-1/1/5" + vlt_mac: 00:00:00:11:22:33 + vlt_peers: + Po 10: + peer_lag: 10 + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml new file mode 100644 index 00000000..ea49d19d --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml @@ -0,0 +1,194 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "VLT2-Primary" + +os10_bgp: + asn: 300 + router_id: 2.2.2.10 + ipv4_network: 2.2.2.2/32 + redistribute: + - route_type: connected + address_type: ipv4 + state: present + state: "present" + neighbor: + - type: ipv4 + interface: vlan50 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan60 + send_community: + - type: extended + 
state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan11 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + +os10_interface: + loopback 0: + admin: up + ip_and_mask: 2.2.2.2/32 + loopback 10: + admin: up + vrf: "test" + ip_and_mask: 50.1.1.10/32 + loopback 20: + admin: up + vrf: "test" + ip_and_mask: 60.1.1.10/32 + ethernet 1/1/1: + admin: up + switchport: False + portmode: "trunk" + ethernet 1/1/2: + admin: up + switchport: False + portmode: "trunk" + ethernet 1/1/3: + switchport: False + admin: up + ethernet 1/1/4: + admin: up + switchport: False + ethernet 1/1/5: + switchport: False + vrf: "test" + ip_and_mask: "21.21.21.20/24" + admin: up + vlan 11: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 50: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 60: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + virtual-network 200: + vrf: "test" + ip_and_mask: "30.1.1.100/24" + ip_virtual_gateway_ip: "30.1.1.254" + admin: up + virtual-network 300: + vrf: "test" + ip_and_mask: "25.1.1.100/24" + ip_virtual_gateway_ip: "25.1.1.254" + admin: up + +os10_vlan: + vlan 50: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 60: + tagged_members: + - port: ethernet 1/1/2 + state: "present" + access_vlan: "false" + state: "present" + +os10_vrf: + vrfdetails: + - vrf_name: "test" + state: "present" + +os10_vxlan: + anycast_gateway_mac: "00:00:aa:bb:ee:ff" + nve: + source_interface: 0 + state: "present" + evpn: + evi: + - id: 200 + vni: + id: 200 + state: "present" + rd: "auto" + route_target: + - type: "manual" + asn_value: "65530:65533" + route_target_type: "both" + state: "present" + state: "present" + - id: 300 + vni: + id: 300 + state: "present" + rd: "auto" + route_target: + - type: 
"auto" + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + rmac: 00:00:22:22:22:22 + dis_rt_asn: "true" + virtual_network: + virtual_net: + - id: 200 + vlt_vlan_id: 200 + vxlan_vni: + id: 200 + state: "present" + state: "present" + - id: 300 + vlt_vlan_id: 300 + vxlan_vni: + id: 300 + state: "present" + state: "present" + +os10_vlt: + domain: 1 + destination_type: "ipv4" + peer_routing: True + discovery_intf: "1/1/3-1/1/4" + vlt_mac: 00:00:00:44:55:66 + vlt_peers: + Po 10: + peer_lag: 10 + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml new file mode 100644 index 00000000..ac04c3c6 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml @@ -0,0 +1,206 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "VLT1-SEC" + +os10_bgp: + asn: 100 + router_id: 1.1.1.20 + neighbor: + - type: ipv4 + interface: vlan40 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan30 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan11 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + ipv4_network: 1.1.1.1/32 + redistribute: + - route_type: connected + address_type: ipv4 + state: present + state: "present" + + +os10_interface: + loopback 0: + admin: up + 
ip_and_mask: 1.1.1.1/32 + ethernet 1/1/3: + switchport: False + admin: up + ethernet 1/1/4: + admin: up + switchport: False + ethernet 1/1/5: + admin: up + switchport: False + ethernet 1/1/1: + admin: up + switchport: False + portmode: "trunk" + ethernet 1/1/2: + admin: up + switchport: False + portmode: "trunk" + port-channel 10: + portmode: "trunk" + admin: up + vlan 30: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 11: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 40: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + virtual-network 100: + vrf: "test" + ip_and_mask: "15.1.1.2/24" + ip_virtual_gateway_ip: "15.1.1.254" + admin: up + virtual-network 300: + vrf: "test" + ip_and_mask: "25.1.1.2/24" + ip_virtual_gateway_ip: "25.1.1.254" + admin: up + +os10_lag: + port-channel 10: + type: dynamic + channel_members: + - port: ethernet1/1/5 + mode: active + state: present + +os10_vlan: + vlan 30: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 40: + tagged_members: + - port: ethernet 1/1/2 + state: "present" + access_vlan: "false" + state: "present" + +os10_vrf: + vrfdetails: + - vrf_name: "test" + state: "present" + +os10_vxlan: + anycast_gateway_mac: "00:00:aa:bb:ee:ff" + nve: + source_interface: 0 + state: "present" + evpn: + evi: + - id: 100 + vni: + id: 100 + state: "present" + rd: "auto" + route_target: + - type: "manual" + asn_value: "65530:65532" + route_target_type: "both" + state: "present" + state: "present" + - id: 300 + vni: + id: 300 + state: "present" + rd: "auto" + route_target: + - type: "auto" + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + virtual_network: + virtual_net: + - 
id: 100 + vlt_vlan_id: 100 + member_interface: + - ifname: "port-channel10" + type: "tagged" + vlanid: 100 + state: "present" + vxlan_vni: + id: 100 + state: "present" + state: "present" + - id: 300 + vlt_vlan_id: 300 + member_interface: + - ifname: "port-channel10" + type: "tagged" + vlanid: 300 + state: "present" + vxlan_vni: + id: 300 + state: "present" + state: "present" + +os10_vlt: + domain: 1 + destination_type: "ipv4" + peer_routing: True + discovery_intf: "1/1/3-1/1/4" + vlt_mac: 00:00:00:11:22:33 + vlt_peers: + Po 10: + peer_lag: 10 + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml new file mode 100644 index 00000000..e23ed9c5 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml @@ -0,0 +1,200 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "VLT2-SEC" + +os10_bgp: + asn: 300 + router_id: 2.2.2.20 + neighbor: + - type: ipv4 + interface: vlan70 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan80 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + interface: vlan11 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + vrf: + name: "test" + address_type: ipv4 + redistribute: + - route_type: l2vpn + ipv4_network: 2.2.2.2/32 + redistribute: + - route_type: connected + address_type: ipv4 + state: present + state: "present" + + +os10_interface: + loopback 0: + admin: up + ip_and_mask: 2.2.2.2/32 + loopback 10: + admin: up + vrf: "test" + ip_and_mask: 80.1.1.10/32 + 
loopback 20: + admin: up + vrf: "test" + ip_and_mask: 90.1.1.10/32 + ethernet 1/1/1: + admin: up + switchport: False + portmode: "trunk" + ethernet 1/1/2: + admin: up + switchport: False + portmode: "trunk" + ethernet 1/1/3: + switchport: False + admin: up + ethernet 1/1/4: + admin: up + switchport: False + ethernet 1/1/5: + switchport: False + vrf: "test" + ip_and_mask: "20.20.20.20/24" + admin: up + vlan 11: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 70: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 80: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + virtual-network 200: + vrf: "test" + ip_and_mask: "30.1.1.101/24" + ip_virtual_gateway_ip: "30.1.1.254" + admin: up + virtual-network 300: + vrf: "test" + ip_and_mask: "25.1.1.101/24" + ip_virtual_gateway_ip: "25.1.1.254" + admin: up + +os10_vlan: + vlan 70: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 80: + tagged_members: + - port: ethernet 1/1/2 + state: "present" + access_vlan: "false" + state: "present" + +os10_vrf: + vrfdetails: + - vrf_name: "test" + state: "present" + +os10_vxlan: + anycast_gateway_mac: "00:00:aa:bb:ee:ff" + nve: + source_interface: 0 + state: "present" + evpn: + evi: + - id: 200 + vni: + id: 200 + state: "present" + rd: "auto" + route_target: + - type: "manual" + asn_value: "65530:65533" + route_target_type: "both" + state: "present" + state: "present" + - id: 300 + vni: + id: 300 + state: "present" + rd: "auto" + route_target: + - type: "auto" + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + rmac: 00:00:22:22:22:22 + dis_rt_asn: "true" + virtual_network: + virtual_net: + - id: 200 + vlt_vlan_id: 200 + vxlan_vni: + id: 200 + state: "present" + state: 
"present" + - id: 300 + vlt_vlan_id: 300 + vxlan_vni: + id: 300 + state: "present" + state: "present" + +os10_vlt: + domain: 1 + destination_type: "ipv4" + peer_routing: True + discovery_intf: "1/1/3-1/1/4" + vlt_mac: 00:00:00:44:55:66 + vlt_peers: + Po 10: + peer_lag: 10 + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml new file mode 100644 index 00000000..4672562b --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml @@ -0,0 +1,95 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "spine1" + +os10_bgp: + asn: 200 + router_id: 9.9.9.10 + neighbor: + - type: "peergroup" + name: "ebgp_session" + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + state: present + - type: ipv4 + interface: vlan10 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan30 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan50 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan70 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + state: "present" + + +os10_interface: + vlan 10: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 30: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 50: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 70: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + +os10_vlan: + vlan 10: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 30: + tagged_members: + - port: ethernet 1/1/2 + state: "present" + access_vlan: "false" + state: "present" + 
vlan 50: + tagged_members: + - port: ethernet 1/1/3 + state: "present" + access_vlan: "false" + state: "present" + vlan 70: + tagged_members: + - port: ethernet 1/1/4 + state: "present" + access_vlan: "false" + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml new file mode 100644 index 00000000..0e953b88 --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml @@ -0,0 +1,95 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os10.os10 + +os10_system: + hostname: "spine2" + +os10_bgp: + asn: 201 + router_id: 9.9.9.20 + neighbor: + - type: "peergroup" + name: "ebgp_session" + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + state: present + - type: ipv4 + interface: vlan20 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan40 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan60 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + - type: ipv4 + interface: vlan80 + peergroup: ebgp_session + peergroup_type: ebgp + admin: up + state: present + state: "present" + + +os10_interface: + vlan 20: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 40: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 60: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + vlan 80: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + +os10_vlan: + vlan 20: + tagged_members: + - port: ethernet 1/1/1 + state: "present" + access_vlan: "false" + state: "present" + vlan 40: + tagged_members: + - port: ethernet 1/1/2 + state: "present" + access_vlan: "false" + state: "present" + vlan 60: + tagged_members: + - port: ethernet 1/1/3 + state: 
"present" + access_vlan: "false" + state: "present" + vlan 80: + tagged_members: + - port: ethernet 1/1/4 + state: "present" + access_vlan: "false" + state: "present" diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml new file mode 100644 index 00000000..104712dc --- /dev/null +++ b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=101.104.34.141 +prim-vtep1 ansible_host=101.104.34.217 +sec-vtep1 ansible_host=101.104.34.218 +spine2 ansible_host=101.104.34.142 +prim-vtep2 ansible_host=101.104.34.219 +sec-vtep2 ansible_host=101.104.34.220 + +[site1] +prim-vtep1 +sec-vtep1 +spine1 + +[site2] +prim-vtep2 +spine2 +sec-vtep2 + +[datacenter:children] +site1 +site2 diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png new file mode 100644 index 00000000..b2f695b4 Binary files /dev/null and b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png differ diff --git a/ansible_collections/dellemc/os10/plugins/action/os10.py b/ansible_collections/dellemc/os10/plugins/action/os10.py new file mode 100644 index 00000000..5669001c --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/action/os10.py @@ -0,0 +1,94 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + + +from ansible import constants as C +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import Connection +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_provider_spec +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + self._config_module = True if self._task.action == 'os10_config' else False + socket_path = None + + if self._play_context.connection == 'network_cli': + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using network_cli and will be ignored') + del self._task.args['provider'] + elif self._play_context.connection == 'local': + provider = load_provider(os10_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'dellemc.os10.os10' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + pc.become = 
provider['authorize'] or False + if pc.become: + pc.become_method = 'enable' + pc.become_pass = provider['auth_pass'] + + display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + # make sure we are in the right cli context which should be + # enable mode and not config module + if socket_path is None: + socket_path = self._connection.socket_path + + conn = Connection(socket_path) + out = conn.get_prompt() + while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'): + display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) + conn.send_command('exit') + out = conn.get_prompt() + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py b/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py new file mode 100644 index 00000000..602186c8 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# (c) 2020, Ansible by Red Hat, inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.six import StringIO, string_types + +from ansible.plugins.action import ActionBase +from ansible.errors import AnsibleError + +try: + import textfsm + HAS_TEXTFSM = True +except ImportError: + HAS_TEXTFSM = False + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + ''' handler for textfsm action ''' + + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + try: + if not HAS_TEXTFSM: + raise AnsibleError('textfsm_parser engine requires the TextFSM library to be installed') + + try: + filename = self._task.args.get('file') + src = self._task.args.get('src') + content = self._task.args['content'] + name = self._task.args.get('name') + except KeyError as exc: + raise AnsibleError('missing required argument: %s' % exc) + + if src and filename: + raise AnsibleError('`src` and `file` are mutually exclusive arguments') + + if not isinstance(content, string_types): + return {'failed': True, 'msg': '`content` must be of type str, got %s' % type(content)} + + if filename: + tmpl = open(filename) + else: + tmpl = StringIO() + tmpl.write(src.strip()) + tmpl.seek(0) + + try: + re_table = textfsm.TextFSM(tmpl) + fsm_results = re_table.ParseText(content) + + except Exception as exc: + raise AnsibleError(str(exc)) + + final_facts = [] + for item in fsm_results: + facts = {} + facts.update(dict(zip(re_table.header, item))) + final_facts.append(facts) + + if name: + result['ansible_facts'] = {name: final_facts} + else: + result['ansible_facts'] = {} + + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/ansible_collections/dellemc/os10/plugins/cliconf/os10.py b/ansible_collections/dellemc/os10/plugins/cliconf/os10.py new file mode 100644 index 00000000..7d009f5a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/plugins/cliconf/os10.py @@ -0,0 +1,88 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +cliconf: os10 +short_description: Use os10 cliconf to run command on Dell OS10 platform +description: + - This os10 plugin provides low level abstraction apis for + sending and receiving CLI commands from Dell OS10 network devices. 
+""" + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'dellemc.os10.os10' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'OS Version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Type (\S+)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + reply = self.get('show running-configuration | grep hostname') + data = to_text(reply, errors='surrogate_or_strict').strip() + match = re.search(r'^hostname (.+)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + return self.invalid_params("fetching configuration from %s is not supported" % source) + if source == 'running': + cmd = 'show running-config all' + else: + cmd = 'show startup-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(to_bytes(cmd)) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) diff --git a/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py 
b/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py new file mode 100644 index 00000000..9a6baf44 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + port: + description: + - Specifies the port to use when building the connection to the remote + device. + type: int + username: + description: + - User to authenticate the SSH session to the remote device. If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Password to authenticate the SSH session to the remote device. If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + ssh_keyfile: + description: + - Path to an ssh key used to authenticate the SSH session to the remote + device. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path + timeout: + description: + - Specifies idle timeout (in seconds) for the connection. Useful if the + console freezes before continuing. For example when saving + configurations. 
+ type: int + authorize: + description: + - Instructs the module to enter privileged mode on the remote device before + sending any commands. If not specified, the device will attempt to execute + all commands in non-privileged mode. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be + used instead. + type: bool + default: false + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode on the + remote device. If I(authorize) is false, then this argument does nothing. + If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTH_PASS) will be used instead. + type: str +notes: + - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking). +''' diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py b/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py new file mode 100644 index 00000000..b287c38c --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py @@ -0,0 +1,42 @@ +from __future__ import (absolute_import, division, print_function) +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from collections import OrderedDict +import traceback + +LIB_IMP_ERR = None +ERR_MSG = None +try: + import xmltodict + import yaml + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + +__copyright__ = "(c) Copyright 2020 Dell Inc. or its subsidiaries. All rights reserved." 
+__metaclass__ = type + + +class BaseNetworkShow(object): + """The goal of this class is to extended by other in order to implement show system network view ansible modules""" + + def __init__(self): + self.module = AnsibleModule(argument_spec=self.get_fields()) + if not HAS_LIB: + self.module.fail_json( + msg=ERR_MSG, + exception=LIB_IMP_ERR) + self.exit_msg = OrderedDict() + + def xml_to_dict(self, value): + + return xmltodict.parse(value) + + def dict_to_yaml(self, value): + return yaml.safe_dump(value, default_flow_style=False) + + +if __name__ == '__main__': + pass diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py b/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py new file mode 100644 index 00000000..35976488 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py @@ -0,0 +1,146 @@ +# +# (c) 2020 Peter Sprygada, +# (c) 2020 Red Hat, Inc +# +# Copyright (c) 2020 Dell Inc. +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +from __future__ import (absolute_import, division, print_function) + +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible.module_utils.connection import exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine + +__metaclass__ = type + +_DEVICE_CONFIGS = {} + +WARNING_PROMPTS_RE = [ + r"[\r\n]?\[confirm yes/no\]:\s?$", + r"[\r\n]?\[y/n\]:\s?$", + r"[\r\n]?\[yes/no\]:\s?$" +] + +os10_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'), + 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True), + 'timeout': dict(type='int'), +} +os10_argument_spec = { + 'provider': 
dict(type='dict', options=os10_provider_spec), +} + + +def check_args(module, warnings): + pass + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'show running-configuration' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict')) + cfg = to_text(out, errors='surrogate_or_strict').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc) + responses.append(to_text(out, errors='surrogate_or_strict')) + return responses + + +def load_config(module, commands): + rc, out, err = exec_command(module, 'configure terminal') + if rc != 0: + module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict')) + + commands.append('commit') + for command in to_list(commands): + if command == 'end': + continue + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc) + + exec_command(module, 'end') + + +def get_sublevel_config(running_config, module): + contents = list() + current_config_contents = list() + running_config = NetworkConfig(contents=running_config, indent=1) + obj = running_config.get_object(module.params['parents']) + if obj: + contents = obj.children + contents[:0] = 
module.params['parents'] + + indent = 0 + for c in contents: + if isinstance(c, str): + current_config_contents.append(c.rjust(len(c) + indent, ' ')) + if isinstance(c, ConfigLine): + current_config_contents.append(c.raw) + indent = 1 + sublevel_config = '\n'.join(current_config_contents) + + return sublevel_config diff --git a/ansible_collections/dellemc/os10/plugins/modules/__init__.py b/ansible_collections/dellemc/os10/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py b/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py new file mode 100644 index 00000000..b7d82f77 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py @@ -0,0 +1,124 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__copyright__ = "(c) Copyright 2020 Dell Inc. or its subsidiaries. All rights reserved." + +__metaclass__ = type + + +DOCUMENTATION = ''' +module: base_xml_to_dict +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Operations for show command output convertion from xml to json format. +description: + + - Get the show system inforamtion of a Leaf-Spine. + +options: + cli_responses: + type: str + description: + - show command xml output + required: True +''' +EXAMPLES = ''' +Copy below YAML into a playbook (e.g. 
play.yml) and run as follows: + +#$ ansible-playbook -i inv play.yml +name: setup the plabook to get show command output in dict format +hosts: localhost +connection: local +gather_facts: False +vars: + cli: + username: admin + password: admin +tasks: +- name: "Get Dell EMC OS10 Show output in dict format" + os10_command: + commands: "{{ command_list }}" + register: show +- debug: var=show +- name: call to lib to get output in dict + base_xml_to_dict: + cli_responses: "{{ item }}" + loop: "{{ show.stdout }}" +''' + +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from collections import OrderedDict +import traceback + +LIB_IMP_ERR = None +ERR_MSG = None +try: + import xmltodict + HAS_LIB = True +except Exception as e: + HAS_LIB = False + ERR_MSG = to_native(e) + LIB_IMP_ERR = traceback.format_exc() + + +class XmlToDictAnsibleModule(object): + """The goal of this class is to convert xml input to dict""" + + def __init__(self): + self.module = AnsibleModule(argument_spec=self.get_fields()) + self.cli_responses = self.module.params['cli_responses'] + self.exit_msg = OrderedDict() + + def get_fields(self): + """Return valid fields""" + base_fields = { + 'cli_responses': { + 'type': 'str', + 'required': True + } + } + return base_fields + + def build_xml_list(self, xml_output): + xml_str_list = [] + xml_declaration_tag = '\n' + for data in xml_output.split(' 0: + self.exit_msg.update({"results": mtu_mismatch_list}) + else: + self.exit_msg.update( + {"results": "There is no MTU mistmatch between neighbors"}) + self.module.exit_json(changed=False, msg=self.exit_msg) + except Exception as e: + self.module.fail_json( + msg=to_native(e), + exception=traceback.format_exc()) + + +def main(): + module_instance = MtuValidation() + module_instance.perform_action() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_command.py 
b/ansible_collections/dellemc/os10/plugins/modules/os10_command.py new file mode 100644 index 00000000..a99f1a67 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/modules/os10_command.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os10_command +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Run commands on devices running Dell EMC SmartFabric OS10 +description: + - Sends arbitrary commands to a OS10 device and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(dellemc.os10.os10_config) to configure OS10 devices. +extends_documentation_fragment: dellemc.os10.os10 +options: + commands: + description: + - List of commands to send to the remote OS10 device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + type: list + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. 
+ type: list + elements: str + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + type: str + default: all + choices: [ all, any ] + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + type: int + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + type: int + default: 1 +""" + +EXAMPLES = """ +tasks: + - name: run show version on remote devices + os10_command: + commands: show version + + - name: run show version and check to see if output contains OS10 + os10_command: + commands: show version + wait_for: result[0] contains OS10 + + - name: run multiple commands on remote nodes + os10_command: + commands: + - show version + - show interface + + - name: run multiple commands and evaluate the output + os10_command: + commands: + - show version + - show interface + wait_for: + - result[0] contains OS10 + - result[1] contains Ethernet +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + 
type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import run_commands +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + elif item['command'].startswith('conf'): + module.fail_json( + msg='os10_command does not support running config mode ' + 'commands. 
Please use os10_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', elements='str'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(os10_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_config.py b/ansible_collections/dellemc/os10/plugins/modules/os10_config.py new file mode 100644 index 00000000..925568f1 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/modules/os10_config.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. 
+# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os10_config +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Manage Dell EMC SmartFabric OS10 configuration sections +description: + - OS10 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with OS10 configuration sections in + a deterministic way. +extends_documentation_fragment: dellemc.os10.os10 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + type: list + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + type: list + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines). + type: path + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. 
This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + type: list + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + type: list + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + type: str + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + type: str + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When you set this argument to + I(merge), the configuration changes merge with the current + device running configuration. When you set this argument to I(check) + the configuration updates are determined but not actually configured + on the remote device. 
+ type: str + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + type: bool + default: 'no' + config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + type: str + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + backup_options: + description: + - This is a dict object containing configurable options related to backup file path. + The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set + to I(no) this option will be silently ignored. + suboptions: + filename: + description: + - The filename to be used to store the backup configuration. If the the filename + is not given it will be generated based on the hostname, current time and date + in format defined by _config.@ + type: str + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will be first + created and the filename is either the value of C(filename) or default filename + as described in C(filename) options description. 
If the path value is not given + in that case a I(backup) directory will be created in the current working directory + and backup configuration will be copied in C(filename) within I(backup) directory. + type: path + type: dict +""" + +EXAMPLES = """ +- os10_config: + lines: ['hostname {{ inventory_hostname }}'] + +- os10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + - 50 permit ip host 5.5.5.5 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + match: exact + +- os10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + replace: block + +- os10_config: + lines: ['hostname {{ inventory_hostname }}'] + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device. + returned: always + type: list + sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1'] +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. + returned: When not check_mode. 
+ type: bool + sample: True +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/os10_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import get_config, get_sublevel_config +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import load_config, run_commands +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import WARNING_PROMPTS_RE +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + commands = module.params['lines'][0] + if (isinstance(commands, dict)) and (isinstance((commands['command']), list)): + candidate.add(commands['command'], parents=parents) + elif (isinstance(commands, dict)) and (isinstance((commands['command']), str)): + candidate.add([commands['command']], parents=parents) + else: + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + return contents + + +def main(): + + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + 
update=dict(choices=['merge', 'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec) + ) + + argument_spec.update(os10_argument_spec) + + mutually_exclusive = [('lines', 'src')] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, saved=False, warnings=warnings) + + if module.params['backup']: + if not module.check_mode: + result['__backup__'] = get_config(module) + + commands = list() + candidate = get_candidate(module) + + if any((module.params['lines'], module.params['src'])): + if match != 'none': + config = get_running_config(module) + if parents: + contents = get_sublevel_config(config, module) + config = NetworkConfig(contents=contents, indent=1) + else: + config = NetworkConfig(contents=config, indent=1) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands') + if ((isinstance((module.params['lines']), list)) and + (isinstance((module.params['lines'][0]), dict)) and + (set(['prompt', 'answer']).issubset(module.params['lines'][0]))): + + cmd = {'command': commands, + 'prompt': module.params['lines'][0]['prompt'], + 'answer': module.params['lines'][0]['answer']} + commands = [module.jsonify(cmd)] + else: + commands = commands.split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + load_config(module, commands) + + result['changed'] = True + result['commands'] = commands + 
result['updates'] = commands + + if module.params['save']: + result['changed'] = True + if not module.check_mode: + cmd = {r'command': 'copy running-config startup-config', + r'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'} + run_commands(module, [cmd]) + result['saved'] = True + else: + module.warn('Skipping command `copy running-config startup-config`' + 'due to check_mode. Configuration not copied to ' + 'non-volatile storage') + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py b/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py new file mode 100644 index 00000000..c124422b --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os10_facts +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Collect facts from devices running Dell EMC SmartFabric OS10 +description: + - Collects a base set of device facts from a remote device that + is running OS10. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: dellemc.os10.os10 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. 
Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + type: list + default: [ '!config' ] +""" + +EXAMPLES = """ +# Collect all facts from the device +- os10_facts: + gather_subset: all + +# Collect only the config and default facts +- os10_facts: + gather_subset: + - config + +# Do not collect hardware facts +- os10_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_name: + description: The name of the OS that is running. + returned: Always. + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_servicetag: + description: The service tag number of the remote device. + returned: always + type: str +ansible_net_model: + description: The model name returned from the device. + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + +# hardware +ansible_net_cpu_arch: + description: CPU Architecture of the remote device. 
+ returned: when hardware is configured + type: str +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" + +import re + +try: + from lxml import etree as ET +except ImportError: + import xml.etree.ElementTree as ET + +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import run_commands +from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = [] + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version | display-xml', + 'show system | display-xml', + ] + + def populate(self): + super(Default, self).populate() + 
data = self.responses[0] + xml_data = ET.fromstring(data.encode('utf8')) + + self.facts['name'] = self.parse_name(xml_data) + self.facts['version'] = self.parse_version(xml_data) + self.facts['model'] = self.parse_model(xml_data) + self.facts['hostname'] = self.parse_hostname(xml_data) + + data = self.responses[1] + xml_data = ET.fromstring(data.encode('utf8')) + + self.facts['servicetag'] = self.parse_servicetag(xml_data) + + def parse_name(self, data): + sw_name = data.find('./data/system-sw-state/sw-version/sw-name') + if sw_name is not None: + return sw_name.text + else: + return "" + + def parse_version(self, data): + sw_ver = data.find('./data/system-sw-state/sw-version/sw-version') + if sw_ver is not None: + return sw_ver.text + else: + return "" + + def parse_hostname(self, data): + hostname = data.find('./data/system-state/system-status/hostname') + if hostname is not None: + return hostname.text + else: + return "" + + def parse_model(self, data): + prod_name = data.find('./data/system-sw-state/sw-version/sw-platform') + if prod_name is not None: + return prod_name.text + else: + return "" + + def parse_servicetag(self, data): + svc_tag = data.find('./data/system/node/unit/mfg-info/service-tag') + if svc_tag is not None: + return svc_tag.text + else: + return "" + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show version | display-xml', + 'show processes node-id 1 | grep "Mem :"' + ] + + def populate(self): + + super(Hardware, self).populate() + data = self.responses[0] + + xml_data = ET.fromstring(data.encode('utf8')) + + self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data) + + data = self.responses[1] + match = self.parse_memory(data) + if match: + self.facts['memtotal_mb'] = int(match[0]) // 1024 + self.facts['memfree_mb'] = int(match[1]) // 1024 + + def parse_cpu_arch(self, data): + cpu_arch = data.find('./data/system-sw-state/sw-version/cpu-arch') + if cpu_arch is not None: + return cpu_arch.text + else: + return "" + + def parse_memory(self, 
data): + return re.findall(r'(\d+)', data, re.M) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + self.facts['config'] = self.responses[0] + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interface | display-xml', + 'show lldp neighbors | display-xml' + ] + + def __init__(self, module): + self.intf_facts = dict() + self.lldp_facts = dict() + super(Interfaces, self).__init__(module) + + def populate(self): + super(Interfaces, self).populate() + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + int_show_data = (self.responses[0]).splitlines() + pattern = '?xml version' + data = '' + skip = True + + # The output returns multiple xml trees + # parse them before handling. + for line in int_show_data: + if pattern in line: + if skip is False: + xml_data = ET.fromstring(data.encode('utf8')) + self.populate_interfaces(xml_data) + data = '' + else: + skip = False + + data += line + + if skip is False: + xml_data = ET.fromstring(data.encode('utf8')) + self.populate_interfaces(xml_data) + + self.facts['interfaces'] = self.intf_facts + + lldp_data = (self.responses[1]).splitlines() + data = '' + skip = True + # The output returns multiple xml trees + # parse them before handling. 
+ for line in lldp_data: + if pattern in line: + if skip is False: + xml_data = ET.fromstring(data.encode('utf8')) + self.populate_neighbors(xml_data) + data = '' + else: + skip = False + + data += line + + if skip is False: + xml_data = ET.fromstring(data.encode('utf8')) + self.populate_neighbors(xml_data) + + self.facts['neighbors'] = self.lldp_facts + + def populate_interfaces(self, interfaces): + + for interface in interfaces.findall('./data/interfaces/interface'): + intf = dict() + name = self.parse_item(interface, 'name') + + intf['description'] = self.parse_item(interface, 'description') + intf['duplex'] = self.parse_item(interface, 'duplex') + intf['primary_ipv4'] = self.parse_primary_ipv4(interface) + intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface) + intf['ipv6'] = self.parse_ipv6_address(interface) + intf['mtu'] = self.parse_item(interface, 'mtu') + intf['type'] = self.parse_item(interface, 'type') + + self.intf_facts[name] = intf + + for interface in interfaces.findall('./bulk/data/interface'): + name = self.parse_item(interface, 'name') + try: + intf = self.intf_facts[name] + intf['bandwidth'] = self.parse_item(interface, 'speed') + intf['adminstatus'] = self.parse_item(interface, 'admin-status') + intf['operstatus'] = self.parse_item(interface, 'oper-status') + intf['macaddress'] = self.parse_item(interface, 'phys-address') + except KeyError: + # skip the reserved interfaces + pass + + for interface in interfaces.findall('./data/ports/ports-state/port'): + name = self.parse_item(interface, 'name') + # media-type name interface name format phy-eth 1/1/1 + mediatype = self.parse_item(interface, 'media-type') + + typ, sname = name.split('-eth') + name = "ethernet" + sname + try: + intf = self.intf_facts[name] + intf['mediatype'] = mediatype + except Exception: + # fanout + for subport in range(1, 5): + name = "ethernet" + sname + ":" + str(subport) + try: + intf = self.intf_facts[name] + intf['mediatype'] = mediatype + except Exception: + # 
valid case to handle 2x50G + pass + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_item(self, interface, item): + elem = interface.find(item) + if elem is not None: + return elem.text + else: + return "" + + def parse_primary_ipv4(self, interface): + ipv4 = interface.find('ipv4') + ip_address = "" + if ipv4 is not None: + prim_ipaddr = ipv4.find('./address/primary-addr') + if prim_ipaddr is not None: + ip_address = prim_ipaddr.text + self.add_ip_address(ip_address, 'ipv4') + + return ip_address + + def parse_secondary_ipv4(self, interface): + ipv4 = interface.find('ipv4') + ip_address = "" + if ipv4 is not None: + sec_ipaddr = ipv4.find('./address/secondary-addr') + if sec_ipaddr is not None: + ip_address = sec_ipaddr.text + self.add_ip_address(ip_address, 'ipv4') + + return ip_address + + def parse_ipv6_address(self, interface): + + ip_address = list() + + for addr in interface.findall('./ipv6/ipv6-addresses/address'): + + ipv6_addr = addr.find('./ipv6-address') + + if ipv6_addr is not None: + ip_address.append(ipv6_addr.text) + self.add_ip_address(ipv6_addr.text, 'ipv6') + + return ip_address + + def populate_neighbors(self, interfaces): + for interface in interfaces.findall('./bulk/data/interface'): + name = interface.find('name').text + rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name') + if rem_sys_name is not None: + self.lldp_facts[name] = list() + fact = dict() + fact['host'] = rem_sys_name.text + rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id') + fact['port'] = rem_sys_port.text + self.lldp_facts[name].append(fact) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = 
dict( + gather_subset=dict(default=['!config'], type='list') + ) + + argument_spec.update(os10_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + warnings = list() + check_args(module, warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py b/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py new file mode 100644 index 00000000..9922b9f8 --- /dev/null +++ b/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__copyright__ = "(c) 2020 Dell Inc. 
or its subsidiaries. All rights reserved." + +__metaclass__ = type + +DOCUMENTATION = ''' +module: show_system_network_summary +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Operations for show_system_network output in json/yaml format. +description: + + - Get the show system inforamtion of a Leaf-Spine. + +options: + output_type: + type: str + description: + - json or yaml + - Default value is json + default: json + required: False + cli_responses: + type: list + required: True + description: + - show system command xml output +''' +EXAMPLES = ''' +Copy below YAML into a playbook (e.g. play.yml) and run as follows: + +#$ ansible-playbook -i inv show.yml +name: show system Configuration +hosts: localhost +connection: local +gather_facts: False +vars: + cli: + username: admin + password: admin +tasks: +- name: "Get Dell EMC OS10 Show system summary" + os10_command: + commands: ['show system | display-xml'] + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_system +- set_fact: + output: "{{ output|default([])+ [{'inv_name': item.item, 'host': item.invocation.module_args.provider.host, 'stdout_show_system': item.stdout}] }}" + loop: "{{ show_system.results }}" +- debug: var=output +- name: "show system network call to lib " + show_system_network_summary: + cli_responses: "{{ output}} " + output_type: "{{ output_method if output_method is defined else 'json' }}" + register: show_system_network_summary +- debug: var=show_system_network_summary +''' + +import re +from ansible_collections.dellemc.os10.plugins.module_utils.network.base_network_show import BaseNetworkShow + + +class ShowSystemNetworkSummary(BaseNetworkShow): + def __init__(self): + BaseNetworkShow.__init__(self) + self.cli_responses = self.module.params['cli_responses'] + self.output_type = self.module.params['output_type'] + self.changed = False + + def get_fields(self): + spec_fields = { + 'cli_responses': { + 'type': 'list', + 'required': True + 
}, + 'output_type': { + 'type': 'str', + 'default': "json", + 'required': False + } + } + return spec_fields + + def perform_action(self): + out = list() + show_system_summary = self.cli_responses + if len(show_system_summary) > 0: + for item in show_system_summary: + out_dict = {} + host = item.get("host") + inv_name = item.get("inv_name") + show_system_response = item.get("stdout_show_system") + if show_system_response is not None: + result = BaseNetworkShow.xml_to_dict( + self, show_system_response[0]) + rpc_reply = result.get("rpc-reply") + if rpc_reply is not None: + data = rpc_reply.get("data") + if data is not None: + out_dict["host"] = host + out_dict["inv_name"] = inv_name + system_state = data.get("system-state") + if system_state is not None: + system_status = system_state.get( + "system-status") + if system_status is not None: + out_dict["hostname"] = system_status.get( + "hostname") + system = data.get("system") + if system is not None: + node = system.get("node") + if node is not None: + out_dict["node-mac"] = node.get("node-mac") + unit = node.get("unit") + if unit is not None: + out_dict["software-version"] = unit.get( + "software-version") + mfg_info = node.get("mfg-info") + if mfg_info is not None: + out_dict["service-tag"] = mfg_info.get( + "service-tag") + out_dict["device type"] = mfg_info.get( + "product-name") + if bool(out_dict): + out.append(out_dict) + if self.output_type != "json": + self.exit_msg.update( + {"results": (BaseNetworkShow.dict_to_yaml(self, out))}) + else: + self.exit_msg.update({"results": (out)}) + self.module.exit_json(changed=False, msg=self.exit_msg) + + +def main(): + module_instance = ShowSystemNetworkSummary() + module_instance.perform_action() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py b/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py new file mode 100644 index 00000000..2042dfe7 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved." + +__metaclass__ = type + +DOCUMENTATION = ''' +module: vlt_validate +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Validate the vlt info, raise an error if peer is not in up state +description: + + - Troubleshoot the show vlt info and raise an error if peer is not up. + +options: + show_vlt: + description: + - show vlt output + type: 'list' + required: True + show_system_network_summary: + description: + - show system summary output + type: 'list' + required: True + intended_vlt_pairs: + description: + - intended vlt pair intput to verify with actual + type: 'list' + required: True + +''' +EXAMPLES = ''' +Copy below YAML into a playbook (e.g. 
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from collections import OrderedDict
import traceback


class VltValidation(object):
    """Validate the actual VLT pairing (parsed from ``show vlt`` output)
    against the intended VLT pairs supplied by the playbook, and report
    every mismatch found."""

    def __init__(self):
        self.module = AnsibleModule(argument_spec=self.get_fields())
        self.show_vlt = self.module.params['show_vlt']
        self.show_system_network_summary = self.module.params['show_system_network_summary']
        self.intended_vlt_pairs = self.module.params['intended_vlt_pairs']
        self.exit_msg = OrderedDict()

    def get_fields(self):
        """Return the Ansible module argument spec."""
        spec_fields = {
            'show_vlt': {
                'type': 'list',
                'required': True
            },
            'show_system_network_summary': {
                'type': 'list',
                'required': True
            },
            'intended_vlt_pairs': {
                'type': 'list',
                'required': True
            }
        }
        return spec_fields

    # get switch inv name from mac
    def get_switch_inv_name_from_mac(self, mac):
        """Return the inventory name of the switch whose node MAC equals
        *mac* (case-insensitive), or None when no switch matches."""
        inv_name = None
        for show_system in self.show_system_network_summary:
            if (str.lower(show_system["node-mac"])) == (str.lower(mac)):
                inv_name = show_system.get("inv_name")
                break
        return inv_name

    def validate_vlt_pairs(self, actual_vlt_dict):
        """Compare each intended primary/secondary pair against
        *actual_vlt_dict* (keyed by primary switch inventory name) and
        return a list of error dicts describing every discrepancy."""
        final_out = list()
        intended_vlt_list = self.intended_vlt_pairs
        for intended_vlt in intended_vlt_list:
            intended_primary = intended_vlt.get("primary")
            intended_secondary = intended_vlt.get("secondary")
            actual_vlt = actual_vlt_dict.get(intended_primary)
            temp_dict = {}
            if actual_vlt is not None:
                actual_secondary = actual_vlt.get("secondary")
                secondary_status = actual_vlt.get("secondary_status")
                if actual_secondary is not None and intended_secondary != actual_secondary:
                    # a peer exists but it is not the one the user planned
                    temp_dict["error_type"] = "secondary_mismatch"
                    temp_dict["intended_primary"] = intended_primary
                    temp_dict["intended_secondary"] = intended_secondary
                    temp_dict["secondary"] = actual_secondary
                    reason = "config mismatch as {0} is expected, but the actual secondary is {1} " .format(
                        intended_secondary, actual_secondary)
                    temp_dict["possible_reason"] = reason
                    final_out.append(temp_dict)
                else:
                    if actual_secondary is None:
                        # primary switch answered but carries no peer info at all
                        temp_dict["intended_primary"] = intended_primary
                        temp_dict["intended_secondary"] = intended_secondary
                        temp_dict["error_type"] = "peer_missing"
                        reason = "peer info is not configured or peer interface is down"
                        temp_dict["possible_reason"] = reason
                        final_out.append(temp_dict)
                    elif intended_secondary == actual_secondary and secondary_status != "up":
                        # right peer is configured, but the peer link is not up
                        temp_dict["intended_primary"] = intended_primary
                        temp_dict["intended_secondary"] = intended_secondary
                        temp_dict["secondary"] = actual_secondary
                        temp_dict["error_type"] = "peer_down"
                        reason = "peer interface is down"
                        temp_dict["possible_reason"] = reason
                        final_out.append(temp_dict)
            else:
                # no VLT data at all for the intended primary switch
                temp_dict["intended_primary"] = intended_primary
                temp_dict["intended_secondary"] = intended_secondary
                temp_dict["error_type"] = "vlt_config_missing"
                temp_dict["possible_reason"] = "vlt is not configured"
                final_out.append(temp_dict)
        return final_out

    def parse_vlt_output(self):
        """Walk the base_xml_to_dict results in ``self.show_vlt`` and build
        a dict of VLT info keyed by the primary switch inventory name.

        FIX: the original dereferenced ``item.get("inv_name")``,
        ``msg.get("result")`` and ``sub_result.get("rpc-reply")`` without
        None checks, although every deeper lookup is guarded; partial or
        error RPC output raised AttributeError/TypeError.  Those three
        lookups are now guarded in the same style as the rest."""
        show_vlt_dict = {}
        for show_list in self.show_vlt:
            source_switch = None
            item = show_list.get("item")
            if item is not None:
                inv_info = item.get("inv_name")
                # FIX: tolerate a missing/None 'inv_name' entry
                if inv_info is not None:
                    source_switch = inv_info.get("inv_name")
            msg = show_list.get("msg")
            if msg is not None:
                result = msg.get("result")
                # FIX: tolerate a missing result list
                for sub_result in result or []:
                    vlt_dict = {}
                    rpc_reply = sub_result.get("rpc-reply")
                    # FIX: rpc-reply may be absent in error responses
                    data = rpc_reply.get("data") if rpc_reply is not None else None
                    if data is not None:
                        topo_oper_data = data.get("topology-oper-data")
                        if topo_oper_data is not None:
                            vlt_domain = topo_oper_data.get("vlt-domain")
                            if vlt_domain is not None:
                                local_info = vlt_domain.get("local-info")
                                if local_info is not None:
                                    local_role = local_info.get("role")
                                    vlt_dict[local_role] = source_switch
                                    local_mac = local_info.get("system-mac")
                                    vlt_dict[local_role + "_mac"] = local_mac
                                peer_info = vlt_domain.get("peer-info")
                                if peer_info is not None:
                                    peer_mac = peer_info.get("system-mac")
                                    peer_switch = self.get_switch_inv_name_from_mac(
                                        peer_mac)
                                    peer_role = peer_info.get("role")
                                    vlt_dict[peer_role] = peer_switch
                                    vlt_dict[peer_role + "_mac"] = peer_mac
                                    peer_status = peer_info.get("peer-status")
                                    vlt_dict[peer_role + "_status"] = peer_status
                    if bool(vlt_dict):
                        primary_switch = vlt_dict.get("primary")
                        vlt_data = show_vlt_dict.get(primary_switch)
                        if vlt_data is None:
                            # update database specific to primary, it helps
                            # to avoid to skip duplicate data
                            show_vlt_dict[primary_switch] = vlt_dict
        return show_vlt_dict

    def perform_action(self):
        """Entry point: parse, validate, and exit with the results."""
        try:
            actual_vlt_dict = self.parse_vlt_output()
            final_out = self.validate_vlt_pairs(actual_vlt_dict)
            self.exit_msg.update({"results": final_out})
            self.module.exit_json(changed=False, msg=self.exit_msg)
        except Exception as e:
            self.module.fail_json(
                msg=to_native(e),
                exception=traceback.format_exc())


def main():
    module_instance = VltValidation()
    module_instance.perform_action()


if __name__ == '__main__':
    main()
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from collections import OrderedDict
import re
import traceback


class WiringValidation(object):
    """Check the actual wiring (LLDP neighbors resolved through the system
    network summary) against the planned wiring and report mismatches."""

    def __init__(self):
        self.module = AnsibleModule(argument_spec=self.get_fields())
        self.show_lldp_neighbors_list = self.module.params['show_lldp_neighbors_list']
        self.show_system_network_summary = self.module.params['show_system_network_summary']
        self.planned_neighbors = self.module.params['planned_neighbors']
        self.exit_msg = OrderedDict()

    def get_fields(self):
        """Return the Ansible module argument spec."""
        return {
            'show_lldp_neighbors_list': {'type': 'list', 'required': True},
            'show_system_network_summary': {'type': 'list', 'required': True},
            'planned_neighbors': {'type': 'list', 'required': True},
        }

    # get switch inv name from mac
    def get_switch_inv_name_from_mac(self, mac):
        """Return the inventory name whose node MAC matches *mac*
        (case-insensitive), or None when nothing matches."""
        wanted = str.lower(mac)
        for summary in self.show_system_network_summary:
            if str.lower(summary["node-mac"]) == wanted:
                return summary.get("inv_name")
        return None

    # get service tag for switch

    def get_service_tag_and_mac(self):
        """Map each switch inventory name to its service tag and node MAC."""
        svc_tag_mac = {}
        for summary in self.show_system_network_summary:
            record = {
                "svc-tag": summary.get("service-tag"),
                "node-mac": summary.get("node-mac"),
            }
            if record:
                svc_tag_mac[summary["inv_name"]] = record
        return svc_tag_mac

    # form actual neighbors per network with help of lldp output and show
    # sytem output
    def get_actual_neigbor(self, lldp_list):
        """Build the actual neighbor table from parsed LLDP data, resolving
        each remote MAC to an inventory name ("unknown" when unresolvable)."""
        neighbors = list()
        for entry in lldp_list:
            src = entry["inv_name"]
            for idx, mac in enumerate(entry["rem_mac"]):
                resolved = self.get_switch_inv_name_from_mac(mac)
                neighbors.append({
                    "source_switch": src,
                    "source_port": entry["loc_port"][idx],
                    "dest_port": entry["rem_port"][idx],
                    "dest_switch": resolved if resolved is not None else "unknown",
                })
        return neighbors

    def parse_lldp_output(self):
        """Extract local-port / remote-port / remote-MAC triples from each
        switch's raw 'show lldp neighbors' text."""
        pattern = re.compile(r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)")
        nbr_list = list()
        for item in self.show_lldp_neighbors_list:
            loc_port, rem_port, rem_mac = [], [], []
            out_dict = {
                "host": item.get("host"),
                "inv_name": item.get("inv_name"),
            }
            raw = item.get("stdout_show_lldp")
            if raw is not None:
                for line in str(raw[0]).splitlines():
                    if "Loc PortID" in line:
                        continue  # table header row
                    match = pattern.match(line)
                    if match:
                        fields = match.groups()
                        loc_port.append(fields[0])
                        rem_port.append(fields[2])
                        rem_mac.append(fields[3])
            out_dict["loc_port"] = loc_port
            out_dict["rem_port"] = rem_port
            out_dict["rem_mac"] = rem_mac
            if out_dict:
                nbr_list.append(out_dict)
        return nbr_list

    def perform_action(self):
        """Entry point: compare planned wiring with LLDP reality and exit
        with the list of mismatched/missing links."""
        try:
            lldp_list = self.parse_lldp_output()
            actual_nbr = self.get_actual_neigbor(lldp_list)
            svc_tag_mac = self.get_service_tag_and_mac()
            mismatch_list = list()
            # Validate the planned neighbors with actual neighbors
            for planned in self.planned_neighbors:
                if planned in actual_nbr:
                    continue  # exact match, nothing to report
                found = False
                for actual in actual_nbr:
                    same_link = (actual["source_switch"] == planned["source_switch"]
                                 and actual["source_port"] == planned["source_port"])
                    if not same_link:
                        continue
                    if actual["dest_switch"] != planned["dest_switch"]:
                        found = True
                        if actual["dest_switch"] != "unknown":
                            reason = (
                                "Destination switch is not an expected value, "
                                "expected switch: {0},port: {1}; actual switch: {2}(svc-tag:{3}, node_mac:{4}), port: {5}".format(
                                    planned["dest_switch"],
                                    planned["dest_port"],
                                    actual["dest_switch"],
                                    svc_tag_mac.get(actual["dest_switch"]).get("svc-tag"),
                                    svc_tag_mac.get(actual["dest_switch"]).get("node-mac"),
                                    actual["dest_port"]))
                        else:
                            reason = (
                                "Destination switch is not an expected value, "
                                "expected switch: {0},port: {1}; actual switch: {2}, port: {3}".format(
                                    planned["dest_switch"],
                                    planned["dest_port"],
                                    actual["dest_switch"],
                                    actual["dest_port"]))
                        planned["reason"] = reason
                        planned["error_type"] = "link-mismatch"
                        break
                    if actual["dest_port"] != planned["dest_port"]:
                        found = True
                        planned["reason"] = (
                            "Destination switch port is not an expected value, "
                            "expected port: {0} actual port: {1}".format(
                                planned["dest_port"],
                                actual["dest_port"]))
                        planned["error_type"] = "link-mismatch"
                        break
                if not found:
                    planned["reason"] = "link is not found for source switch: {0},port: {1}".format(
                        planned["source_switch"], planned["source_port"])
                    planned["error_type"] = "link-missing"
                mismatch_list.append(planned)

            self.exit_msg.update({"results": mismatch_list})
            self.module.exit_json(changed=False, msg=self.exit_msg)
        except Exception as e:
            self.module.fail_json(
                msg=to_native(e),
                exception=traceback.format_exc())


def main():
    module_instance = WiringValidation()
    module_instance.perform_action()


if __name__ == '__main__':
    main()
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import json + +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error"), + re.compile(br"% ?Bad secret"), + re.compile(br"Syntax error:"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"[^\r\n]+ not found", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + def on_open_shell(self): + try: + self._exec_cli_command(b'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode') + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if 
prompt.strip().endswith(b')#'): + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE b/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/README.md b/ansible_collections/dellemc/os10/roles/os10_aaa/README.md new file mode 100644 index 00000000..cabee7ea --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/README.md @@ -0,0 +1,136 @@ +AAA role +======== + +This role facilitates the configuration of authentication, authorization, and acccounting (AAA). It supports the configuration of RADIUS server, TACACS server, and AAA. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The AAA role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_aaa keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os10 | +| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os10 | +| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions | os10 | +| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os10 | +| ``host.ip`` | string | Configures the RADIUS server host address | os10 | +| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``host.auth_port`` | integer | Configures 
the authentication port (0 to 65535) | os10 | +| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os10 | +| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``) | os10 | +| ``tacacs_server.timeout`` | integer | Configures the timeout for retransmissions | os10 | +| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os10 | +| ``host.ip`` | string | Configures the TACACS server host address | os10 | +| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 | +| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os10 | +| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 | +| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os10 | +| ``aaa_accounting.accounting.accounting_type`` | dictionary | Configures accounting type | os10 | +| ``aaa_accounting.accounting.connection_type`` | dictionary | Configures accounting connection type | os10 | +| ``aaa_accounting.accounting.account_mode`` | dictionary | Configures accounting mode | os10 | +| ``aaa_accounting.accounting.server_group`` | dictionary | Configures accounting server group | os10 | +| ``aaa_accounting.accounting.state`` | string: present,absent | Configures/unconfigures accounting parameters | os10 | +| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os10 | +| ``aaa_authentication.login`` | dictionary | Configures authentication login (see ``aaa_authentication.login.*``)| os10 | +| ``aaa_authentication.login.console`` | dictionary | Configures authentication method for console login | os10 | +| 
``aaa_authentication.login.state`` | string: present,absent | Unconfigures authentication login if set to absent | os10 | +| ``aaa_authentication.login.type`` | dictionary | Configures authentication type | os10 | +| ``aaa_authentication.re_authenticate`` | boolean | Configures re-authenticate by enable if set to true | os10 | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment 
variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_aaa* role to configure AAA for radius and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false and it writes a simple playbook that only references the *os10_aaa* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_aaa: + radius_server: + retransmit: 5 + timeout: 10 + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + tacacs_server: + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + timeout: 6 + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: true + aaa_accounting: + accounting: + - accounting_type: commands + connection_type: console + account_mode: start-stop + server_group: group tacacs+ + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_aaa + +**Run** + + ansible-playbook 
-i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml new file mode 100644 index 00000000..4f8b5c8c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# defaults file for dellemc.os10.os10_aaa +attribute_type: + mandatory: mandatory + on_for_login_auth: on-for-login-auth + include_in_access_req: include-in-access-req + mac: "mac format" + mac_ietf: "mac format ietf" + mac_ietf_lower_case: "mac format ietf lower-case" + mac_ietf_upper_case: "mac format ietf upper-case" + mac_legacy: "mac format legacy" + mac_legacy_lower_case: "mac format legacy lower-case" + mac_legacy_upper_case: "mac format legacy upper-case" + mac_unformatted: "mac format unformatted" + mac_unformatted_lower_case: "mac format unformatted lower-case" + mac_unformatted_upper_case: "mac format unformatted upper-case" diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml new file mode 100644 index 00000000..0b86ef12 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_aaa diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml new file mode 100644 index 00000000..54fde54e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml @@ -0,0 +1,17 @@ +# copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_aaa role facilitates the configuration of Authentication Authorization Acccounting (AAA) attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml new file mode 100644 index 00000000..77eb07fc --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os10 + + - name: "Generating AAA configuration for os10" + template: + src: os10_aaa.j2 + dest: "{{ build_dir }}/aaa10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning AAA configuration for os10" + os10_config: + src: os10_aaa.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2 b/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2 new file mode 100644 index 00000000..438c0f8e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2 @@ -0,0 +1,148 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure AAA commands for os10 Devices + +os10_aaa: + radius_server: + retransmit: 5 + timeout: 10 + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + tacacs_server: + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + timeout: 6 + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: true + aaa_accounting: + accounting: + - accounting_type: commands + connection_type: console + account_mode: 
start-stop + server_group: group tacacs+ + state: present + +##################################################} +{% if os10_aaa is defined and os10_aaa %} + {% if os10_aaa.radius_server is defined and os10_aaa.radius_server %} + {% set item = os10_aaa.radius_server %} + {% if item.retransmit is defined and item.retransmit %} +radius-server retransmit {{ item.retransmit }} + {% else %} +no radius-server retransmit + {% endif %} + {% if item.timeout is defined and item.timeout %} +radius-server timeout {{ item.timeout }} + {% else %} +no radius-server timeout + {% endif %} + {% if item.host is defined and item.host %} + {% for it in item.host %} + {% if it.ip is defined and it.ip %} + {% if it.state is defined and it.state == "absent" %} +no radius-server host {{ it.ip }} + {% else %} + {% if it.auth_port is defined and it.auth_port %} +radius-server host {{ it.ip }} auth-port {{ it.auth_port }} key {{ it.key }} {{ it.value }} + {% else %} +radius-server host {{ it.ip }} key {{ it.key }} {{ it.value }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if os10_aaa.tacacs_server is defined and os10_aaa.tacacs_server %} + {% set item = os10_aaa.tacacs_server %} + {% if item.timeout is defined and item.timeout %} +tacacs-server timeout {{ item.timeout }} + {% else %} +no tacacs-server timeout + {% endif %} + {% if item.host is defined and item.host %} + {% for it in item.host %} + {% if it.ip is defined and it.ip %} + {% if item.state is defined and item.state == "absent"%} +no tacacs-server host {{it.ip}} + {% else %} + {% if it.auth_port is defined and it.auth_port %} +tacacs-server host {{it.ip}} auth-port {{it.auth_port}} key {{it.key}} {{it.value}} + {% else %} +tacacs-server host {{it.ip}} key {{it.key}} {{it.value}} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if os10_aaa.aaa_authentication is defined and os10_aaa.aaa_authentication %} + {% set aaa_list = 
os10_aaa.aaa_authentication %} + {% if aaa_list.login is defined and aaa_list.login %} + {% for aaa_vars in aaa_list.login %} + {% if aaa_vars.console is defined and aaa_vars.console %} + {% if aaa_vars.state is defined and aaa_vars.state == "absent"%} +no aaa authentication login console + {% else %} + {% if aaa_vars.type is defined and aaa_vars.type %} +aaa authentication login console {{aaa_vars.type}} + {% endif %} + {% endif %} + {% else %} + {% if aaa_vars.state is defined and aaa_vars.state == "absent"%} +no aaa authentication login default + {% else %} + {% if aaa_vars.type is defined and aaa_vars.type %} +aaa authentication login default {{aaa_vars.type}} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% set aaa_vars = os10_aaa.aaa_authentication %} + {% if aaa_vars.re_authenticate is defined %} + {% if aaa_vars.re_authenticate %} +aaa re-authenticate enable + {% else %} +no aaa re-authenticate enable + {% endif %} + {% endif %} + {% endif %} + {% if os10_aaa.aaa_accounting is defined and os10_aaa.aaa_accounting %} + {% set acc_list = os10_aaa.aaa_accounting %} + {% if acc_list.accounting is defined and acc_list.accounting %} + {% for aaa_vars in acc_list.accounting %} + {% if aaa_vars.accounting_type is defined and aaa_vars.accounting_type %} + {% if aaa_vars.accounting_type == "commands" %} + {% set accounting_type = aaa_vars.accounting_type + " all" %} + {% endif %} + {% if aaa_vars.connection_type is defined and aaa_vars.connection_type %} + {% if aaa_vars.state is defined and aaa_vars.state == "absent"%} +no aaa accounting {{accounting_type}} {{aaa_vars.connection_type}} + {% else %} + {% if aaa_vars.account_mode is defined and aaa_vars.account_mode == "none" %} +aaa accounting {{accounting_type}} {{aaa_vars.connection_type}} {{aaa_vars.account_mode}} + {% else %} + {% if aaa_vars.server_group is defined and aaa_vars.server_group %} +aaa accounting {{accounting_type}} {{aaa_vars.connection_type}} {{aaa_vars.account_mode}} 
{{aaa_vars.server_group}} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml new file mode 100644 index 00000000..a845c14d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml @@ -0,0 +1,35 @@ +--- +# vars file for dellemc.os10.os10_aaa, +# below gives a sample configuration +# Sample vars for OS10 device +os10_aaa: + radius_server: + retransmit: 5 + timeout: 10 + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + tacacs_server: + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + value: "abc" + auth_port: 3 + state: present + timeout: 6 + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: true + aaa_accounting: + accounting: + - accounting_type: commands + connection_type: console + account_mode: start-stop + server_group: group tacacs+ + state: present diff 
--git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml new file mode 100644 index 00000000..b3d685fb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_aaa diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml new file mode 100644 index 00000000..6854698e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml @@ -0,0 +1 @@ +# vars file for dellemc.os10.os10_aaa diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE b/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/README.md b/ansible_collections/dellemc/os10/roles/os10_acl/README.md new file mode 100644 index 00000000..14a1fe2a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/README.md @@ -0,0 +1,130 @@ +ACL role +======== + +This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The ACL role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_acl keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os10 | +| ``name`` | string (required) | Configures the name of the access-control list | os10 | +| ``description`` | string | Configures the description about the access-control list | os10 | +| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os10| +| ``remark.number`` | integer (required) | Configures the remark sequence number | os10 | +| ``remark.description`` | string | Configures the remark 
description | os10 | +| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os10 | +| ``entries`` | list | Configures ACL rules (see ``seqlist.*``) | os10 | +| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os10 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os10 | +| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os10 | +| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os10 | +| ``entries.src_condition`` | string | Specifies the condition to filter packets from the source address; ignored if MAC | os10 | +| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os10 | +| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os10 | +| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os10 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os10 | +| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os10 | +| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os10 | +| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os10 | +| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os10 | +| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os10 | +| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress 
ACL from the interface if set to absent | os10 | +| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os10 | +| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the 
`ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os10_acl* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_acl: + - name: ssh + type: ipv4 + description: acl + remark: + - description: 1 + number: 3 + state: absent + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + destination: any + dest_condition: eq 4 + other_options: count + state: present + stage_ingress: + - name: ethernet 1/1/1 + state: absent + - name: ethernet 1/1/2 + state: absent + stage_egress: + - name: ethernet 1/1/3 + state: absent + lineterminal: + state: absent + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_acl + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml new file mode 100644 index 00000000..9c7559e3 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_acl diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml new file mode 100644 index 00000000..162d4a3f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_acl diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml new file mode 100644 index 00000000..c354b58f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml new file mode 100644 index 00000000..ace51340 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating ACL configuration for os10" + template: + src: os10_acl.j2 + dest: "{{ build_dir }}/acl10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning ACL configuration for os10" + os10_config: + src: os10_acl.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2 b/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2 new file mode 100644 index 00000000..7d6cb31d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2 @@ -0,0 +1,212 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure ACL commands for OS10 devices + +os10_acl: + - name: ssh-only + type: ipv4 + description: acl + remark: + - description: 1 + number: 3 + state: present + entries: + - number: 10 + permit: true + protocol: tcp + source: any + destination: any + src_condition: eq 22 + dest_condition: eq 2 ack + other_options: count + state: present + stage_ingress: + - name: ethernet 1/1/1 + state: present + - name: ethernet 1/0/1 + state: present + stage_egress: + - name: ethernet 1/1/2 + state: present + lineterminal: + state: present + state: present 
+#####################################} +{% if os10_acl is defined and os10_acl %} + {% for val in os10_acl %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} + {% if val.type is defined and val.type == "ipv4" %} +no ip access-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +no ipv6 access-list {{ val.name }} + {% elif val.type is defined and val.type == "mac" %} +no mac access-list {{ val.name }} + {% endif %} + {% else %} + {% if val.type is defined and val.type == "ipv4" %} +ip access-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +ipv6 access-list {{ val.name }} + {% elif val.type is defined and val.type == "mac" %} +mac access-list {{ val.name }} + {% endif %} + {% if val.description is defined %} + {% if val.description %} + description "{{ val.description }}" + {% endif %} + {% endif %} + {% if val.remark is defined and val.remark %} + {% for remark in val.remark %} + {% if remark.description is defined and remark.description %} + {% if remark.number is defined and remark.number %} + {% if remark.state is defined and remark.state == "absent" %} + no seq {{ remark.number }} + {% else %} + seq {{ remark.number }} remark "{{ remark.description }}" + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.entries is defined and val.entries %} + {% for rule in val.entries %} + {% if rule.number is defined and rule.number %} + {% if rule.state is defined and rule.state == "absent" %} + no seq {{ rule.number }} + {% else %} + {% if rule.permit is defined %} + {% if rule.permit %} + {% set is_permit = "permit" %} + {% else %} + {% set is_permit = "deny" %} + {% endif %} + {% endif %} + {% if val.type is defined and val.type %} + {% if rule.protocol is defined and rule.protocol %} + {% if rule.source is defined and rule.source %} + {% if rule.src_condition is defined and rule.src_condition %} + {% if rule.destination is defined and 
rule.destination %} + {% if rule.dest_condition is defined and rule.dest_condition %} + {% if rule.other_options is defined and rule.other_options %} + {% set other_options = rule.other_options %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} + {% endif %} + {% else %} + {% if rule.other_options is defined and rule.other_options %} + {% set other_options = rule.other_options %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} + {% endif %} + {% endif %} + {% endif %} + {% else %} + {% if rule.destination is defined and rule.destination %} + {% if rule.dest_condition is defined and rule.dest_condition %} + {% if rule.other_options is defined and rule.other_options %} + {% set other_options = rule.other_options %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} + {% endif %} + {% else %} + {% if rule.other_options is defined and rule.other_options %} + {% set other_options = rule.other_options %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% 
endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if val.lineterminal is defined and val.lineterminal %} + {% if val.type is defined and not val.type == "mac" %} +line vty + {% if val.lineterminal.state is defined and val.lineterminal.state == "absent" %} + {% if val.type == "ipv6" %} + no ipv6 access-class {{ val.name }} + {% elif val.type == "ipv4" %} + no ip access-class {{ val.name }} + {% endif %} + {% else %} + {% if val.type == "ipv6" %} + ipv6 access-class {{ val.name }} + {% elif val.type == "ipv4" %} + ip access-class {{ val.name }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + {% if val.stage_ingress is defined and val.stage_ingress %} + {% for intf in val.stage_ingress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + no mac access-group {{ val.name }} in + {% elif val.type is defined and val.type == "ipv6" %} + no ipv6 access-group {{ val.name }} in + {% else %} + no ip access-group {{ val.name }} in + {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + mac access-group {{ val.name }} in + {% elif val.type is defined and val.type == "ipv6" %} + ipv6 access-group {{ val.name }} in + {% else %} + ip access-group {{ val.name }} in + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if val.stage_egress is defined and val.stage_egress %} + {% for intf in val.stage_egress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + no mac access-group {{ val.name }} out + {% elif val.type is defined and val.type == "ipv6" %} + no ipv6 access-group {{ val.name }} out + {% else %} + no ip access-group {{ val.name }} out 
+ {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + mac access-group {{ val.name }} out + {% elif val.type is defined and val.type == "ipv6" %} + ipv6 access-group {{ val.name }} out + {% else %} + ip access-group {{ val.name }} out + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml new file mode 100644 index 00000000..c3db9c98 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml @@ -0,0 +1,33 @@ +--- +# vars file for dellemc.os10.os10_acl, +# below gives a sample configuration +# Sample variables for OS10 device +os10_acl: + - name: ssh + type: ipv4 + description: acl + remark: + - description: 1 + number: 3 + state: absent + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + destination: any + dest_condition: eq 
4 + other_options: count + state: present + stage_ingress: + - name: ethernet 1/1/1 + state: absent + - name: ethernet 1/1/2 + state: absent + stage_egress: + - name: ethernet 1/1/3 + state: absent + lineterminal: + state: absent + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml new file mode 100644 index 00000000..653f9d69 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_acl diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml new file mode 100644 index 00000000..0cd37964 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_acl diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE b/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/README.md b/ansible_collections/dellemc/os10/roles/os10_bfd/README.md new file mode 100644 index 00000000..c6907992 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/README.md @@ -0,0 +1,89 @@ +BFD role +=========== + +This role facilitates the configuration of bidirectional forwarding detection (BFD) global attributes. It specifically enables configuration of BFD interval, min_rx, multiplier, and role. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The BFD role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_bfd keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``bfd`` | dictionary | Configures global BFD parameters (see ``bfd.*``) | os10 | +| ``bfd.interval`` | integer | Configures the time interval in ms (100 to 1000) | os10 | +| ``bfd.min_rx`` | integer | Configures maximum waiting time for receiving control packets from BFD peers in ms (100 to 1000)| os10 | +| ``bfd.multiplier`` | integer | Configures the maximum number of consecutive packets that are not received from BFD peers before session state changes to Down (3 to 50) | os10 | +| ``bfd.role`` | string: passive,active\* | Configures the BFD role | os10 | +| 
``bfd.state`` | string: absent,present\* | Removes the global BFD if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +******************** + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| 
``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_bfd role* to completely set the global BFD attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The bfd role writes a simple playbook that only references the *os10_bfd* role. By including the role, you automatically get access to all of the tasks to configure BFD feature. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_bfd: + bfd: + interval: 100 + min_rx: 100 + multiplier: 3 + role: "active" + state: "present" + +**Simple playbook to setup bfd — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_bfd + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml new file mode 100644 index 00000000..4aa9bfbb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_bfd diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml new file mode 100644 index 00000000..b490464e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_bfd diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml new file mode 100644 index 00000000..fce02059 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + role_name: os10_bfd + author: Dell EMC Networking Engineering + description: The os10_bfd role facilitates the configuration of global bfd attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml new file mode 100644 index 00000000..88ac0eb5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating bfd global configuration for os10" + template: + src: os10_bfd.j2 + dest: "{{ build_dir }}/bfd10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning bfd global configuration for os10" + os10_config: + src: os10_bfd.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2 b/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2 new file mode 100644 index 00000000..18c94644 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2 @@ -0,0 +1,34 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +#Purpose: +Configure bfd global commands for os10 Devices + +os10_bfd: + bfd: + interval: 200 + min_rx: 200 + multiplier: 3 + role: "active" + state: "present" +###############################################} +{% if os10_bfd is defined and os10_bfd %} + {% set bfd_vars = os10_bfd %} + {% if bfd_vars.bfd is defined and bfd_vars.bfd %} + {% set bfd = bfd_vars.bfd %} + {% if bfd.state is defined and bfd.state == "absent" %} +no bfd enable +no bfd interval + {% else %} +bfd enable + {% if bfd.interval is defined and bfd.min_rx is defined and bfd.multiplier is 
defined %} + {% if bfd.interval and bfd.min_rx and bfd.multiplier %} + {% if bfd.role is defined and bfd.role %} +bfd interval {{ bfd.interval }} min_rx {{ bfd.min_rx }} multiplier {{ bfd.multiplier }} role {{ bfd.role }} + {% else %} +bfd interval {{ bfd.interval }} min_rx {{ bfd.min_rx }} multiplier {{ bfd.multiplier }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml new file mode 100644 index 00000000..844b91c1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml @@ -0,0 +1,11 @@ +--- +# vars file for dellemc.os10.os10_bfd, +# below gives a sample configuration +# Sample variables for OS10 device +os10_bfd: + bfd: + interval: 100 + min_rx: 100 + multiplier: 3 + role: "active" + state: "present" diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml new file mode 100644 index 00000000..a0de5db5 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_bfd diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml new file mode 100644 index 00000000..781a25c8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_bfd diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/README.md b/ansible_collections/dellemc/os10/roles/os10_bgp/README.md new file mode 100644 index 00000000..e4e7c94e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/README.md @@ -0,0 +1,729 @@ +BGP role +======== + +This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum paths. This role is abstracted for Dell EMC PowerSwitch platforms running SmartFabric OS10. + +The BGP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_bgp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os10 | +| ``router_id`` | string | Configures the IP address of the local BGP router instance | os10 | +| ``graceful_restart`` | boolean | Configures graceful restart capability | os10 | +| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os10 | +| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os10 | +| ``log_neighbor_changes`` | boolean | Configures log 
neighbors up/down | os10 | +| ``fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down | os10 | +| ``always_compare_med`` | boolean | Configures comparing MED from different neighbors | os10 | +| ``default_loc_pref`` | integer | Configures the default local preference value | os10 | +| ``as_notation`` | string | Configures AS number notation format | os10 | +| ``enforce_first_as`` | boolean | Configures the first AS for eBGP routes | os10 | +| ``non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm | os10 | +| ``outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members | os10 | +| ``confederation`` | dictionary | Configures AS confederation parameters (see ``confederation.*``) | os10 | +| ``confederation.identifier`` | integer | Configures the routing domain confederation AS | os10 | +| ``confederation.peers`` | string | Configures the peer AS in BGP confederation | os10 | +| ``confederation.peers_state`` | string: absent,present\* | Deletes the peer AS in BGP confederation if set to absent | os10 | +| ``route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) | os10 | +| ``route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection | os10 | +| ``route_reflector.cluster_id`` | string | Configures the route reflector cluster-id | os10 | +| ``address_family_ipv4`` | dictionary | Configures IPv4 address family parameters (see ``address_family_ipv4.*``) | os10 | +| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for 
IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures internal BGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the internal BGP redistribution for an IPv4 address family | os10 | +| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 | +| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``address_family_ipv6`` | dictionary | Configures IPv6 address family parameters (see ``address_family_ipv6.*``) | os10 | +| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address 
| os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 | +| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 | +| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for an IPv6 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os10 | +| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 | +| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 | +| 
``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 | +| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 | +| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 | +| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 | +| ``ipv4_network`` | list | Configures an IPv4 BGP networks (see ``ipv4_network.*``) | os10 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 | +| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 | +| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 | +| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 | +| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os10 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 | +| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 | +| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 | +| ``neighbor.auto_peer`` |string: unnumbered-auto | Enables auto discovery of neighbors | os10 | +| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 | +| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 | +| 
``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 | +| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 | +| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 | +| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 | +| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 | +| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 | +| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 | +| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 | +| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 | +| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 | +| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 | +| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 | +| ``address_family.activate`` | boolean | Configures activation/deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 | +| ``address_family.sender_loop_detect`` | boolean | Enables/disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 | +| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 | +| ``address_family.next_hop_self`` | boolean | Configures disabling the 
next-hop calculation for the neighbor | os10 | +| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 | +| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both ', 'send ', 'receive')| os10 | +| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 | +| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 | +| ``route_map.filter`` | string | Configures the filter for routing updates | os10 | +| ``route_map.state`` | string, choices: absent,present\* | Deletes the route-map of the BGP neighbor if set to absent | os10 | +| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.\*``) | os10 | +| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 | +| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 | +| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 | +| ``max_prefix.warning``| boolean | Configures a warning without dropping the session when maximum limit exceeds if set to true | os10| +| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 | +| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 | +| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 | +| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: 
absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 | +| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 | +| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 | +| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 | +| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 | +| 
``neighbor.adv_start`` | integer | Set the advertisement start of the neighbor | os10 | +| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of a neighbor | os10 | +| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 | +| ``neighbor.remove_pri_as`` | string: absent,present | Configures the remove private AS number from outbound updates | os10 | +| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 | +| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 | +| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 | +| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 | +| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 | +| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 | +| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 | +| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 | +| ``neighbor.description`` | string | Configures neighbor description | os10 | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 | +| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os10 | +| ``redistribute.route_type`` | string (required): static,connected,imported_bgp,l2vpn,ospf | Configures the name of the routing protocol to 
redistribute | os10 | +| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 | +| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 | +| ``redistribute.ospf_id`` | string | Configures the redistribute OSPF | os10 | +| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 | +| ``bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors | os10 | +| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 | +| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 | +| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 | +| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 | +| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 | +| ``vrfs`` | list | Enables VRF under BGP | os10 | +| ``vrf.name`` | string (Required)| Configures VRF name | os10 | +| ``vrf.router_id`` | string | Configures the IP address of the local BGP router instance in VRF | os10 | +| ``vrf.graceful_restart`` | boolean | Configures graceful restart capability in VRF | os10 | +| ``vrf.maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) in VRF | os10 | +| ``vrf.maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) in VRF | os10 | +| ``vrf.log_neighbor_changes`` | boolean | Configures log neighbors 
up/down in VRF | os10 | +| ``vrf.fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down in VRF | os10 | +| ``vrf.always_compare_med`` | boolean | Configures comparing MED from different neighbors in VRF | os10 | +| ``vrf.default_loc_pref`` | integer | Configures the default local preference value in VRF | os10 | +| ``vrf.as_notation`` | string | Changes the AS number notation format in VRF | os10 | +| ``vrf.enforce_first_as`` | boolean | Configures the first AS for eBGP routes in VRF | os10 | +| ``vrf.non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm in VRF | os10 | +| ``vrf.outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members in VRF | os10 | +| ``vrf.route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) in VRF | os10 | +| ``vrf.route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection in VRF | os10 | +| ``vrf.route_reflector.cluster_id`` | string | Configures the route-reflector cluster-id in VRF | os10 | +| ``vrf.address_family_ipv4`` | dictionary | Configures IPv4 address family parameters in VRF (see ``address_family_ipv4.*``) in VRF | os10 | +| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) in VRF | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address in VRF | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true in VRF | os10 | +| ``aggregate_addr.state`` | string: 
absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent in VRF | os10 | +| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv4 address family | os10 | +| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 | +| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``vrf.address_family_ipv6`` | dictionary | Configures IPv6 address family parameters in VRF (see ``address_family_ipv6.*``) | os10 | +| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 | +| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 | +| ``aggregate_addr.summary_only`` | boolean | 
Sets address to summary-only if true | os10 | +| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 | +| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 | +| ``dampening.value`` | dictionary | Configures dampening values ( format; default 15 750 2000 60) | os10 | +| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 | +| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 | +| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 | +| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 | +| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 | +| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 | +| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv6 address family ( format; distance bgp 2 3 4) | os10 | +| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 | +| ``vrf.best_path`` | list | Configures the default best-path selection in VRF (see ``best_path.*``) | os10 | +| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 | +| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 | +| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 | +| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 | +| ``med.attribute`` | string (required): confed,missing-as-best | 
Configures the MED attribute used for the best-path computation | os10 | +| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 | +| ``vrf.ipv4_network`` | list | Configures an IPv4 BGP networks (see ``ipv4_network.*``) | os10 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 | +| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 | +| ``vrf.ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 | +| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 | +| ``vrf.neighbor`` | list | Configures IPv4 BGP neighbors in VRF (see ``neighbor.*``) | os10 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 | +| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 | +| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 | +| ``neighbor.auto_peer`` |string: unnumbered-auto | Enable auto-discovery of neighbors | os10 | +| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 | +| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 | +| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 | +| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 | +| 
``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 | +| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 | +| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 | +| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 | +| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 | +| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 | +| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 | +| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 | +| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 | +| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6 EVPN address family command mode on the BGP neighbor | os10 | +| ``address_family.activate`` | boolean | Configures activation or deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 | +| ``address_family.sender_loop_detect`` | boolean | Enables or disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6 l2vpn address family | os10 | +| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 | +| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 | +| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 | +| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both ', 
'send ', 'receive')| os10 | +| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 | +| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 | +| ``route_map.filter`` | string | Configures the filter for routing updates | os10 | +| ``route_map.state`` | string, choices: absent,present* | Deletes the route-map of the BGP neighbor if set to absent | os10 | +| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.*``) | os10 | +| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 | +| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 | +| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 | +| ``max_prefix.warning``| boolean | Configures a warning without dropping session when maximum limit exceeds if set to true | os10| +| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 | +| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 | +| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 | +| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | 
Deletes the filter at outgoing packets if set to absent | os10 | +| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer-group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 | +| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 | +| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 | +| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 | +| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 | +| ``neighbor.adv_start`` | integer | Set the advertisement start of the neighbor | os10 | +| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of the neighbor | os10 | +| ``neighbor.conn_retry_timer`` | integer | 
Configures the peer connection retry timer | os10 | +| ``neighbor.remove_pri_as`` | string: absent,present | Removes private AS number from outbound updates | os10 | +| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 | +| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 | +| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 | +| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 | +| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 | +| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 | +| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 | +| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 | +| ``neighbor.description`` | string | Configures neighbor description | os10 | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 | +| ``vrf.redistribute`` | list | Configures the redistribute list to get information from other routing protocols in VRF (see ``redistribute.*``) | os10 | +| ``redistribute.route_type`` | string (required): static,connected,imported_bgp | Configures the name of the routing protocol to redistribute | os10 | +| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 | +| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 | +| ``redistribute.ospf_id`` | string | Configures the 
redistribute ospf | os10 | +| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 | +| ``vrf.bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors in VRF ((see ``bfd_all_neighbors.*``))| os10 | +| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 | +| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 | +| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 | +| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 | +| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 | +| ``vrf.state`` | string: absent,present\* | Deletes the VRF instance under router BGP if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. 
The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_bgp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + as_notation: asdot + enforce_first_as: false + non_deterministic_med: true + outbound_optimization: true + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + client_to_client: false + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + address_family: + - type: ipv4 + activate: false + state: present + 
max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 20::/64 + limit: 4 + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + src_loopback: 0 + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 10 + threshold: 40 + warning: true + state: present + default_originate: + route_map: aa + state: present + distribute_list: + in: XX + in_state: present + out: YY + out_state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: absent + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + sender_loop_detect: 
false + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs: + - name: "GREEN" + router_id: 50.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: false + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + client_to_client: false + cluster_id: 1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + description: U_site2 vlan + send_community: + - type: extended + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + src_loopback: 0 + address_family: + - type: ipv4 + activate: false + state: present + send_community: + - type: standard + state: present + state: 
present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + activate: false + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: present + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + activate: false + sender_loop_detect: false + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + - route_type: imported_bgp + imported_bgp_vrf_name: test6 + route_map_name: dd + address_type: ipv4 + state: present + - route_type: ospf + ospf_id: 12 + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present + state: present + + +**Simple playbook to configure BGP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_bgp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml new file mode 100644 index 00000000..0b8cbfc8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_bgp diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml new file mode 100644 index 00000000..b0141ca3 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_bgp diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml new file mode 100644 index 00000000..047c70dc --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml new file mode 100644 index 00000000..05c44354 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating BGP configuration for os10" + template: + src: os10_bgp.j2 + dest: "{{ build_dir }}/bgp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning BGP configuration for os10" + os10_config: + src: os10_bgp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2 b/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2 new file mode 100644 index 00000000..d4859eba --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2 @@ -0,0 +1,1244 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################### +Purpose: +Configure BGP commands for os10 Devices +os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + client_to_client: false + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + dampening: + value: 15 750 2000 60 
+ route_map: qq + state: present + ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + src_loopback: 0 + address_family: + - type: ipv4 + activate: true + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + max_prefix: + - count: 10 + threshold: 80 + warning: true + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: absent + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + 
sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + sender_loop_detect: false + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs : + - name: "GREEN" + router_id: 1.1.1.1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + state: present + neighbor: + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + src_loopback: 0 + address_family: + - type: ipv4 + activate: true + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: absent + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: 
bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + sender_loop_detect: false + state: present + send_community: + - type: standard + state: present + admin: up + state: present + state: present + state: present +################################} +{% macro render_default_metric_configs(af_vars) %} + {% if af_vars.default_metric is defined %} + {% if af_vars.default_metric %} + default-metric {{ af_vars.default_metric }} + {% else %} + no default-metric + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_outbound_optimization_configs(out_opt_vars) %} + {% if out_opt_vars %} + outbound-optimization + {% else %} + outbound-optimization + {% endif %} +{% endmacro %} + +{% macro render_non_deterministic_med_configs(non_deter_med_vars) %} + {% if non_deter_med_vars %} + non-deterministic-med + {% else %} + no non-deterministic-med + {% endif %} +{% endmacro %} + +{% macro render_enforce_first_as_configs(enforce_first_as_vars) %} + {% if enforce_first_as_vars %} + enforce-first-as + {% else %} + no enforce-first-as + {% endif %} +{% endmacro %} + +{% macro render_as_notation_configs(as_vars) %} + {% if as_vars %} + as-notation {{ as_vars }} + {% endif %} +{% endmacro %} + +{% macro render_neigh_configs(neigh_vars,indent_space) %} + {% if neigh_vars is defined and neigh_vars %} + {% for neighbor in neigh_vars %} + {% if neighbor.type is defined %} + {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %} + {% if neighbor.ip is defined and neighbor.ip %} + {% set tag_or_ip = neighbor.ip %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + {{ indent_space }}no neighbor {{ tag_or_ip }} + {% else %} + {{ indent_space }}neighbor {{ tag_or_ip }} + {% if neighbor.description is defined %} + {% if neighbor.description %} + {{ indent_space }}description "{{ neighbor.description }}" + {% else %} + {{ 
indent_space }}no description + {% endif %} + {% endif %} + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} + {{ indent_space }}no inherit template {{ neighbor.peergroup }} + {% else %} + {{ indent_space }}inherit template {{ neighbor.peergroup }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if neighbor.interface is defined and neighbor.interface %} + {% set tag_or_ip = neighbor.interface %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + {{ indent_space }}no neighbor interface {{ neighbor.interface }} + {% else %} + {{ indent_space }}neighbor interface {{ neighbor.interface }} + {% if neighbor.description is defined %} + {% if neighbor.description %} + {{ indent_space }}description "{{ neighbor.description }}" + {% else %} + {{ indent_space }}no description + {% endif %} + {% endif %} + {% if neighbor.admin is defined %} + {% if neighbor.admin == "up" %} + {{ indent_space }}no shutdown + {% else %} + {{ indent_space }}shutdown + {% endif %} + {% endif %} + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} + {{ indent_space }}no inherit template {{ neighbor.peergroup }} + {% elif neighbor.peergroup_type is defined %} + {{ indent_space }}inherit template {{ neighbor.peergroup }} inherit-type {{ neighbor.peergroup_type }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if neighbor.auto_peer is defined and neighbor.auto_peer %} + {% set tag_or_ip = neighbor.auto_peer %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + {{ indent_space }}no neighbor {{ neighbor.auto_peer }} + {% else %} + {{ indent_space }}neighbor {{ neighbor.auto_peer }} + {% if neighbor.description is defined %} + {% if neighbor.description %} + {{ indent_space }}description "{{ neighbor.description }}" + {% else %} + {{ 
indent_space }}no description + {% endif %} + {% endif %} + {% if neighbor.admin is defined %} + {% if neighbor.admin == "up" %} + {{ indent_space }}no shutdown + {% else %} + {{ indent_space }}shutdown + {% endif %} + {% endif %} + {% if neighbor.ebgp_peergroup is defined and neighbor.ebgp_peergroup %} + {% if neighbor.ebgp_peergroup_state is defined and neighbor.ebgp_peergroup_state == "absent" %} + {{ indent_space }}no inherit ebgp-template {{ neighbor.ebgp_peergroup }} + {% else %} + {{ indent_space }}inherit ebgp-template {{ neighbor.ebgp_peergroup }} + {% endif %} + {% endif %} + {% if neighbor.ibgp_peergroup is defined and neighbor.ibgp_peergroup %} + {% if neighbor.ibgp_peergroup_state is defined and neighbor.ibgp_peergroup_state == "absent" %} + {{ indent_space }}no inherit ibgp-template {{ neighbor.ibgp_peergroup }} + {% else %} + {{ indent_space }}inherit ibgp-template {{ neighbor.ibgp_peergroup }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% elif neighbor.type == "peergroup" %} + {% if neighbor.name is defined and neighbor.name %} + {% set tag_or_ip = neighbor.name %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + {{ indent_space }}no template {{ tag_or_ip }} + {% else %} + {{ indent_space }}template {{ tag_or_ip }} + {% if neighbor.description is defined %} + {% if neighbor.description %} + {{ indent_space }}description "{{ neighbor.description }}" + {% else %} + {{ indent_space }}no description + {% endif %} + {% endif %} + {% if neighbor.listen is defined and neighbor.listen %} + {% for item in neighbor.listen %} + {% if item.subnet is defined and item.subnet %} + {% if item.subnet_state is defined and item.subnet_state =="absent" %} + {{ indent_space }}no listen {{ item.subnet }} + {% else %} + {% set listen_str = item.subnet %} + {% if item.limit is defined and item.limit %} + {% set listen_str = listen_str ~ " limit " ~ item.limit %} + {% endif %} + {{ indent_space }}listen {{ listen_str }} + {% endif %} + {% 
endif %} + {% endfor %} + {% endif %} + + {% endif %} + {% endif %} + {% endif %} + {% if tag_or_ip is defined and tag_or_ip %} + {% if (neighbor.state is defined and not neighbor.state == "absent" ) or neighbor.state is not defined %} + {% if neighbor.remote_asn is defined and neighbor.remote_asn %} + {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %} + {{ indent_space }}no remote-as {{ neighbor.remote_asn }} + {% else %} + {{ indent_space }}remote-as {{ neighbor.remote_asn }} + {% endif %} + {% endif %} + {% if neighbor.local_as is defined %} + {% if neighbor.local_as %} + {{ indent_space }}local-as {{ neighbor.local_as }} + {% else %} + {{ indent_space }}no local-as + {% endif %} + {% endif %} + {% if neighbor.weight is defined %} + {% if neighbor.weight %} + {{ indent_space }}weight {{ neighbor.weight }} + {% else %} + {{ indent_space }}no weight + {% endif %} + {% endif %} + {% if neighbor.src_loopback is defined %} + {% if neighbor.src_loopback|int(-1) != -1 %} + {{ indent_space }}update-source loopback{{ neighbor.src_loopback }} + {% else %} + {{ indent_space }}no update-source loopback + {% endif %} + {% endif %} + + {% if neighbor.ebgp_multihop is defined %} + {% if neighbor.ebgp_multihop %} + {{ indent_space }}ebgp-multihop {{ neighbor.ebgp_multihop }} + {% else %} + {{ indent_space }}no ebgp-multihop + {% endif %} + {% endif %} + {% if neighbor.route_reflector_client is defined %} + {% if neighbor.route_reflector_client %} + {{ indent_space }}route-reflector-client + {% else %} + {{ indent_space }}no route-reflector-client + {% endif %} + {% endif %} + {% if neighbor.password is defined %} + {% if neighbor.password %} + {{ indent_space }}password {{ neighbor.password }} + {% else %} + {{ indent_space }}no password a + {% endif %} + {% endif %} + {% if neighbor.send_community is defined and neighbor.send_community %} + {% for comm in neighbor.send_community %} + {% if comm.type is defined and comm.type %} + {% if 
comm.state is defined and comm.state == "absent" %} + {{ indent_space }}no send-community {{ comm.type }} + {% else %} + {{ indent_space }}send-community {{ comm.type }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if neighbor.address_family is defined and neighbor.address_family %} + {% for af in neighbor.address_family %} + {% if af.type is defined and af.type %} + {% if af.state is defined and af.state == "absent" %} + {% if af.type == "l2vpn" %} + {{ indent_space }}no address-family {{ af.type }} evpn + {% else %} + {{ indent_space }}no address-family {{ af.type }} unicast + {% endif %} + {% else %} + {% if af.type == "l2vpn" %} + {{ indent_space }}address-family {{ af.type }} evpn + {% else %} + {{ indent_space }}address-family {{ af.type }} unicast + {% endif %} + {% if af.activate is defined %} + {% if af.activate %} + {{ indent_space }}activate + {% else %} + {{ indent_space }}no activate + {% endif %} + {% endif %} + {% if af.sender_loop_detect is defined %} + {% if af.sender_loop_detect %} + {{ indent_space }}sender-side-loop-detection + {% else %} + {{ indent_space }}no sender-side-loop-detection + {% endif %} + {% endif %} + {% if af.allow_as_in is defined %} + {% if af.allow_as_in %} + {{ indent_space }}allowas-in {{ af.allow_as_in }} + {% else %} + {{ indent_space }}no allowas-in + {% endif %} + {% endif %} + {% if af.route_map is defined and af.route_map %} + {% for item in af.route_map %} + {% if item.name is defined and item.name %} + {% if item.filter is defined and item.filter %} + {% if item.state is defined and item.state == "absent" %} + {{ indent_space }}no route-map {{ item.name }} {{ item.filter }} + {% else %} + {{ indent_space }}route-map {{ item.name }} {{ item.filter }} + {% endif%} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if af.next_hop_self is defined %} + {% if af.next_hop_self %} + {{ indent_space }}next-hop-self + {% else %} + {{ indent_space }}no next-hop-self + {% endif %} + {% endif %} + {% 
if af.soft_reconf is defined %} + {% if af.soft_reconf %} + {{ indent_space }}soft-reconfiguration inbound + {% else %} + {{ indent_space }}no soft-reconfiguration inbound + {% endif %} + {% endif %} + {% if af.add_path is defined %} + {% if af.add_path %} + {{ indent_space }}add-path {{ af.add_path }} + {% else %} + {{ indent_space }}no add-path + {% endif %} + {% endif %} + {% if af.max_prefix is defined %} + {% if af.max_prefix.count is defined and af.max_prefix.count %} + {% if af.max_prefix.state is defined and af.max_prefix.state == "absent" %} + {{ indent_space }}no maximum-prefix {{ af.max_prefix.count }} + {% else %} + {% set max_pfrx_str = af.max_prefix.count %} + {% if af.max_prefix.threshold is defined and af.max_prefix.threshold %} + {% set max_pfrx_str = max_pfrx_str ~ " " ~ af.max_prefix.threshold %} + {% endif %} + {% if af.max_prefix.warning is defined and af.max_prefix.warning %} + {% set max_pfrx_str = max_pfrx_str ~ " warning-only" %} + {% endif %} + {{ indent_space }}maximum-prefix {{ max_pfrx_str }} + {% endif %} + {% endif %} + {% endif %} + {% if af.default_originate is defined %} + {% if af.default_originate.state is defined and af.default_originate.state == "absent" %} + {{ indent_space }}no default-originate + {% else %} + {% set def_origin_str = " " %} + {% if af.default_originate.route_map is defined and af.default_originate.route_map %} + {% set def_origin_str = "route-map " ~ af.default_originate.route_map %} + {% endif %} + {{ indent_space }}default-originate {{ def_origin_str }} + {% endif %} + {% endif %} + {% if af.distribute_list is defined and af.distribute_list %} + {% if af.distribute_list.in is defined and af.distribute_list.in %} + {% if af.distribute_list.in_state is defined and af.distribute_list.in_state == "absent" %} + {{ indent_space }}no distribute-list {{ af.distribute_list.in }} in + {% else %} + {{ indent_space }}distribute-list {{ af.distribute_list.in }} in + {% endif %} + {% endif %} + {% if 
af.distribute_list.out is defined and af.distribute_list.out %} + {% if af.distribute_list.out_state is defined and af.distribute_list.out_state == "absent" %} + {{ indent_space }}no distribute-list {{ af.distribute_list.out }} out + {% else %} + {{ indent_space }}distribute-list {{ af.distribute_list.out }} out + {% endif %} + {% endif %} + {% endif %} + + + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if neighbor.adv_interval is defined %} + {% if neighbor.adv_interval %} + {{ indent_space }}advertisement-interval {{ neighbor.adv_interval }} + {% else %} + {{ indent_space }}no advertisement-interval + {% endif %} + {% endif %} + {% if neighbor.adv_start is defined and neighbor.adv_start >= 0 %} + {% if neighbor.adv_start_state is defined and neighbor.adv_start_state == "absent" %} + {{ indent_space }}no advertisement-start + {% else %} + {{ indent_space }}advertisement-start {{ neighbor.adv_start }} + {% endif %} + {% endif %} + {% if neighbor.conn_retry_timer is defined %} + {% if neighbor.conn_retry_timer %} + {{ indent_space }}connection-retry-timer {{ neighbor.conn_retry_timer }} + {% else %} + {{ indent_space }}no connection-retry-timer + {% endif %} + {% endif %} + {% if neighbor.remove_pri_as is defined and neighbor.remove_pri_as == "present" %} + {{ indent_space }}remove-private-as + {% elif neighbor.remove_pri_as is defined and neighbor.remove_pri_as == "absent" %} + {{ indent_space }}no remove-private-as + {% endif %} + {% if neighbor.fall_over is defined and neighbor.fall_over == "present" %} + {{ indent_space }}fall-over + {% elif neighbor.fall_over is defined and neighbor.fall_over == "absent" %} + {{ indent_space }}no fall-over + {% endif %} + {% if neighbor.bfd is defined and neighbor.bfd %} + {{ indent_space }}bfd + {% elif neighbor.bfd is defined and not neighbor.bfd %} + {{ indent_space }}no bfd + {% endif %} + {% if neighbor.timer is defined %} + {% if neighbor.timer %} + {{ indent_space }}timers {{ neighbor.timer }} + {% else %} 
+ {{ indent_space }}no timers + {% endif %} + {% endif %} + {% if neighbor.admin is defined %} + {% if neighbor.admin == "up" %} + {{ indent_space }}no shutdown + {% else %} + {{ indent_space }}shutdown + {% endif %} + {% endif %} + {% if neighbor.distribute_list is defined and neighbor.distribute_list %} + {{ indent_space }}address-family ipv4 unicast + {% if neighbor.distribute_list.in is defined and neighbor.distribute_list.in %} + {% if neighbor.distribute_list.in_state is defined and neighbor.distribute_list.in_state == "absent" %} + {{ indent_space }}no distribute-list {{ neighbor.distribute_list.in }} in + {% else %} + {{ indent_space }}distribute-list {{ neighbor.distribute_list.in }} in + {% endif %} + {% endif %} + {% if neighbor.distribute_list.out is defined and neighbor.distribute_list.out %} + {% if neighbor.distribute_list.out_state is defined and neighbor.distribute_list.out_state == "absent" %} + {{ indent_space }}no distribute-list {{ neighbor.distribute_list.out }} out + {% else %} + {{ indent_space }}distribute-list {{ neighbor.distribute_list.out }} out + {% endif %} + {% endif %} + {% endif %} + {% if neighbor.sender_loop_detect is defined %} + {{ indent_space }}address-family ipv4 unicast + {% if neighbor.sender_loop_detect %} + {{ indent_space }}sender-side-loop-detection + {% else %} + {{ indent_space }}no sender-side-loop-detection + {% endif %} + {% endif %} + + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endmacro %} + +{% macro render_ibgp_redist_internal_configs(af_vars) %} + {% if af_vars.ibgp_redist_internal is defined and af_vars.ibgp_redist_internal%} + {% if af_vars.ibgp_redist_internal.state is defined and af_vars.ibgp_redist_internal.state == "absent" %} + no bgp redistribute-internal + {% else %} + bgp redistribute-internal + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_distance_bgp_configs(af_vars) %} + {% if af_vars.distance_bgp is defined and af_vars.distance_bgp %} + {% if 
af_vars.distance_bgp.state is defined and af_vars.distance_bgp.state == "absent" %} + no distance bgp + {% else %} + {% if af_vars.distance_bgp.value is defined and af_vars.distance_bgp.value %} + distance bgp {{ af_vars.distance_bgp.value }} + {% endif %} + {% endif %} + {% endif %} +{% endmacro %} + + +{% macro render_dampening_configs(af_vars) %} + {% if af_vars.dampening is defined and af_vars.dampening %} + {% if af_vars.dampening.state is defined and af_vars.dampening.state == "absent" %} + no dampening + {% else %} + {% if af_vars.dampening.value is defined and af_vars.dampening.value %} + {% if af_vars.dampening.route_map is defined and af_vars.dampening.route_map %} + dampening {{ af_vars.dampening.value }} route-map {{ af_vars.dampening.route_map }} + {% else %} + dampening {{ af_vars.dampening.value }} + {% endif %} + {% else %} + {% if af_vars.dampening.route_map is defined and af_vars.dampening.route_map %} + dampening 15 750 2000 60 route-map {{ af_vars.dampening.route_map }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_af_configs(af_vars) %} + {% if af_vars is defined and af_vars %} + {% if af_vars.aggregate_address is defined and af_vars.aggregate_address %} + {% for addr in af_vars.aggregate_address %} + {% if addr.ip_and_mask is defined and addr.ip_and_mask %} + {% if addr.state is defined and addr.state == "absent" %} + no aggregate-address {{ addr.ip_and_mask }} + {% else %} + {% set aggr_str = addr.ip_and_mask %} + {% if addr.adv_map is defined and addr.adv_map %} + {% set aggr_str = aggr_str ~ " advertise-map " ~ addr.adv_map %} + {% endif %} + {% if addr.as_set is defined and addr.as_set %} + {% set aggr_str = aggr_str ~ " as-set " %} + {% endif %} + {% if addr.attr_map is defined and addr.attr_map %} + {% set aggr_str = aggr_str ~ " attribute-map " ~ addr.attr_map %} + {% endif %} + {% if addr.summary is defined and addr.summary %} + {% set aggr_str = aggr_str ~ " summary-only" %} + {% endif %} + 
{% if addr.suppress_map is defined and addr.suppress_map %} + {% set aggr_str = aggr_str ~ " suppress-map " ~ addr.suppress_map %} + {% endif %} + aggregate-address {{ aggr_str }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_rtid_configs(routerid_vars) %} + {% if routerid_vars %} + router-id {{ routerid_vars }} + {% else %} + no router-id + {% endif %} +{% endmacro %} + +{% macro render_bfd_all_neigh_configs(bfd_all_neigh_vars) %} + {% if bfd_all_neigh_vars.state is defined and bfd_all_neigh_vars.state == "absent"%} + no bfd all-neighbors + {% else %} + {% set bfd_vars = bfd_all_neigh_vars %} + {% if bfd_vars.interval is defined and bfd_vars.min_rx is defined and bfd_vars.multiplier is defined %} + bfd all-neighbors interval {{ bfd_vars.interval }} min_rx {{ bfd_vars.min_rx }} multiplier {{ bfd_vars.multiplier }} role {{ bfd_vars.role }} + {% else %} + bfd all-neighbors + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_log_neigh_change_configs(log_neigh_change_vars) %} + {% if log_neigh_change_vars %} + log-neighbor-changes + {% else %} + no log-neighbor-changes + {% endif %} +{% endmacro %} + +{% macro render_maxpath_ebgp_configs(maxpath_ebgp_vars) %} + {% if maxpath_ebgp_vars %} + maximum-paths ebgp {{ maxpath_ebgp_vars }} + {% else %} + no maximum-paths ebgp + {% endif %} +{% endmacro %} + +{% macro render_maxpath_ibgp_configs(maxpath_ibgp_vars) %} + {% if maxpath_ibgp_vars %} + maximum-paths ibgp {{ maxpath_ibgp_vars }} + {% else %} + no maximum-paths ibgp + {% endif %} +{% endmacro %} + +{% macro render_graceful_restart_configs(graceful_restart_vars) %} + {% if graceful_restart_vars %} + graceful-restart role receiver-only + {% else %} + no graceful-restart role receiver-only + {% endif %} +{% endmacro %} + +{% macro render_always_compare_med_configs(always_compare_med_vars) %} + {% if always_compare_med_vars %} + always-compare-med + {% else %} + no always-compare-med + {% endif %} +{% 
endmacro %} + +{% macro render_default_loc_pref_configs(default_loc_pref_vars) %} + {% if default_loc_pref_vars %} + default local-preference {{ default_loc_pref_vars }} + {% else %} + no default local-preference + {% endif %} +{% endmacro %} + +{% macro render_fast_ext_fallover_configs(fast_ext_fallover_vars) %} + {% if fast_ext_fallover_vars %} + fast-external-fallover + {% else %} + no fast-external-fallover + {% endif %} +{% endmacro %} + +{% macro render_confederation_configs(confederation_vars) %} + {% if confederation_vars.identifier is defined %} + {% if confederation_vars.identifier %} + confederation identifier {{ confederation_vars.identifier }} + {% else %} + no confederation identifier 1 + {% endif %} + {% endif %} + {% if confederation_vars.peers is defined and confederation_vars.peers %} + {% if confederation_vars.peers_state is defined and confederation_vars.peers_state == "absent" %} + no confederation peers {{ confederation_vars.peers }} + {% else %} + confederation peers {{ confederation_vars.peers }} + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_route_reflector_client_configs(route_reflector_vars) %} + {% if route_reflector_vars.client_to_client is defined %} + {% if route_reflector_vars.client_to_client %} + client-to-client reflection + {% else %} + no client-to-client reflection + {% endif %} + {% endif %} +{% endmacro %} +{% macro render_route_reflector_cluster_configs(route_reflector_vars) %} + {% if route_reflector_vars.cluster_id is defined %} + {% if route_reflector_vars.cluster_id %} + cluster-id {{ route_reflector_vars.cluster_id }} + {% else %} + no cluster-id 1 + {% endif %} + {% endif %} +{% endmacro %} + +{% macro render_best_path_as_configs(best_path_vars) %} + {% if best_path_vars.as_path is defined and best_path_vars.as_path %} + {% if best_path_vars.as_path_state is defined and best_path_vars.as_path_state == "absent" %} + no bestpath as-path {{ best_path_vars.as_path }} + {% else %} + bestpath as-path {{ 
best_path_vars.as_path }} + {% endif %} + {% endif %} +{% endmacro %} +{% macro render_best_path_routerid_configs(best_path_vars) %} + {% if best_path_vars.ignore_router_id is defined %} + {% if best_path_vars.ignore_router_id %} + bestpath router-id ignore + {% else %} + no bestpath router-id ignore + {% endif %} + {% endif %} +{% endmacro %} +{% macro render_best_path_med_configs(best_path_vars,indent_space) %} + {% if best_path_vars.med is defined and best_path_vars.med %} + {% for med in best_path_vars.med %} + {% if med.attribute is defined and med.attribute %} + {% if med.state is defined and med.state == "absent" %} + {{ indent_space }}no bestpath med {{ med.attribute }} + {% else %} + {{ indent_space }}bestpath med {{ med.attribute }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endmacro %} + +{% macro render_ipv4_network_configs(ipv4_network_vars) %} + {% for net in ipv4_network_vars %} + {% if net.address is defined and net.address %} + {% if net.state is defined and net.state == "absent"%} + no network {{ net.address }} + {% else %} + network {{ net.address }} + {% endif %} + {% endif %} + {% endfor %} +{% endmacro %} + +{% macro render_ipv6_network_configs(ipv6_network_vars) %} + {% for net in ipv6_network_vars %} + {% if net.address is defined and net.address %} + {% if net.state is defined and net.state == "absent"%} + no network {{ net.address }} + {% else %} + network {{ net.address }} + {% endif %} + {% endif %} + {% endfor %} +{% endmacro %} + +{% macro render_redistribute_configs(redistribute_vars,indent_space) %} + {% for route in redistribute_vars %} + {% if route.route_type is defined and route.route_type %} + {% if route.address_type is defined and route.address_type %} + {{ indent_space }}address-family {{ route.address_type }} unicast + {% if route.state is defined and route.state == "absent" %} + {% if route.route_type == "imported_bgp" %} + {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name %} + 
{% elif route.route_type == "ospf" and route.ospf_id %} + {% set redist_str = route.route_type ~ " " ~ route.ospf_id %} + {% elif route.route_type == "l2vpn" %} + {% set redist_str = route.route_type ~ " evpn" %} + {% else %} + {% set redist_str = route.route_type %} + {% endif %} + {{ indent_space }}no redistribute {{ redist_str }} + {% else %} + {% if route.route_map_name is defined and route.route_map_name %} + {% if route.route_type == "imported_bgp" %} + {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name ~ " route-map " ~ route.route_map_name %} + {% elif route.route_type == "ospf" and route.ospf_id %} + {% set redist_str = route.route_type ~ " " ~ route.ospf_id ~ " route-map " ~ route.route_map_name %} + {% elif route.route_type == "l2vpn" %} + {% set redist_str = route.route_type ~ " evpn route-map " ~ route.route_map_name %} + {% else %} + {% set redist_str = route.route_type ~ " route-map " ~ route.route_map_name %} + {% endif %} + {{ indent_space }}redistribute {{ redist_str }} + {% else %} + {% if route.route_type == "imported_bgp" %} + {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name %} + {% elif route.route_type == "ospf" and route.ospf_id %} + {% set redist_str = route.route_type ~ " " ~ route.ospf_id %} + {% elif route.route_type == "l2vpn" %} + {% set redist_str = route.route_type ~ " evpn" %} + {% else %} + {% set redist_str = route.route_type %} + {% endif %} + {{ indent_space }}redistribute {{ redist_str }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endmacro %} + +{% if os10_bgp is defined and os10_bgp %} +{% set bgp_vars = os10_bgp %} +{% if bgp_vars.state is defined and bgp_vars.state == "absent" %} +no router bgp +{% else %} + {# Add Feature to the switch #} + {% if bgp_vars.asn is defined and bgp_vars.asn %} +router bgp {{ bgp_vars.asn }} + {% set indent_space = "" %} + {% if bgp_vars.router_id is defined %} + {% set routerid_vars = bgp_vars.router_id %} +{{ 
render_rtid_configs(routerid_vars) }} + {% endif %} + + {% if bgp_vars.as_notation is defined %} + {% set as_vars = bgp_vars.as_notation %} +{{ render_as_notation_configs(as_vars) }} + {% endif %} + + {% if bgp_vars.enforce_first_as is defined %} + {% set enforce_first_as_vars = bgp_vars.enforce_first_as %} +{{ render_enforce_first_as_configs(enforce_first_as_vars) }} + {% endif %} + + {% if bgp_vars.non_deterministic_med is defined %} + {% set non_deter_med_vars = bgp_vars.non_deterministic_med %} +{{ render_non_deterministic_med_configs(non_deter_med_vars) }} + {% endif %} + + {% if bgp_vars.outbound_optimization is defined %} + {% set out_opt_vars = bgp_vars.outbound_optimization %} +{{ render_outbound_optimization_configs(out_opt_vars) }} + {% endif %} + + {% if bgp_vars.bfd_all_neighbors is defined and bgp_vars.bfd_all_neighbors is defined %} + {% set bfd_all_neigh_vars = bgp_vars.bfd_all_neighbors %} +{{ render_bfd_all_neigh_configs(bfd_all_neigh_vars) }} + {% endif %} + + {% if bgp_vars.log_neighbor_changes is defined %} + {% set log_neigh_change_vars = bgp_vars.log_neighbor_changes %} +{{ render_log_neigh_change_configs(log_neigh_change_vars) }} + {% endif %} + + {% if bgp_vars.maxpath_ebgp is defined %} + {% set maxpath_ebgp_vars = bgp_vars.maxpath_ebgp %} +{{ render_maxpath_ebgp_configs(maxpath_ebgp_vars) }} + {% endif %} + + {% if bgp_vars.maxpath_ibgp is defined %} + {% set maxpath_ibgp_vars = bgp_vars.maxpath_ibgp %} +{{ render_maxpath_ibgp_configs(maxpath_ibgp_vars) }} + {% endif %} + + {% if bgp_vars.graceful_restart is defined %} + {% set graceful_restart_vars = bgp_vars.graceful_restart %} +{{ render_graceful_restart_configs(graceful_restart_vars) }} + {% endif %} + + {% if bgp_vars.always_compare_med is defined %} + {% set always_compare_med_vars = bgp_vars.always_compare_med %} +{{ render_always_compare_med_configs(always_compare_med_vars) }} + {% endif %} + + {% if bgp_vars.default_loc_pref is defined %} + {% set default_loc_pref_vars = 
bgp_vars.default_loc_pref %} +{{ render_default_loc_pref_configs(default_loc_pref_vars) }} + {% endif %} + + {% if bgp_vars.fast_ext_fallover is defined %} + {% set fast_ext_fallover_vars = bgp_vars.fast_ext_fallover %} +{{ render_fast_ext_fallover_configs(fast_ext_fallover_vars) }} + {% endif %} + + {% if bgp_vars.confederation is defined and bgp_vars.confederation %} + {% set confederation_vars = bgp_vars.confederation %} +{{ render_confederation_configs(confederation_vars) }} + {% endif %} + + {% if bgp_vars.route_reflector is defined and bgp_vars.route_reflector %} + {% set route_reflector_vars = bgp_vars.route_reflector %} +{{ render_route_reflector_client_configs(route_reflector_vars) }} +{{ render_route_reflector_cluster_configs(route_reflector_vars) }} + {% endif %} + + {% if bgp_vars.best_path is defined and bgp_vars.best_path %} + {% set best_path_vars = bgp_vars.best_path %} +{{ render_best_path_as_configs(best_path_vars) }} +{{ render_best_path_routerid_configs(best_path_vars) }} +{{ render_best_path_med_configs(best_path_vars,indent_space) }} + {% endif %} + + {% if bgp_vars.address_family_ipv4 is defined and bgp_vars.address_family_ipv4 %} + {% set af_vars = bgp_vars.address_family_ipv4 %} + address-family ipv4 unicast +{{ render_af_configs(af_vars) }} +{{ render_ibgp_redist_internal_configs(af_vars) }} +{{ render_dampening_configs(af_vars) }} +{{ render_default_metric_configs(af_vars) }} +{{ render_distance_bgp_configs(af_vars) }} + {% endif %} + + {% if bgp_vars.address_family_ipv6 is defined and bgp_vars.address_family_ipv6 %} + {% set af_vars = bgp_vars.address_family_ipv6 %} + address-family ipv6 unicast +{{ render_af_configs(af_vars) }} +{{ render_ibgp_redist_internal_configs(af_vars) }} +{{ render_dampening_configs(af_vars) }} +{{ render_default_metric_configs(af_vars) }} +{{ render_distance_bgp_configs(af_vars) }} + {% endif %} + + {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %} + {% set ipv4_network_vars = 
bgp_vars.ipv4_network %} + address-family ipv4 unicast +{{ render_ipv4_network_configs(ipv4_network_vars) }} + {% endif %} + + {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %} + {% set ipv6_network_vars = bgp_vars.ipv6_network %} + address-family ipv6 unicast +{{ render_ipv6_network_configs(ipv6_network_vars) }} + {% endif %} + + {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %} + {% set redistribute_vars = bgp_vars.redistribute %} +{{ render_redistribute_configs(redistribute_vars,indent_space) }} + {% endif %} + + {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %} + {% set neigh_vars = bgp_vars.neighbor %} +{{ render_neigh_configs(neigh_vars,indent_space) }} + {% endif %} + + {% if bgp_vars.vrfs is defined %} + {% set indent_space = " " %} + {% for vrf in bgp_vars.vrfs %} + {% if vrf.state is defined and vrf.state == "absent" %} + no vrf {{ vrf.name }} + {% else %} + vrf {{ vrf.name }} + + {% if vrf.router_id is defined %} + {% set routerid_vars = vrf.router_id %} + {{ render_rtid_configs(routerid_vars) }} + {% endif %} + + {% if vrf.as_notation is defined %} + {% set as_vars = vrf.as_notation %} + {{ render_as_notation_configs(as_vars) }} + {% endif %} + + {% if vrf.enforce_first_as is defined %} + {% set enforce_first_as_vars = vrf.enforce_first_as %} + {{ render_enforce_first_as_configs(enforce_first_as_vars) }} + {% endif %} + + {% if vrf.non_deterministic_med is defined %} + {% set non_deter_med_vars = vrf.non_deterministic_med %} + {{ render_non_deterministic_med_configs(non_deter_med_vars) }} + {% endif %} + + {% if vrf.outbound_optimization is defined %} + {% set out_opt_vars = vrf.outbound_optimization %} + {{ render_outbound_optimization_configs(out_opt_vars) }} + {% endif %} + + {% if vrf.bfd_all_neighbors is defined and vrf.bfd_all_neighbors is defined %} + {% set bfd_all_neigh_vars = vrf.bfd_all_neighbors %} + {{ render_bfd_all_neigh_configs(bfd_all_neigh_vars) }} + {% endif %} + + {% if 
vrf.log_neighbor_changes is defined %} + {% set log_neigh_change_vars = vrf.log_neighbor_changes %} + {{ render_log_neigh_change_configs(log_neigh_change_vars) }} + {% endif %} + + {% if vrf.maxpath_ebgp is defined %} + {% set maxpath_ebgp_vars = vrf.maxpath_ebgp %} + {{ render_maxpath_ebgp_configs(maxpath_ebgp_vars) }} + {% endif %} + + {% if vrf.maxpath_ibgp is defined %} + {% set maxpath_ibgp_vars = vrf.maxpath_ibgp %} + {{ render_maxpath_ibgp_configs(maxpath_ibgp_vars) }} + {% endif %} + + {% if vrf.graceful_restart is defined %} + {% set graceful_restart_vars = vrf.graceful_restart %} + {{ render_graceful_restart_configs(graceful_restart_vars) }} + {% endif %} + + {% if vrf.always_compare_med is defined %} + {% set always_compare_med_vars = vrf.always_compare_med %} + {{ render_always_compare_med_configs(always_compare_med_vars) }} + {% endif %} + + {% if vrf.default_loc_pref is defined %} + {% set default_loc_pref_vars = vrf.default_loc_pref %} + {{ render_default_loc_pref_configs(default_loc_pref_vars) }} + {% endif %} + + {% if vrf.fast_ext_fallover is defined %} + {% set fast_ext_fallover_vars = vrf.fast_ext_fallover %} + {{ render_fast_ext_fallover_configs(fast_ext_fallover_vars) }} + {% endif %} + + {% if vrf.route_reflector is defined and vrf.route_reflector %} + {% set route_reflector_vars = vrf.route_reflector %} + {{ render_route_reflector_client_configs(route_reflector_vars) }} + {{ render_route_reflector_cluster_configs(route_reflector_vars) }} + {% endif %} + + {% if vrf.best_path is defined and vrf.best_path %} + {% set best_path_vars = vrf.best_path %} + {{ render_best_path_as_configs(best_path_vars) }} + {{ render_best_path_routerid_configs(best_path_vars) }} +{{ render_best_path_med_configs(best_path_vars,indent_space) }} + {% endif %} + + {% if vrf.address_family_ipv4 is defined and vrf.address_family_ipv4 %} + {% set af_vars = vrf.address_family_ipv4 %} + address-family ipv4 unicast + {{ render_af_configs(af_vars) }} + {{ 
render_dampening_configs(af_vars) }} + {{ render_ibgp_redist_internal_configs(af_vars) }} + {{ render_default_metric_configs(af_vars) }} + {{ render_distance_bgp_configs(af_vars) }} + {% endif %} + + {% if vrf.address_family_ipv6 is defined and vrf.address_family_ipv6 %} + {% set af_vars = vrf.address_family_ipv6 %} + address-family ipv6 unicast + {{ render_af_configs(af_vars) }} + {{ render_dampening_configs(af_vars) }} + {{ render_ibgp_redist_internal_configs(af_vars) }} + {{ render_default_metric_configs(af_vars) }} + {{ render_distance_bgp_configs(af_vars) }} + {% endif %} + + {% if vrf.ipv4_network is defined and vrf.ipv4_network %} + {% set ipv4_network_vars = vrf.ipv4_network %} + address-family ipv4 unicast + {{ render_ipv4_network_configs(ipv4_network_vars) }} + {% endif %} + + {% if vrf.ipv6_network is defined and vrf.ipv6_network %} + {% set ipv6_network_vars = vrf.ipv6_network %} + address-family ipv6 unicast + {{ render_ipv6_network_configs(ipv6_network_vars) }} + {% endif %} + + {% if vrf.redistribute is defined and vrf.redistribute %} + {% set redistribute_vars = vrf.redistribute %} +{{ render_redistribute_configs(redistribute_vars,indent_space) }} + {% endif %} + + {% if vrf.neighbor is defined and vrf.neighbor %} + {% set neigh_vars = vrf.neighbor %} +{{ render_neigh_configs(neigh_vars,indent_space) }} + {% endif %} + + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 
ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml new file mode 100644 index 00000000..e556186d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml @@ -0,0 +1,384 @@ +--- +# vars file for dellemc.os10.os10_bgp, +# below gives a sample configuration +# Sample variables for OS10 device +os10_bgp: + asn: 12 + router_id: 90.1.1.4 + as_notation: asdot + enforce_first_as: false + non_deterministic_med: true + outbound_optimization: true + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + client_to_client: false + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + state: present + - name: ebgp_pg + type: peergroup + bfd: 
yes + state: present + address_family: + - type: ipv4 + activate: false + state: present + max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 6.6.6.6/32 + limit: 3 + subnet_state: present + - subnet: 23::/64 + limit: + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + src_loopback: 0 + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 30 + threshold: 50 + state: present + default_originate: + route_map: aa + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: absent + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - 
type: l2vpn + activate: true + sender_loop_detect: false + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs: + - name: "test1" + router_id: 70.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: false + enforce_first_as: false + non_deterministic_med: true + outbound_optimization: true + fast_ext_fallover: false + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + client_to_client: false + cluster_id: 2000 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + distance_bgp: + value: 3 4 6 + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + description: "template peer1" + adv_interval: 50 + adv_start: 100 + adv_start_state: present + ebgp_multihop: 20 + fall_over: present + conn_retry_timer: 20 + remove_pri_as: present + bfd: yes + address_family: + - type: ipv4 + state: present + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + send_community: + - type: extended + state: present + 
admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.10.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + src_loopback: 0 + address_family: + - type: ipv4 + activate: false + distribute_list: + in: dd + in_state: present + out: dd + out_state: present + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: false + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + default_originate: + route_map: aa + state: present + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ebgp_peergroup: ebgp_pg + ebgp_peergroup_state: present + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + activate: false + sender_loop_detect: false + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + default_originate: + route_map: dd + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + - route_type: imported_bgp + imported_bgp_vrf_name: test2 + route_map_name: dd + address_type: ipv4 + state: present + - route_type: ospf + ospf_id: 12 + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + 
as_notation: asdot + state: present + - name: "test2" + router_id: 80.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + as_notation: asdot + state: present + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml new file mode 100644 index 00000000..fd5211f4 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_bgp diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml new file mode 100644 index 00000000..de9999bd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_bgp diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE b/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md b/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md new file mode 100644 index 00000000..eadefecb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md @@ -0,0 +1,131 @@ +Copy-config role +================ + +This role is used to push the backup running configuration into a Dell EMC PowerSwitch platform running Dell EMC SmartFabric OS10, and merges the configuration in the template file with the running configuration of the device. + +The copy-config role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- No predefined variables are part of this role +- Use *host_vars* or *group_vars* as part of the template file +- Configuration file is host-specific +- Copy the host-specific configuration to the respective file under the template directory in *.j2* format +- Variables and values are case-sensitive + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_copy_config* role to push the configuration file into the device. It creates a *hosts* file with the switch details and corresponding variables. 
It writes a simple playbook that only references the *os10_copy_config* role. By including the role, you automatically get access to all of the tasks to push configuration file. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + + # This variable shall be applied in the below jinja template for each host by defining here + os10_bgp + asn: 64801 + +**Sample roles/os10_copy_config/templates/leaf1.j2** + + ! Leaf1 BGP profile on Dell OS10 switch + snmp-server community public ro + hash-algorithm ecmp crc + ! + interface ethernet1/1/1:1 + no switchport + ip address 100.1.1.2/24 + ipv6 address 2001:100:1:1::2/64 + mtu 9216 + no shutdown + ! + interface ethernet1/1/9:1 + no switchport + ip address 100.2.1.2/24 + ipv6 address 2001:100:2:1::2/64 + mtu 9216 + no shutdown + ! + router bgp {{ os10_bgp.asn }} + bestpath as-path multipath-relax + bestpath med missing-as-worst + router-id 100.0.2.1 + ! + address-family ipv4 unicast + ! + address-family ipv6 unicast + ! + neighbor 100.1.1.1 + remote-as 64901 + no shutdown + ! + neighbor 100.2.1.1 + remote-as 64901 + no shutdown + ! + neighbor 2001:100:1:1::1 + remote-as 64901 + no shutdown + ! + address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + neighbor 2001:100:2:1::1 + remote-as 64901 + no shutdown + ! + address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + +**Simple playbook to setup to push configuration file into device — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_copy_config + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml new file mode 100644 index 00000000..de0edc0c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_copy_config diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml new file mode 100644 index 00000000..e11a8805 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_copy_config diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml new file mode 100644 index 00000000..16658939 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + This role shall be used to push the backup running configuration into the device. 
+ This role shall merge the configuration in the template file with the running configuration of the device + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml new file mode 100644 index 00000000..dd62a63f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# tasks file for dellemc.os10.os10_copy_config + - name: "Merge the config file to running configuration for OS10" + os10_config: + src: "{{ hostname }}.j2" + when: (ansible_network_os is defined and ansible_network_os== "dellemc.os10.os10") +# notify: save config os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2 b/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2 new file mode 100644 index 00000000..b02686f5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2 @@ -0,0 +1,3 @@ +! Version 10.3.0E +! Last configuration change at March 09 21:47:35 2020 +! 
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml new file mode 100644 index 00000000..6c7b8039 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os10.os10_copy_config diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml new file mode 100644 index 00000000..9f021ecf --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_copy_config diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE b/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/README.md b/ansible_collections/dellemc/os10/roles/os10_dns/README.md new file mode 100644 index 00000000..b65d7622 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/README.md @@ -0,0 +1,125 @@ +DNS role +======== + +This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The DNS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_dns keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``name_server`` | list | Configures DNS (see ``name_server.*``) | os10 | +| ``name_server.ip`` | list | Configures the name server IP | os10 | +| ``name_server.vrf`` | list | Configures VRF for each IP | os10 | +| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os10 | +| ``domain_list`` | list | Configures domain-list (see ``domain_list.*``) | os10 | +| ``domain_list.name`` | list | Configures the domain-list name | os10 | +| ``domain_list.vrf`` | list | Configures VRF for each domain-list name | os10 | +| ``domain_list.state`` | string: absent,present\* | Deletes the domain-list if set to absent | os10 | + + +> **NOTE**: Asterisk (\*) 
denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> 
**NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_dns* role to completely set up the DNS server configuration. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os10_dns* role. By including the role, you automatically get access to all of the tasks to configure DNS. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_dns: + domain_lookup: true + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - ip: + - 3.1.1.2 + vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + domain_list: + - name: + - dname7 + - dname8 + vrf: + - test + - test1 + - name: + - dname7 + vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent + +> **NOTE**: vrf should be present which can be configured using os10_vrf role + +**Simple playbook to setup DNS — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_dns + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml new file mode 100644 index 00000000..d826575e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_dns diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml new file mode 100644 index 00000000..a6cd5e69 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_dns diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml new file mode 100644 index 00000000..1f0baa16 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_dns role facilitates the configuration of DNS attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml new file mode 100644 index 00000000..417ebacf --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for Dellos10 + + - name: "Generating DNS configuration for os10" + template: + src: os10_dns.j2 + dest: "{{ build_dir }}/dns10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning DNS configuration for os10" + os10_config: + src: os10_dns.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2 b/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2 new file mode 100644 index 00000000..f381b3d0 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2 @@ -0,0 +1,101 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure DNS commands for OS10 devices +os10_dns: +os10_dns: + domain_lookup: true + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - ip: + - 3.1.1.2 + vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + domain_list: + - name: + - dname7 + - dname8 + vrf: + - test + - test1 + - name: + - dname7 + vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent +#####################################} +{% if (os10_dns is defined and os10_dns) %} + {% 
if (os10_dns.name_server is defined and os10_dns.name_server) %} + {% for name_server in os10_dns.name_server %} + {% set absent = "" %} + {% if name_server.state is defined and name_server.state == "absent" %} + {% set absent = "no " %} + {% endif %} + + {% set vrf_name_list = name_server.vrf %} + {% if (vrf_name_list is defined and vrf_name_list ) %} + {% for vrf_name in vrf_name_list %} + {% set ip_list = name_server.ip %} + {% if (ip_list is defined and ip_list ) %} + {% for ip_val in ip_list %} + {{ absent }}ip name-server vrf {{ vrf_name }} {{ ip_val }} + {% endfor %} + {% elif name_server.state is defined and name_server.state == "absent"%} + {{ absent }}ip name-server vrf {{ vrf_name }} + {% endif %} + {% endfor %} + {% else %} + {% set ip_list = name_server.ip %} + {% if (ip_list is defined and ip_list ) %} + {{ absent }}ip name-server {{ ip_list|join(' ') }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if (os10_dns.domain_list is defined and os10_dns.domain_list) %} + {% for domain in os10_dns.domain_list %} + {% set absent = "" %} + {% if domain.state is defined and domain.state == "absent" %} + {% set absent = "no " %} + {% endif %} + + {% set vrf_name_list = domain.vrf %} + {% if (vrf_name_list is defined and vrf_name_list ) %} + {% for vrf_name in vrf_name_list %} + {% set name_list = domain.name %} + {% if (name_list is defined and name_list ) %} + {% for name_val in name_list %} + {{ absent }}ip domain-list vrf {{ vrf_name }} {{ name_val }} + {% endfor %} + {% elif domain.state is defined and domain.state == "absent"%} + {{ absent }}ip domain-list vrf {{ vrf_name }} + {% endif %} + {% endfor %} + {% else %} + {% set name_list = domain.name %} + {% if (name_list is defined and name_list ) %} + {{ absent }}ip domain-list {{ name_list|join(' ') }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml 
b/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml new file mode 100644 index 00000000..6305318d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml @@ -0,0 +1,43 @@ +--- +# vars file for dellemc.os10.os10_dns, +# below gives a sample configuration +# Sample variables for OS10 device +os10_dns: + domain_lookup: true + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - ip: + - 3.1.1.2 + vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + domain_list: + - name: + - dname7 + - dname8 + vrf: + - test + - test1 + - name: + - dname7 + vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml new file mode 100644 index 00000000..ab6aaca5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_dns diff --git 
a/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml new file mode 100644 index 00000000..19959956 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_dns diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md b/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md new file mode 100644 index 00000000..6932fdf6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md @@ -0,0 +1,78 @@ +ECMP role +========= + +This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The ECMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_ecmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``ecmp_group_max_paths`` | integer | Configures the number of maximum paths per ECMP group | os10 | +| ``trigger_threshold`` | integer | Configures the number of link bundle utilization trigger threshold | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
+
+When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_ecmp* role.
+
+**Sample hosts file**
+
+    leaf1 ansible_host=<ip_address>
+
+**Sample host_vars/leaf1**
+
+    hostname: leaf1
+    ansible_become: yes
+    ansible_become_method: xxxxx
+    ansible_become_pass: xxxxx
+    ansible_ssh_user: xxxxx
+    ansible_ssh_pass: xxxxx
+    ansible_network_os: dellemc.os10.os10
+    build_dir: ../temp/temp_os10
+    os10_ecmp:
+      ecmp_group_max_paths: 3
+      trigger_threshold: 50
+
+**Simple playbook to setup system — leaf.yaml**
+
+    - hosts: leaf1
+      roles:
+        - dellemc.os10.os10_ecmp
+
+**Run**
+
+    ansible-playbook -i hosts leaf.yaml
+
+(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml
new file mode 100644
index 00000000..406d1cfc
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml
new file mode 100644
index 00000000..24ccf4de
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml
new file mode 100644
index 00000000..f6448d4c
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 Dell Inc.
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_ecmp role facilitates the configuration of ECMP group attributes in devices running Dell EMC SmartFabric OS10. + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml new file mode 100644 index 00000000..012d4119 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os10 + + - name: "Generating ECMP configuration for os10" + template: + src: os10_ecmp.j2 + dest: "{{ build_dir }}/ecmp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning ECMP configuration for os10" + os10_config: + src: os10_ecmp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2 b/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2 new file mode 100644 index 00000000..6a0b04dd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2 @@ -0,0 +1,25 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure ECMP commands for OS10 devices +os10_ecmp: + ecmp_group_max_paths: 3 + trigger_threshold: 50 +#####################################} +{% if os10_ecmp is defined and os10_ecmp %} + {% if os10_ecmp.ecmp_group_max_paths is defined %} + {% if os10_ecmp.ecmp_group_max_paths %} +ip ecmp-group maximum-paths {{ os10_ecmp.ecmp_group_max_paths }} + {% else %} +no 
ip ecmp-group maximum-paths + {% endif %} + {% endif %} + {% if os10_ecmp.trigger_threshold is defined %} + {% if os10_ecmp.trigger_threshold %} +link-bundle-utilization trigger-threshold {{ os10_ecmp.trigger_threshold }} + {% else %} +no link-bundle-utilization trigger-threshold + {% endif %} + {% endif %} + +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml new file mode 100644 index 00000000..ff00dfd4 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml @@ -0,0 +1,7 @@ +--- +# vars file for dellemc.os10.os10_ecmp, +# below gives a sample configuration +# Sample variables for OS10 device +os10_ecmp: + ecmp_group_max_paths: 3 + trigger_threshold: 50 diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml new file mode 100644 index 00000000..2df95ee6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: 
network_cli + roles: + - dellemc.os10.os10_ecmp diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml new file mode 100644 index 00000000..cfd6a141 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_ecmp diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md new file mode 100644 index 00000000..0ff99bf2 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md @@ -0,0 +1,119 @@ +os10_fabric_summary +===================================== +This role is used to get show system information of all devices in the fabric. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Fabric summary role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used | +| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+ +Dependencies +------------ + +- *xmltodict* library should be installed to get show command output in dict format from XML +- To install the package use the *pip install xmltodict* command + +Example playbook +---------------- + +This example uses the *os10_fabric_summary* role to completely get the show attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the ansible_network_os variable with the corresponding Dell EMC OS10 name. + +The *os10_fabric_summary* role has a simple playbook that only references the *os10_fabric_summary* role. + +**Sample hosts file** + + site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + [spine] + site1-spine1 + site1-spine2 + site2-spine1 + site2-spine2 + [LeafAndSpineSwitch:children] + spine + +**Sample host_vars/site1-spine1** + + + cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + os10_cli_user: xxxx + os10_cli_pass: xxxx + ansible_network_os: dellemc.os10.os10 + +**Simple playbook to setup fabric summary — provision.yaml** + + --- + - name: show system summary command + hosts: localhost + gather_facts: False + connection: local + roles: + - os10_fabric_summary + +**Run** + + ansible-playbook -i hosts provision.yaml + +**Samaple Output** + + "results": [ + { + "device type": "S6010-ON", + "host": "10.11.180.21", + "hostname": "host3", + "inv_name": "site1-spine1", + "node-mac": "e4:f0:04:9b:e5:dc", + "service-tag": "D33FXC2", + "software-version": 
"10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.22", + "hostname": "host22", + "inv_name": "site1-spine2", + "node-mac": "e4:f0:04:9b:eb:dc", + "service-tag": "J33FXC2", + "software-version": "10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.24", + "hostname": "site2-spine1", + "inv_name": "site2-spine1", + "node-mac": "e4:f0:04:9b:ee:dc", + "service-tag": "343FXC2", + "software-version": "10.4.9999EX" + }, + { + "device type": "S6010-ON", + "host": "10.11.180.23", + "hostname": "site2-spine2", + "inv_name": "site2-spine2", + "node-mac": "e4:f0:04:9b:f1:dc", + "service-tag": "543FXC2", + "software-version": "10.4.9999EX" + } + ] + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml new file mode 100644 index 00000000..428d79f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + role_name: os10_fabric_summary + author: Dell EMC Networking Engineering + description: This role provides the system network information of all the switches in the fabric Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml new file mode 100644 index 00000000..784d6642 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: "Get Dell EMC OS10 Show system summary" + os10_command: + commands: ['show system | display-xml'] + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_system +- name: "set fact to form database" + set_fact: + output: "{{ output|default([])+ [{'inv_name': item.item, 'host': item.invocation.module_args.provider.host, 'stdout_show_system': item.stdout}] }}" + loop: "{{ show_system.results }}" +- name: "debug the output of system summary DB" + debug: var=output +- name: "show system network call to lib " + show_system_network_summary: + cli_responses: "{{ output }}" + output_type: "{{ output_method if output_method is defined else 'json' }}" + register: show_system_network_summary +- name: "debug the output of system summary DB" + debug: var=show_system_network_summary diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1 new file mode 100644 index 00000000..36a99cdb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1 @@ -0,0 +1,12 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 + diff --git 
a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2 new file mode 100644 index 00000000..36a99cdb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2 @@ -0,0 +1,12 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 + diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1 new file mode 100644 index 00000000..36a99cdb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1 @@ -0,0 +1,12 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 + diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2 new file mode 100644 index 00000000..36a99cdb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2 @@ -0,0 +1,12 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 + diff --git 
a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml new file mode 100644 index 00000000..ff511df5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml @@ -0,0 +1,14 @@ +--- +site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 +site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 +site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 +site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + +[spine] +site1-spine1 +site1-spine2 +site2-spine1 +site2-spine2 + +[LeafAndSpineSwitch:children] +spine diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml new file mode 100644 index 00000000..e865c790 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml @@ -0,0 +1,7 @@ +--- +- name: setup for os10 fabric summary + hosts: localhost + gather_facts: False + connection: local + roles: + - dellemc.os10.os10_fabric_summary diff --git 
a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE
new file mode 100644
index 00000000..2c9b8e1f
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md new file mode 100644 index 00000000..dd98aa95 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md @@ -0,0 +1,152 @@ +ACL flow-based monitor role +=========================== + +This role facilitates configuring ACL flow-based monitoring attributes. Flow-based mirroring is a mirroring session in which traffic matches specified policies that are mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions (including port monitor sessions). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The ACL flow-based role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_flow_monitor` (dictionary) with session ID key (in *session ID* format; ID can be 1 to 18) +- Variables and values are case-sensitive + +**session ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``session_type`` | string: local_*_,rspan-source,erspan-source | Configures the monitoring session type | os10 | +| ``description`` | string | Configures the monitor session description | os10 | +| ``port_match`` | list | Displays a list of interfaces with location source and 
destination | os10 | +| ``port_match.interface_name`` | string | Configures the interface | os10 | +| ``port_match.location`` | string: source,destination | Configures the source/destination of an interface | os10 | +| ``port_match.state`` | string: absent,present\* | Deletes the interface if set to absent | os10 | +| ``flow_based`` | boolean | Enables flow-based monitoring | os10 | +| ``shutdown`` | string: up,down\* | Enable/disables the monitoring session | os10 | +| ``state`` | string: absent,present\* | Deletes the monitoring session corresponding to the session ID if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device 
attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_flow_monitor* role to configure session monitor configuration. It creates a *hosts* file with the switch details and corresponding variables. The hosts file defines the `anisble_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. +It writes a simple playbook that only references the *os10_flow_monitor* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + os10_flow_monitor: + session 1: + session_type: local + description: "Discription goes here" + port_match: + - interface_name: ethernet 1/1/4 + location: source + state: present + - interface_name: ethernet 1/1/3 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 2: + session_type: local + description: "Discription of session goes here" + port_match: + - interface_name: ethernet 1/1/6 + location: source + state: present + - interface_name: ethernet 1/1/7 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 3: + state: absent + os10_acl: + - name: testflow + type: ipv4 + description: testflow description + extended: true + entries: + - number: 5 + permit: true + protocol: icmp + source: any + destination: any + other_options: capture session 1 count + state: present + - number: 10 + permit: true + protocol: ip + source: 102.1.1.0/24 + destination: any + other_option: capture session 1 count byte + state: present + - number: 15 + permit: false + protocol: udp + source: any + destination: any + other_options: capture session 2 count byte + state: present + - number: 20 + permit: false + protocol: tcp + source: any + destination: any + other_options: capture session 2 count byte + state: present + stage_ingress: + - name: ethernet 1/1/1 + state: present + +> **NOTE**: Destination port should not be an L2/L3 port which can be configured using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_flow_monitor + - dellemc.os10.os10_acl + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml new file mode 100644 index 00000000..3cc17642 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_flow_monitor diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml new file mode 100644 index 00000000..91b1038e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_flow_moitor diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml new file mode 100644 index 00000000..c81fad54 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_flow_monitor role facilitates the configuration of ACL flow based monitor attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - os10 + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml new file mode 100644 index 00000000..b5bf0bc3 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for Dellos10 + - name: "Generating Flow monitor configuration for os10" + template: + src: os10_flow_monitor.j2 + dest: "{{ build_dir }}/flow_monitor10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning Flow monitor configuration for os10" + os10_config: + src: os10_flow_monitor.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2 b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2 new file mode 100644 index 00000000..535c6180 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2 @@ -0,0 +1,86 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################## +Purpose: +Configure Flow monitor commands for os10 Devices +os10_flow_monitor: + session 1: + session_type: local + description: "Discription goes here" + port_match: + - interface_name: ethernet 1/1/4 + location: source + state: present + - interface_name: ethernet 1/1/3 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 2: + session_type: local + description: "Discription of session goes 
here" + port_match: + - interface_name: ethernet 1/1/6 + location: source + state: present + - interface_name: ethernet 1/1/7 + location: destination + state: present + flow_based: false + shutdown: up + state: present + session 3: + state: absent +#########################################} +{% if os10_flow_monitor is defined and os10_flow_monitor %} +{% for key in os10_flow_monitor.keys() %} +{% set session_id = key.split(" ") %} +{% set session_vars = os10_flow_monitor[key] %} +{% set session_type = "" %} + +{% if session_vars.session_type is defined and session_vars.session_type != "local" %} +{% set session_type = "type " + session_vars.session_type %} +{% endif %} + + {% if session_vars.state is defined and session_vars.state == "absent" %} +no monitor session {{ session_id[1] }} + {% else %} +monitor session {{ session_id[1] }} {{ session_type }} + + {% if session_vars.description is defined and session_vars.description %} + description "{{ session_vars.description }}" + {% else %} + no description + {% endif %} + + {% if session_vars.port_match is defined and session_vars.port_match %} + {% for match_vars in session_vars.port_match %} + {% set negate = "" %} + {% if match_vars["state"] is defined and match_vars["state"] == "absent" %} + {% set negate = "no " %} + {% endif %} + {% set location = "source" %} + {% if match_vars["location"] is defined and match_vars["location"] == "destination" %} + {% set location = "destination" %} + {% endif %} + {{ negate }}{{ location }} interface {{ match_vars["interface_name"] }} + {% endfor %} + {% endif %} + + {% if session_vars.shutdown is defined and session_vars.shutdown == "up" %} + no shut + {% else %} + shut + {% endif %} + +{% if session_vars.flow_based is defined %} + {% if session_vars.flow_based %} + flow-based enable + {% else %} + no flow-based enable + {% endif %} +{% endif %} + +{% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml 
b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml new file mode 100644 index 00000000..750932c8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml @@ -0,0 +1,33 @@ +--- +# vars file for dellemc.os10.os10_flow_monitor, +# below gives a example configuration +# Sample variables for OS10 device +os10_flow_monitor: + session 1: + session_type: local + description: "Discription goes here" + port_match: + - interface_name: ethernet 1/1/4 + location: source + state: present + - interface_name: ethernet 1/1/3 + location: destination + state: present + flow_based: true + shutdown: up + state: present + session 2: + session_type: local + description: "Discription of session goes here" + port_match: + - interface_name: ethernet 1/1/6 + location: source + state: present + - interface_name: ethernet 1/1/7 + location: destination + state: present + flow_based: false + shutdown: up + state: present + session 3: + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml 
b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml new file mode 100644 index 00000000..44a56b7e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_flow_monitor diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml new file mode 100644 index 00000000..0943cd2c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_flow_monitor diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md new file mode 100644 index 00000000..9ae8f731 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md @@ -0,0 +1,73 @@ +Image upgrade role +=================================== + +This role facilitates upgrades or installation of a software image. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Image upgrade role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_image_upgrade keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``operation_type`` | string: cancel,install | Displays the type of image operation | os10 | +| ``software_image_url`` | string | Configures the URL path to the image file | os10 | +| ``software_version`` | string | Displays the software version of the image file | os10 | +| ``number_of_retries`` | int | Configures the number of retries to check the status of image install process | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_image_upgrade* role to upgrade/install software image. 
It creates a *hosts* file with the switch details, corresponding *host_vars* file, and a simple playbook that references the *os10_image_upgrade* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + os10_image_upgrade: + operation_type: install + software_image_url: tftp://10.16.148.8/PKGS_OS10-Enterprise-10.2.9999E.5790-installer-x86_64.bin + software_version: 10.2.9999E + number_of_retries: 50 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_image_upgrade + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml new file mode 100644 index 00000000..809f7a43 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_image_upgrade diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml new file mode 100644 index 00000000..7bfc6bc7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_image_upgrade diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml new file mode 100644 index 00000000..b35a5382 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_image_upgrade role facilitates install/upgrade software image for OS10 switches + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml new file mode 100644 index 00000000..ee2d557d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml @@ -0,0 +1,37 @@ +--- + - block: + - name: "Process image {{ os10_image_upgrade.operation_type }} operation" + vars: + command_postfix: "{{ os10_image_upgrade.operation_type }} {{ '' if os10_image_upgrade.operation_type == 'cancel' else os10_image_upgrade.software_image_url }}" + os10_command: + commands: + - command: "image {{ command_postfix }}" + prompt: "yes/no]:" + answer: "yes" + register: result + - name: "Get image {{ os10_image_upgrade.operation_type }} operation status" + os10_command: + commands: "show image status" + register: image_status + - name: "Validate image status" + fail: msg="Image {{ os10_image_upgrade.operation_type }} operation Failed" + when: image_status.stdout.0.find("Failed") > 1 + - name: "Wait for image {{ os10_image_upgrade.operation_type }} operation" + os10_command: + commands: "show image status" + register: result + until: result.stdout.0.find("In progress") < 1 + retries: "{{ os10_image_upgrade.number_of_retries }}" + delay: 15 + - name: "Wait for image {{ os10_image_upgrade.operation_type }} operation" + os10_command: + commands: "show image status" + register: image_status + - name: "Validate software version" + fail: msg="Image {{ os10_image_upgrade.operation_type }} operation Failed" + when: image_status.stdout.0.find("Failed") > 1 + - name: "Image {{ os10_image_upgrade.operation_type }} status" + debug: msg="Image {{ 
os10_image_upgrade.operation_type }} successful" + when: image_status.stdout.0.find("Failed") <= -1 + when: + - os10_image_upgrade.operation_type == "install" diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml new file mode 100644 index 00000000..66507dc7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml @@ -0,0 +1,9 @@ +--- +# vars file for dellemc.os10.os10_image_upgrade, +# below gives an example configuration + +os10_image_upgrade: + operation_type: install + software_image_url: http://10.16.127.7//tftpboot/NGOS/FMB-ar-rel_10.5.1-release/AmazonInstallers/last_good/PKGS_OS10-Enterprise-10.5.1.0.124stretch-installer-x86_64.bin + software_version: 10.5.1.0 + number_of_retries: 50 diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml new file mode 100644 index 00000000..a2ed1161 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os10.os10_image_upgrade diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml new file mode 100644 index 00000000..a9fa1154 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_image_upgrade, below gives an example configuration diff 
--git a/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE b/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/README.md b/ansible_collections/dellemc/os10/roles/os10_interface/README.md new file mode 100644 index 00000000..bbb4f8ee --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/README.md @@ -0,0 +1,178 @@ +Interface role +============== + +This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Interface role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name +- For physical interfaces, the interface name must be in * * format; for logical interfaces, the interface must be in * * format; physical interface name can be *ethernet 1/1/32* +- For interface ranges, the interface name must be in *range * format; *range ethernet 1/1/1-1/1/4* +- Logical interface names can be *vlan 1* or *port-channel 1* +- Variables and values are case-sensitive + +> **NOTE**: Only define supported variables for the interface type, and do not define the *switchport* variable for a logical interface. 
+ +**interface name keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``desc`` | string | Configures a single line interface description | os10 | +| ``portmode`` | string | Configures port-mode according to the device type | os10 | +| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os10 | +| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os10 | +| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (1280 to 65535) | os10 | +| ``fanout`` | string:dual, single; string:10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x (os10) | Configures fanout to the appropriate value | os10 | +| ``suppress_ra`` | string; present,absent | Configures IPv6 router advertisements if set to present | os10 | +| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os10 | +| ``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address for DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | os10 | +| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | os10 | +| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os10 | +| ``min_ra`` | string | Configures RA minimum interval time period | os10 | +| ``max_ra`` | string | Configures RA maximum interval time period | os10 | +| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 | +| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface | os10 | +| ``virtual_gateway_ip`` | string | Configures an anycast 
gateway IP address for a VxLAN virtual network as well as VLAN interfaces| os10 | +| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 | +| ``state_ipv6`` | string: absent,present\* | Deletes the IPV6 address if set to absent | os10 | +| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os10 | +| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os10 | +| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os10 | +| ``flowcontrol`` | dictionary | Configures the flowcontrol attribute (see ``flowcontrol.*``) | os10 | +| ``flowcontrol.mode`` | string: receive,transmit | Configures the flowcontrol mode | os10 | +| ``flowcontrol.enable`` | string: on,off | Configures the flowcontrol mode on | os10 | +| ``flowcontrol.state`` | string: absent,present\* | Deletes the flowcontrol if set to absent | os10 | +| ``ipv6_bgp_unnum`` | dictionary | Configures the IPv6 BGP unnum attributes (see ``ipv6_bgp_unnum.*``) below | os10 | +| ``ipv6_bgp_unnum.state`` | string: absent,present\* | Disables auto discovery of BGP unnumbered peer if set to absent | os10 | +| ``ipv6_bgp_unnum.peergroup_type`` | string: ebgp,ibgp | Specifies the type of template to inherit from | os10 | + +| ``stp_rpvst_default_behaviour`` | boolean: false,true | Configures RPVST default behaviour of BPDU's when set to True which is default | os10 | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_interface* role to set up description, MTU, admin status, port mode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os10_interface* role. + +**Sample hosts file** + + leaf3 ansible_host= + +**Sample host_vars/leaf3** + + hostname: "leaf3" + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_interface: + ethernet 1/1/32: + desc: "Connected to Core 2" + mtu: 2500 + stp_rpvst_default_behaviour: False + portmode: + admin: up + switchport: False + ip_and_mask: + ip_type_dynamic: True + ipv6_type_dynamic: True + ethernet 1/1/12: + desc: "ipv6 auto config" + switchport: False + mtu: 2500 + admin: up + ipv6_autoconfig: True + ethernet 1/1/14: + fanout: 10g-4x + ethernet 1/1/13: + desc: "set ipv6 address" + switchport: False + admin: up + ipv6_and_mask: 2001:4898:5809:faa2::10/126 + state_ipv6: present + ethernet 1/1/1: + desc: "Connected to Leaf1" + portmode: "trunk" + switchport: True + suppress_ra: present + admin: up + stp_rpvst_default_behaviour: False + ethernet 1/1/3: + desc: site2-spine2 + ip_and_mask: 10.9.0.4/31 + mtu: 9216 + switchport: False + admin: up + flowcontrol: + mode: "receive" + enable: "on" + state: "present" + + vlan 100: + ip_and_mask: + ipv6_and_mask: 2001:4898:5808:ffaf::1/64 + state_ipv6: present + ip_helper: + - ip: 10.0.0.33 + state: present + admin: up + range ethernet 1/1/1-1/1/32: + mtu: 2500 + port-channel 10: + admin: up + switchport: False + suppress_ra: + stp_rpvst_default_behaviour: True + ipv6_bgp_unnum: + state: present + peergroup_type: ebgp + vlan 10: + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + 
admin: up + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf3 + roles: + - dellemc.os10.os10_interface + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml new file mode 100644 index 00000000..6f214632 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_interface diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml new file mode 100644 index 00000000..72e65874 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_interface diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml new file mode 100644 index 00000000..8f0bfd3b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_interface role facilitates the configuration of interface attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml new file mode 100644 index 00000000..c8656c51 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating interface configuration for os10" + template: + src: os10_interface.j2 + dest: "{{ build_dir }}/intf10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning interface configuration for os10" + os10_config: + src: os10_interface.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2 b/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2 new file mode 100644 index 00000000..c4dc61b7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2 @@ -0,0 +1,258 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{##################################################### +Purpose: +Configure interface commands for os10 Devices. 
+os10_interface: + ethernet 1/1/31: + desc: "OS10 intf" + portmode: trunk + mtu: 2000 + switchport: False + admin: up + ip_type_dynamic: True + ip_and_mask: "192.168.11.1/24" + virtual_gateway_ip: "172.17.17.1" + suppress_ra: present + ipv6_autoconfig: True + ipv6_and_mask: 2001:4898:5808:ffa2::5/126 + state_ipv6: present + ip_helper: + - ip: 10.0.0.36 + state: present + flowcontrol: + mode: "receive" + enable: "on" + state: "present" + ethernet 1/1/3: + fanout: 10g-4x + range ethernet 1/1/1-1/1/4: + switchport: True + admin: down + stp_rpvst_default_behaviour: False + virtual-network 100: + vrf: "test" + ip_and_mask: "15.1.1.1/24" + virtual_gateway_ip: "15.1.1.254" + admin: up + port-channel 10: + admin: up + switchport: False + suppress_ra: + ipv6_bgp_unnum: + state: present + peergroup_type: ebgp + port-channel 20: + admin: up + stp_rpvst_default_behaviour: True + vlan 10: + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + admin: up + +#####################################################} +{% if os10_interface is defined and os10_interface %} +{% for key in os10_interface.keys() %} + {% set intf_vars = os10_interface[key] %} + {% set port = key.split(" ") %} + {% set interface_key = "" %} + {% if intf_vars.fanout is defined %} + {% if intf_vars.fanout %} +interface breakout {{ port[1] }} map {{ intf_vars.fanout }} + {% else %} +no interface breakout {{ port[1] }} + {% endif %} + {% endif %} +{% endfor %} +{% for key in os10_interface.keys() %} + {% set intf_vars = os10_interface[key] %} + {% set port = key.split(" ") %} + {% set interface_key = "" %} + {% if (intf_vars.fanout is defined and not intf_vars.fanout) or (intf_vars.fanout is not defined) %} + {% if key.startswith('range')%} + {% set interface_key = port[0] + " " + port[1] + port[2] %} + {% else %} + {% set interface_key = port[0] + port[1] %} + {% endif %} +interface {{ interface_key }} + {% if intf_vars.desc is defined %} + {% if intf_vars.desc %} 
+ {% if intf_vars.desc|wordcount > 1 %} + description "{{ intf_vars.desc }}" + {% else %} + description {{ intf_vars.desc }} + {% endif %} + {% else %} + no description + {% endif %} + {% endif %} + + + + {% if intf_vars.switchport is defined %} + {% if intf_vars.switchport %} + {% if intf_vars.portmode is defined and intf_vars.portmode %} + switchport mode {{ intf_vars.portmode }} + {% endif %} + {% else %} + no switchport + {% endif %} + {% else %} + {% if intf_vars.portmode is defined %} + {% if intf_vars.portmode %} + switchport mode {{ intf_vars.portmode }} + {% else %} + no switchport + {% endif %} + {% endif %} + {% endif %} + + {% if intf_vars.mtu is defined %} + {% if intf_vars.mtu %} + mtu {{ intf_vars.mtu }} + {% else %} + no mtu + {% endif %} + {% endif %} + + {% if intf_vars.ip_type_dynamic is defined %} + {% if intf_vars.ip_type_dynamic %} + ip address dhcp + {% else %} + no ip address + {% endif %} + {% else %} + {% if intf_vars.vrf is defined %} + {% if intf_vars.vrf %} + ip vrf forwarding {{ intf_vars.vrf }} + {% else %} + no ip address vrf + {% endif %} + {% endif %} + {% if intf_vars.ip_and_mask is defined %} + {% if intf_vars.ip_and_mask %} + ip address {{ intf_vars.ip_and_mask }} + {% else %} + no ip address + {% endif %} + {% endif %} + {% endif %} + + {% if intf_vars.virtual_gateway_ip is defined %} + {% if intf_vars.virtual_gateway_ip %} + ip virtual-router address {{ intf_vars.virtual_gateway_ip }} + {% else %} + no ip virtual-router address + {% endif %} + {% endif %} + + {% if intf_vars.virtual_gateway_ipv6 is defined %} + {% if intf_vars.virtual_gateway_ipv6 %} + ipv6 virtual-router address {{ intf_vars.virtual_gateway_ipv6 }} + {% else %} + no ipv6 virtual-router address + {% endif %} + {% endif %} + + {% if intf_vars.suppress_ra is defined %} + {% if intf_vars.suppress_ra == "present" %} + no ipv6 nd send-ra + {% else %} + ipv6 nd send-ra + {% endif %} + {% endif %} + + {% if intf_vars.stp_rpvst_default_behaviour is defined %} + {% if 
intf_vars.stp_rpvst_default_behaviour %} + spanning-tree rapid-pvst default-behavior + {% else %} + no spanning-tree rapid-pvst default-behavior + {% endif %} + {% endif %} + + {% if intf_vars.ipv6_autoconfig is defined %} + {% if intf_vars.ipv6_autoconfig %} + ipv6 address autoconfig + {% else %} + no ipv6 address + {% endif %} + {% elif intf_vars.ipv6_type_dynamic is defined %} + {% if intf_vars.ipv6_type_dynamic %} + ipv6 address dhcp + {% else %} + no ipv6 address + {% endif %} + {% else %} + {% if intf_vars.ipv6_and_mask is defined %} + {% if intf_vars.ipv6_and_mask %} + {% if intf_vars.state_ipv6 is defined and intf_vars.state_ipv6 == "absent" %} + no ipv6 address {{ intf_vars.ipv6_and_mask }} + {% else %} + ipv6 address {{ intf_vars.ipv6_and_mask }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if intf_vars.max_ra is defined %} + {% if intf_vars.max_ra %} + ipv6 nd max-ra-interval {{ intf_vars.max_ra }} + {% else %} + no ipv6 nd max-ra-interval + {% endif %} + {% endif %} + {% if intf_vars.min_ra is defined %} + {% if intf_vars.min_ra %} + ipv6 nd min-ra-interval {{ intf_vars.min_ra }} + {% else %} + no ipv6 nd min-ra-interval + {% endif %} + {% endif %} + {% if intf_vars.ip_helper is defined and intf_vars.ip_helper %} + {% for helper in intf_vars.ip_helper %} + {% if helper.ip is defined and helper.ip %} + {% if helper.state is defined and helper.state == "absent" %} + no ip helper-address {{ helper.ip }} + {% else %} + ip helper-address {{ helper.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if intf_vars.flowcontrol is defined and intf_vars.flowcontrol %} + {% if intf_vars.flowcontrol.mode is defined %} + {% if intf_vars.flowcontrol.mode %} + {% if intf_vars.flowcontrol.state is defined and intf_vars.flowcontrol.state == "absent" %} + no flowcontrol {{ intf_vars.flowcontrol.mode }} + {% else %} + {% if intf_vars.flowcontrol.enable is defined %} + {% if intf_vars.flowcontrol.enable == "on" %} + flowcontrol {{ 
intf_vars.flowcontrol.mode }} on + {% else %} + flowcontrol {{ intf_vars.flowcontrol.mode }} off + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + {% if intf_vars.admin is defined %} + {% if intf_vars.admin == "up" %} + no shutdown + {% elif intf_vars.admin == "down" %} + shutdown + {% endif %} + {% endif %} + {% if intf_vars.ipv6_bgp_unnum is defined and intf_vars.ipv6_bgp_unnum %} + {% if intf_vars.ipv6_bgp_unnum.state == "absent" %} + no ipv6 bgp unnumbered + {% elif intf_vars.ipv6_bgp_unnum.state == "present" and intf_vars.ipv6_bgp_unnum.peergroup_type == "ebgp" %} + ipv6 bgp unnumbered ebgp-template + {% elif intf_vars.ipv6_bgp_unnum.state == "present" and intf_vars.ipv6_bgp_unnum.peergroup_type == "ibgp" %} + ipv6 bgp unnumbered ibgp-template + {% endif %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml new file mode 100644 index 00000000..d77b7fc6 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml @@ -0,0 +1,72 @@ +--- +# vars file for dellemc.os10.os10_interface +# Sample variables for OS10 device +os10_interface: + ethernet 1/1/32: + desc: "Connected to Core 2" + mtu: 2500 + portmode: + admin: up + switchport: False + ip_and_mask: + ip_type_dynamic: True + ipv6_type_dynamic: True + ethernet 1/1/12: + desc: "ipv6 auto config" + switchport: False + mtu: 2500 + admin: up + ipv6_autoconfig: True + ethernet 1/1/14: + fanout: 10g-4x + ethernet 1/1/13: + desc: "set ipv6 address" + switchport: False + admin: up + ipv6_and_mask: 2001:4898:5809:faa2::10/126 + state_ipv6: present + ethernet 1/1/1: + desc: "Connected to Leaf1" + portmode: "trunk" + switchport: True + suppress_ra: present + admin: up + stp_rpvst_default_behaviour: False + ethernet 1/1/3: + desc: site2-spine2 + ip_and_mask: 10.9.0.4/31 + mtu: 9216 + switchport: False + admin: up + flowcontrol: + mode: "receive" + enable: "on" + state: "present" + stp_rpvst_default_behaviour: True + vlan 100: + ip_and_mask: + ipv6_and_mask: 2001:4898:5808:ffaf::1/64 + state_ipv6: present + ip_helper: + - ip: 10.0.0.33 + state: present + admin: up + range ethernet 1/1/1-1/1/32: + mtu: 2500 + admin: up + switchport: False + port-channel 10: + admin: up + switchport: False + suppress_ra: + ipv6_bgp_unnum: + state: present + peergroup_type: ebgp + port-channel 20: + admin: up + stp_rpvst_default_behaviour: False + vlan 10: + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + admin: up diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml new file mode 100644 index 00000000..5b1ac094 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_interface diff --git 
a/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml new file mode 100644 index 00000000..247ea157 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_interface diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE b/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/README.md b/ansible_collections/dellemc/os10/roles/os10_lag/README.md new file mode 100644 index 00000000..eb679dcf --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/README.md @@ -0,0 +1,103 @@ +LAG role +======== + +This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG and minimum required link. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The LAG role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Object drives the tasks in this role +- `os10_lag` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- `os10_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po ` format (1 to 128) +- Variables and values are case-sensitive + +**port-channel ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os10 | +| ``min_links`` | integer | Configures the minimum number of links in the LAG that must 
be in *operup* status (1 to 32) | os10 | +| ``max_bundle_size`` | integer | Configures the maximum bundle size for the port channel | os10 | +| ``lacp_system_priority`` | integer | Configures the LACP system-priority value | os10 | +| ``lacp_fallback_enable`` | boolean | Configures LACP fallback | os10 | +| ``channel_members`` | list | Specifies the list of port members to be associated to the port-channel (see ``channel_members.*``) | os10 | +| ``channel_members.port`` | string | Specifies valid interface names to be configured as port-channel members | os10 | +| ``channel_members.mode`` | string: active,passive,on | Configures mode of channel members | os10 | +| ``channel_members.port_priority`` | integer | Configures port priority on devices for channel members | os10 | +| ``channel_members.lacp_rate_fast`` | boolean | Configures the LACP rate as fast if set to true | os10 | +| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_lag* role to setup port channel ID and description, and configures hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel either in static or dynamic mode. 
You can also delete the LAG with the port-channel ID or delete the members associated to it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_lag* role. + +**Sample hosts file** + + leaf1 ansible_host= <ip_address> + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: ethernet 1/1/31 + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_lag + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml new file mode 100644 index 00000000..6eaa54ea --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_lag diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml new file mode 100644 index 00000000..06b4bef8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_lag diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml new file mode 100644 index 00000000..6fcd3c68 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_lag role facilitates the configuration of LAG attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml new file mode 100644 index 00000000..e103552f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating LAG configuration for os10" + template: + src: os10_lag.j2 + dest: "{{ build_dir }}/lag10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning LAG configuration for os10" + os10_config: + src: os10_lag.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2 b/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2 new file mode 100644 index 00000000..722ff5ff --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2 @@ -0,0 +1,89 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure LAG commands for os10 Devices. 
+os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: ethernet 1/1/31 + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: present +################################} +{% if os10_lag is defined and os10_lag %} +{% for key in os10_lag.keys() %} +{% set channel_id = key.split(" ") %} +{% set lag_vars = os10_lag[key] %} + + {% if lag_vars.lacp_system_priority is defined %} + {% if lag_vars.lacp_system_priority %} +lacp system-priority {{ lag_vars.lacp_system_priority }} + {% else %} +no lacp system-priority + {% endif %} + {% endif %} + + {% if lag_vars.state is defined and lag_vars.state == "absent" %} +no interface port-channel {{ channel_id[1] }} + {% else %} +interface port-channel{{ channel_id[1] }} + {% if lag_vars.min_links is defined %} + {% if lag_vars.min_links %} + minimum-links {{ lag_vars.min_links }} + {% else %} + no minimum-links + {% endif %} + {% endif %} + {% if lag_vars.max_bundle_size is defined %} + {% if lag_vars.max_bundle_size %} + lacp max-bundle {{ lag_vars.max_bundle_size }} + {% else %} + no lacp max-bundle + {% endif %} + {% endif %} + {% if lag_vars.lacp_fallback_enable is defined and lag_vars.lacp_fallback_enable %} + lacp fallback enable + {% endif %} + {% if lag_vars.channel_members is defined %} + {% for ports in lag_vars.channel_members %} + {% if ports.port is defined and ports.port %} +interface {{ ports.port }} + {% if lag_vars.type is defined and lag_vars.type == "static" %} + {% if ports.mode is defined and ports.mode == "on" %} + channel-group {{ channel_id[1] }} mode on + {% else %} + no channel-group + {% endif %} + {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %} + {% if ports.mode is defined and ports.mode == "active" or ports.mode == "passive" %} + channel-group {{ channel_id[1] }} mode {{ ports.mode }} + {% else %} + no channel-group + {% endif %} + {% endif %} + {% if ports.lacp_rate_fast is defined %} + {% if 
ports.lacp_rate_fast %} + lacp rate fast + {% else %} + no lacp rate fast + {% endif %} + {% endif %} + {% if ports.port_priority is defined %} + {% if ports.port_priority %} + lacp port-priority {{ ports.port_priority }} + {% else %} + no lacp port-priority + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml new file mode 100644 index 00000000..52518981 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml @@ -0,0 +1,15 @@ +--- +# vars file for dellemc.os10.os10_lag +# Sample variables for OS10 device +os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: ethernet 1/1/31 + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml new file mode 100644 index 
00000000..6c130a0e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_lag diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml new file mode 100644 index 00000000..5b1cd5b1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_lag diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/README.md b/ansible_collections/dellemc/os10/roles/os10_lldp/README.md new file mode 100644 index 00000000..0c08af4d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/README.md @@ -0,0 +1,149 @@ +LLDP role +========= + +This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at a global and interface level. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at global and interface level. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The LLDP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_lldp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``enable`` | boolean | Enables or disables LLDP at a global level | os10 | +| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os10 | +| ``reinit`` | integer | Configures the reinit value (1 to 10) | os10 | +| ``timer`` | integer | Configures the timer value (5 to 254) | os10 | +| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os10 | +| ``advertise.med`` | dictionary | Configures MED TLVs 
advertisement (see ``med_tlv.*``) | os10 | +| ``med.fast_start_repeat_count`` | integer | Configures med fast start repeat count value (1 to 10) | os10 | +| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os10 | +| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os10 | +| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os10 | +| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os10 | +| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os10 | +| ``application.vlan_type`` | string: tag, untag | Configures the VLAN type for the application MED TLVs advertisement | os10 | +| ``application.network_policy_id`` | integer | Configures network policy ID for the application MED TLVs advertisement | os10 | +| ``application.state`` | string: present\*,absent | Deletes the application if set to absent | os10 | +| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os10 | +| ``local_interface.`` | dictionary | Configures LLDP at the interface level (see ``.*``) | os10 | +| ``.mode`` | string: rx,tx | Configures LLDP mode configuration at the interface level | os10 | +| ``.mode_state`` | string: absent,present | Configures transmit/receive at the interface level| os10 | +| ``.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os10 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os10 | +| ``med.enable`` | boolean | Enables interface level MED capabilities | os10 | +| ``med.tlv`` | string | Configures MED TLV advertisement at interface level | os10 | +| 
``med.tlv_state`` | string: present\*,absent | Deletes the interface level MED configuration if set to absent | os10 | +| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os10 | +| ``application.network_policy_id`` | integer | Configures the *network_policy_id* for the application of MED | os10 | +| ``application.state`` | string: present\*,absent | Deletes the associated network policy ID for the application if set to absent.| os10 | +| ``advertise.tlv`` | list | Configures TLVs advertisement at interface level (see `.tlv.*`) | os10 | +| ``tlv.name`` | string: basic-tlv,dcbxp,dcbxp-appln,dot1-tlv,dot3-tlv | Configures corresponding to the TLV name specified at the interface | os10 | +| ``tlv.value`` | string | Specifies corresponding TLV value according to the name as a string | os10 | +| ``tlv.state`` | string: present\*,absent | Deletes the interface level TLVs advertisement if set to absent | os10 | + + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_lldp* role to configure protocol lldp. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_lldp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_lldp: + enable: false + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + ethernet 1/1/1: + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: present + application: + - network_policy_id: 4 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_lldp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml new file mode 100644 index 00000000..464b4d96 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_lldp diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml new file mode 100644 index 00000000..f49343b1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_lldp diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml new file mode 100644 index 00000000..7d843eed --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os10_lldp role facilitates the configuration of Link Layer Discovery Protocol(LLDP) attributes in devices + running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml new file mode 100644 index 00000000..fc86a9d4 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for Dellos10 + - name: "Generating LLDP configuration for os10" + template: + src: os10_lldp.j2 + dest: "{{ build_dir }}/lldp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning LLDP configuration for os10" + os10_config: + src: os10_lldp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2 b/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2 new file mode 100644 index 00000000..6d362e21 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2 @@ -0,0 +1,195 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################################### +Purpose: +Configure LLDP commands for os10 Devices. 
+ +os10_lldp: + enable: false + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + ethernet 1/1/1: + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv : inventory + tlv_state: present + application: + - network_policy_id: 4 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present + +{###############################################################################################} +{% if os10_lldp is defined and os10_lldp %} +{% for key,value in os10_lldp.items() %} + {% if key == "enable" %} + {% if value %} +lldp enable + {% else %} +no lldp enable + {% endif %} + {% elif key == "reinit" %} + {% if value %} +lldp reinit {{ value }} + {% else %} +no lldp reinit + {% endif %} + {% elif key == "multiplier" %} + {% if value %} +lldp holdtime-multiplier {{ value }} + {% else %} +no lldp holdtime-multiplier + {% endif %} + {% elif key == "timer" %} + {% if value %} +lldp timer {{ value }} + {% else %} +no lldp timer + {% endif %} + {% elif key == "advertise" %} + {% if value %} + {% for ke,valu in value.items() %} + {% if ke == "med" %} + {% if valu %} + {% for med,val in valu.items() %} + {% if med == "fast_start_repeat_count" %} + {% if val %} +lldp med fast-start-repeat-count {{ val }} + {% else %} +no lldp med fast-start-repeat-count + {% endif %} + {% elif med == "application" %} + {% if val %} + {% for item in val %} + {% if item.network_policy_id is defined and item.network_policy_id %} + {% if item.state is defined and item.state == "absent" %} +no lldp med network-policy {{ 
item.network_policy_id }} + {% else %} + {% if item.name is defined and item.name %} + {% if item.vlan_id is defined and item.vlan_id %} + {% if item.vlan_type is defined and item.vlan_type %} + {% if item.l2_priority is defined and item.l2_priority %} + {% if item.code_point_value is defined and item.code_point_value %} +lldp med network-policy {{ item.network_policy_id }} app {{ item.name }} vlan {{ item.vlan_id }} vlan-type {{ item.vlan_type }} priority {{ item.l2_priority }} dscp {{ item.code_point_value }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} +{% if os10_lldp is defined and os10_lldp %} +{% for key in os10_lldp.keys() %} +{% set lldp_vars = os10_lldp[key] %} +{% if key == "local_interface" %} + {% for intf in lldp_vars.keys() %} + {% set intf_vars = lldp_vars[intf] %} +interface {{ intf }} + {% if intf_vars.mode is defined and intf_vars.mode %} + {% if intf_vars.mode_state is defined and intf_vars.mode_state == "absent" %} + {% if intf_vars.mode == "rx" %} + no lldp receive + {% elif intf_vars.mode == "tx" %} + no lldp transmit + {% endif %} + {% else %} + {% if intf_vars.mode == "rx" %} + lldp receive + {% elif intf_vars.mode == "tx" %} + lldp transmit + {% endif %} + {% endif %} + {% endif %} + + {% if intf_vars.advertise is defined and intf_vars.advertise %} + {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %} + {% if intf_vars.advertise.med.enable is defined %} + {% if intf_vars.advertise.med.enable %} + lldp med enable + {% else %} + lldp med disable + {% endif %} + {% endif %} + {% if intf_vars.advertise.med.tlv is defined and intf_vars.advertise.med.tlv %} + {% if intf_vars.advertise.med.tlv_state is defined and intf_vars.advertise.med.tlv_state == "absent" %} + no lldp med tlv-select {{ intf_vars.advertise.med.tlv }} + 
{% else %} + lldp med tlv-select {{ intf_vars.advertise.med.tlv }} + {% endif %} + {% endif %} + {% if intf_vars.advertise.med.application is defined and intf_vars.advertise.med.application %} + {% for item in intf_vars.advertise.med.application %} + {% if item.network_policy_id is defined and item.network_policy_id %} + {% if item.state is defined and item.state == "absent" %} + lldp med network-policy remove {{ item.network_policy_id }} + {% else %} + lldp med network-policy add {{ item.network_policy_id }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if intf_vars.advertise.tlv is defined and intf_vars.advertise.tlv %} + {% for it in intf_vars.advertise.tlv %} + {% if it.name is defined and it.name %} + {% if it.state is defined and it.state == "absent" %} + {% if it.value is defined and it.value %} + no lldp tlv-select {{ it.name }} {{ it.value }} + {% else %} + no lldp tlv-select {{ it.name }} + {% endif %} + {% else %} + {% if it.value is defined and it.value %} + lldp tlv-select {{ it.name }} {{ it.value }} + {% else %} + lldp tlv-select {{ it.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + 
+[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml new file mode 100644 index 00000000..f07408ed --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml @@ -0,0 +1,48 @@ +--- +# vars file for dellemc.os10.os10_lldp, +# below gives a sample configuration +# Sample variables for OS10 device +os10_lldp: + enable: false + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + ethernet 1/1/1: + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: present + application: + - network_policy_id: 4 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml new file mode 100644 index 00000000..e928c00f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_lldp diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml new file mode 100644 index 00000000..8802ce76 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for 
dellemc.os10.os10_lldp diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE b/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/README.md b/ansible_collections/dellemc/os10/roles/os10_logging/README.md new file mode 100644 index 00000000..c8a2dbf2 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/README.md @@ -0,0 +1,97 @@ +Logging role +============ + +This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Logging role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_logging keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``logging`` | list | Configures the logging server (see ``logging.*``) | os10 | +| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os10 | +| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os10 | +| ``console`` | dictionary | Configures logging to the console (see ``console.*``) | os10 | +| ``console.enable`` | boolean | Enables/disables logging to the console | os10 | +| ``console.severity`` | string | Configures the minimum severity level for logging to the console | os10 | +| ``log_file`` | dictionary | Configures 
logging to a log file (see ``log_file.*``) | os10 | +| ``log_file.enable`` | boolean | Enables/disables logging to a log file | os10 | +| ``log_file.severity`` | string | Configures the minimum severity level for logging to a log file | os10 | +| ``source_interface`` | string | Configures the source interface for logging | os10 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` 
environment variable value is used |
+| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
+| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
+
+> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
+
+
+Example playbook
+----------------
+
+This example uses the *os10_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
+
+When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false.
+
+**Sample hosts file**
+
+    leaf1 ansible_host= <ip_address>
+
+**Sample host_vars/leaf1**
+
+    hostname: leaf1
+    ansible_become: yes
+    ansible_become_method: xxxxx
+    ansible_become_pass: xxxxx
+    ansible_ssh_user: xxxxx
+    ansible_ssh_pass: xxxxx
+    ansible_network_os: dellemc.os10.os10
+    build_dir: ../temp/temp_os10
+
+    os10_logging:
+      logging:
+        - ip: 1.1.1.1
+          state: absent
+      console:
+        enable: True
+        severity: log-err
+      log_file:
+        enable: True
+        severity: log-err
+      source_interface: "ethernet1/1/30"
+
+**Simple playbook to setup logging — leaf.yaml**
+
+    - hosts: leaf1
+      roles:
+        - dellemc.os10.os10_logging
+
+**Run**
+
+    ansible-playbook -i hosts leaf.yaml
+
+(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml new file mode 100644 index 00000000..2fbccfcd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_logging diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml new file mode 100644 index 00000000..b79ed93f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_logging diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml new file mode 100644 index 00000000..a9b06cab --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_logging role facilitates the configuration of logging attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml new file mode 100644 index 00000000..91ce9e7b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating logging configuration for os10" + template: + src: os10_logging.j2 + dest: "{{ build_dir }}/logging10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning logging configuration for os10" + os10_config: + src: os10_logging.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2 b/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2 new file mode 100644 index 00000000..442376b5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2 @@ -0,0 +1,67 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure logging commands for os10 Devices +os10_logging: + logging: + - ip: 1.1.1.1 + state: present + console: + enable: True + severity: log-err + log_file: + enable: True + severity: log-err + source_interface: "ethernet1/1/30" +###############################################} +{% if os10_logging is defined and os10_logging %} + {% for key,value in os10_logging.items() %} + {% if key == "logging" %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if 
item.state is defined and item.state == "absent" %} +no logging server {{ item.ip }} + {% else %} +logging server {{ item.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% elif key == "log_file" %} + {% if value.enable is defined %} + {% if value.enable %} +logging log-file enable + {% else %} +logging log-file disable + {% endif %} + {% endif %} + {% if value.severity is defined %} + {% if value.severity %} +logging log-file severity {{ value.severity }} + {% else %} +no logging log-file severity + {% endif %} + {% endif %} + {% elif key == "console" %} + {% if value.enable is defined %} + {% if value.enable %} +logging console enable + {% else %} +logging console disable + {% endif %} + {% endif %} + {% if value.severity is defined %} + {% if value.severity %} +logging console severity {{ value.severity }} + {% else %} +no logging console severity + {% endif %} + {% endif %} + {% elif key == "source_interface" %} + {% if value %} +logging source-interface {{ value }} + {% else %} +no logging source-interface + {% endif %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml new file mode 100644 index 00000000..d32792f7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.16.148.72 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git 
a/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml new file mode 100644 index 00000000..c9255b23 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml @@ -0,0 +1,15 @@ +--- +# vars file for dellemc.os10.os10_logging, +# below gives a sample configuration +# Sample variables for OS10 device +os10_logging: + logging: + - ip: 1.1.1.1 + state: absent + console: + enable: True + severity: log-err + log_file: + enable: True + severity: log-err + source_interface: "ethernet1/1/30" diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml new file mode 100644 index 00000000..cb66b3a8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_logging diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml new file mode 100644 index 00000000..736ff3b8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_logging diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE b/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md b/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md
new file mode 100644
index 00000000..e9014c42
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md
@@ -0,0 +1,304 @@
+Network validation role
+=========================
+
+This role is used to verify network validation. It validates network features of a wiring connection, BGP neighbors, MTU between neighbors, and VLT pairing. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The Network validation role requires an SSH connection for connectivity to a Dell EMC OS10 device. You can use any of the built-in OS connection variables.
+
+- **Wiring validation** — Based on the LLDP neighbor establishment, the intended neighbor input model is defined by the _group_var/all_ user which is compared with the actual LLDP neighbor; report is generated if there is any mismatch with the intended neighbors
+
+- **BGP validation** — Based on the BGP neighbor state establishment, report is generated if the BGP neighbor state is not in an established state
+
+- **MTU validation** — Based on the interface MTU, the report is generated if there is an MTU mismatch between LLDP neighbors
+
+- **VLT validation** — Based on the VLT information, the report is generated if the backup VLT link is down or not present
+
+Role variables
+--------------
+
+- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
+- Variables and values are case-sensitive
+
+**wiring_validation keys**
+
+| Key | Type | Description | Support |
+|------------|---------------------------|---------------------------------------------------------|-----------------------|
+| ``intended_neighbors`` | list | Defines topology details planned | os10 |
+| ``source_switch`` | string | Defines the source switch inventory name planned | 
os10 | +| ``source_port`` | string | Defines the source port planned | os10 | +| ``dest_switch`` | string | Defines the destination switch inventory name planned | os10 | +| ``dest_port`` | string | Defines the destination port planned | os10 | + +**bgp_validation keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``intended_bgp_neighbors`` | list | Defines topology details planned | os10 | +| ``source_switch`` | string | Defines the source switch inventory name planned | os10 | + +**vlt_validation keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``intended_vlt_pairs`` | list | Defines topology details planned | os10 | +| ``primary`` | string | Defines the primary role of switch inventory name planned | os10 | +| ``secondary`` | string | Defines the secondary role of switch inventory name planned | os10 | + +Connection variables +-------------------- + +Ansible Dell EMC roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible _group_vars_ or _host_vars_ directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if the value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; defaults to 22 | +| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used | +| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the _become_ method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use, if required, to enter privileged mode on the remote device; if `ansible_become` is set to no, this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+ +Dependencies +------------ + +- The _xmltodict_ library should be installed to convert show command output in dictionary format from XML +- To install the package, use the pip install xmltodict command +- The *os10_fabric_summary* role must be included to query system network summary information + +Example playbook +---------------- + +This example uses the *os10_network_validation* role to verify network validations. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + + +**Sample hosts file** + + site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + [spine] + site1-spine1 + site1-spine2 + site2-spine1 + site2-spine2 + [LeafAndSpineSwitch:children] + spine + + +**Sample host_vars/site1-spine1** + + cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + + os10_cli_user: xxxx + os10_cli_pass: xxxx + ansible_network_os: dellemc.os10.os10 + + +#### Sample ``group_var/all`` + +**Sample input for wiring validation** + + + intended_neighbors: + - source_switch: site1-spine2 + source_port: ethernet1/1/5 + dest_port: ethernet1/1/29 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/6 + dest_port: ethernet1/1/30 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/7 + dest_port: ethernet1/1/31 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: 
ethernet1/1/8 + dest_port: ethernet1/1/32 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/9 + dest_port: ethernet1/1/21 + dest_switch: site1-spine1 + - source_switch: site1-spine2 + source_port: ethernet1/1/7 + dest_port: ethernet1/1/29 + dest_switch: site1-spine3 + +**Sample input for BGP validation** + + intended_bgp_neighbors: + - source_switch: site1-spine1 + neighbor_ip: ["10.11.0.1","10.9.0.1","10.9.0.3","10.9.0.5","1.1.1.1"] + - source_switch: site1-spine2 + neighbor_ip: ["10.11.0.0","10.9.0.9","10.9.0.11","10.9.0.15"] + +**Sample input for VLT validation** + + intended_vlt_pairs: + - primary: site1-spine1 + secondary: site2-spine2 + - primary: site2-spine1 + secondary: site2-spine2 + + +#### Simple playbook to setup network validation + +**Sample playbook of ``validation.yaml`` to run complete validation** + + --- + - name: setup network validation + hosts: localhost + gather_facts: no + connection: local + roles: + - os10_network_validation + +**Sample playbook to run wiring validation** + + --- + - name: setup wiring validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: wiring_validation.yaml + +**Sample playbook to run BGP validation** + + --- + - name: setup bgp validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: bgp_validation.yaml + +**Sample playbook to run VLT validation** + + --- + - name: setup vlt validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: vlt_validation.yaml + +**Sample playbook to run MTU validation** + + --- + - name: setup mtu validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + 
tasks: + - import_role: + name: os10_network_validation + tasks_from: mtu_validation.yaml + + +**Run** + +Execute the playbook and examine the results. + + ansible-playbook -i inventory.yaml validation.yaml + +**sample output of wiring validation** + + "results": [ + { + "dest_port": "ethernet1/1/1", + "dest_switch": "site2-spine2", + "error_type": "link-missing", + "reason": "link is not found for source switch: site2-spine1,port: ethernet1/1/1", + "source_port": "ethernet1/1/1", + "source_switch": "site2-spine1" + }, + { + "dest_port": "ethernet1/1/2", + "dest_switch": "site2-spine1", + "error_type": "link-mismatch", + "reason": "Destination switch is not an expected value, expected switch: site2-spine1,port: ethernet1/1/2; actual switch: site1-spine2(svc-tag:J33FXC2, node_mac:e4:f0:04:9b:eb:dc), port: ethernet1/1/1", + "source_port": "ethernet1/1/1", + "source_switch": "site1-spine1" + } + ] + +**sample output of BGP validation** + + "results": [ + { + "bgp_neighbor": "10.9.0.1", + "bgp_state": "idle", + "error_type": "remote_port_down", + "possible_reason": "remote port site2-spine1 ethernet1/1/2 is down", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "-", + "bgp_state": "idle", + "error_type": "not_an_intended_neighbor", + "possible_reason": "neighbor 10.9.0.7 is not an intended, please add this neighbor in the intended_bgp_neighbors", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "1.1.1.1", + "error_type": "config_missing", + "possible_reason": "neighbor config missing", + "source_switch": "site1-spine1" + }, + { + "bgp_neighbor": "10.9.0.9", + "bgp_state": "idle", + "error_type": "remote_port_down", + "possible_reason": "remote port site2-spine1 ethernet1/1/3 is down", + "source_switch": "site1-spine2" + } + ] + +**sample output of VLT validation** + + "results": [ + { + "error_type": "secondary_mismatch", + "intended_primary": "site1-spine1", + "intended_secondary": "site2-spine2", + "possible_reason": "config mismatch as 
site2-spine2 is expected, but the actual secondary is site1-spine2 ", + "secondary": "site1-spine2" + }, + { + "error_type": "peer_missing", + "intended_primary": "site2-spine1", + "intended_secondary": "site2-spine2", + "possible_reason": "peer info is not configured or peer interface is down" + } + ] + +**sample output of MTU validation** + + "msg": { + "results": "There is no MTU mistmatch between neighbors" + } + + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml new file mode 100644 index 00000000..b01fd4b1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml @@ -0,0 +1,21 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + role_name: os10_network_validation + author: Dell EMC Networking Engineering + description: The os10_network_validation role faclitates to provide the Network validation in devices running on Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 + + dependencies: + - role: os10_fabric_summary diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml new file mode 100644 index 00000000..a289b50c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml @@ -0,0 +1,33 @@ +--- +- name: "Get Dell EMC OS10 Show ip bgp summary" + os10_command: + commands: + - command: "show ip bgp summary | display-xml" + - command: "show ip interface brief | display-xml" + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_bgp +- name: "set fact to form bgp database" + set_fact: + output_bgp: "{{ output_bgp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_bgp': item.stdout.0, 'stdout_show_ip': item.stdout.1}] }}" + loop: "{{ show_bgp.results }}" +- name: call lib to convert bgp info from xml to dict format + base_xml_to_dict: + cli_responses: "{{ item.stdout_show_bgp }}" + with_items: + - "{{ output_bgp }}" + register: show_bgp_list +- name: call lib to convert ip interface info from xml to dict format + base_xml_to_dict: + cli_responses: "{{ item.stdout_show_ip }}" + with_items: + - "{{ output_bgp }}" + register: show_ip_intf_list +- name: call lib for bgp validation + bgp_validate: + show_ip_bgp: "{{ show_bgp_list.results }}" + show_ip_intf_brief: "{{ show_ip_intf_list.results }}" + bgp_neighbors: "{{ intended_bgp_neighbors }}" + register: bgp_validation_output +- name: "debug bgp database" + debug: var=bgp_validation_output.msg.results diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml new 
file mode 100644 index 00000000..c81545b8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml @@ -0,0 +1,9 @@ +--- +- name: "Validate the wiring info" + import_tasks: wiring_validation.yaml +- name: "Validate the BGP info" + import_tasks: bgp_validation.yaml +- name: "Validate the VLT info" + import_tasks: vlt_validation.yaml +- name: "Validate the MTU info for lldp neigbors" + import_tasks: mtu_validation.yaml diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml new file mode 100644 index 00000000..fbc58538 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml @@ -0,0 +1,32 @@ +--- +- name: "Get Dell EMC OS10 MTU mismatch info" + os10_command: + commands: + - command: "show lldp neighbors" + - command: "show ip interface brief | display-xml" + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_output +- name: "set fact to form database" + set_fact: + output_mtu: "{{ output_mtu|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_lldp': item.stdout.0, 'stdout_show_ip': item.stdout.1 }] }}" + loop: "{{ show_output.results }}" +- name: "debug the output database" + debug: var=output_mtu +- name: call lib to convert ip interface info from xml to dict format + base_xml_to_dict: + cli_responses: "{{ item.stdout_show_ip }}" + with_items: "{{ output_mtu }}" + register: show_ip_intf_list +- name: "Get Dell EMC OS10 Show system" + import_role: + name: os10_fabric_summary + register: show_system_network_summary +- name: "call lib to process" + mtu_validate: + show_lldp_neighbors_list: "{{ output_mtu }}" + show_system_network_summary: "{{ show_system_network_summary.msg.results }}" + show_ip_intf_brief: "{{ show_ip_intf_list.results }}" + register: 
mtu_validation +- name: "debug mtu validation result" + debug: var=mtu_validation.msg.results diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml new file mode 100644 index 00000000..1a673e19 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml @@ -0,0 +1,44 @@ +--- +- name: "Get Dell EMC OS10 Show run vlt" + os10_command: + commands: + - command: "show running-configuration vlt | grep vlt-domain" + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_run_vlt +- name: "set fact to form show vlt database" + set_fact: + output_vlt: "{{ output_vlt|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_vlt': item.stdout.0}] }}" + loop: "{{ show_run_vlt.results }}" +- name: "debug output_vlt" + debug: var=output_vlt +- name: "Get Dell EMC OS10 Show vlt info" + os10_command: + commands: + - command: "show vlt {{ item.stdout_show_vlt.split()[1] }} | display-xml" + provider: "{{ hostvars[item.inv_name].cli }}" + with_items: "{{ output_vlt }}" + register: show_vlt +- name: "set fact to form vlt database" + set_fact: + vlt_out: "{{ vlt_out|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'show_vlt_stdout': item.stdout.0}] }}" + loop: "{{ show_vlt.results }}" + register: vlt_output +- name: call lib to convert vlt info from xml to dict format + base_xml_to_dict: + cli_responses: "{{ item.show_vlt_stdout }}" + with_items: + - "{{ vlt_out }}" + register: vlt_dict_output +- name: "Get Dell EMC OS10 Show system" + import_role: + name: os10_fabric_summary + register: show_system_network_summary +- name: call lib to process + vlt_validate: + show_vlt: "{{ vlt_dict_output.results }}" + show_system_network_summary: "{{ show_system_network_summary.msg.results }}" + 
intended_vlt_pairs: "{{ intended_vlt_pairs }}" + register: show_vlt_info +- name: "debug vlt validation result" + debug: var=show_vlt_info.msg.results diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml new file mode 100644 index 00000000..d89ac18c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml @@ -0,0 +1,24 @@ +--- +- name: "Get Dell EMC OS10 wiring info" + os10_command: + commands: + - command: "show lldp neighbors" + provider: "{{ hostvars[item].cli }}" + with_items: "{{ groups['all'] }}" + register: show_lldp +- name: "set facts to form lldp db" + set_fact: + output_lldp: "{{ output_lldp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_lldp': item.stdout}] }}" + loop: "{{ show_lldp.results }}" +- name: "Get Dell EMC OS10 Show system" + import_role: + name: os10_fabric_summary + register: show_system_network_summary +- name: call lib to process + wiring_validate: + show_lldp_neighbors_list: "{{ output_lldp }}" + show_system_network_summary: "{{ show_system_network_summary.msg.results }}" + planned_neighbors: "{{ intended_neighbors }}" + register: wiring_validation +- name: "debug the wiring validation results" + debug: var=wiring_validation.msg.results diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all new file mode 100644 index 00000000..01c4856e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all @@ -0,0 +1,30 @@ +#wiring_validation input +intended_neighbors: + - source_switch: site1-spine1 + source_port: ethernet1/1/1 + dest_port: ethernet1/1/1 + dest_switch: site1-spine2 + - source_switch: site2-spine1 + source_port: 
ethernet1/1/1 + dest_port: ethernet1/1/1 + dest_switch: site2-spine2 + - source_switch: site1-spine1 + source_port: ethernet1/1/1 + dest_port: ethernet1/1/2 + dest_switch: site2-spine1 + - source_switch: site1-spine1 + source_port: ethernet1/1/2 + dest_port: ethernet1/1/2 + dest_switch: site2-spine2 +#bgp_validation input +intended_bgp_neighbors: + - source_switch: site1-spine1 + neighbor_ip: ["10.11.0.1", "10.9.0.1", "10.9.0.3", "10.9.0.5", "1.1.1.1"] + - source_switch: site1-spine2 + neighbor_ip: ["10.11.0.0", "10.9.0.9", "10.9.0.11", "10.9.0.15"] +#vlt_validation input +intended_vlt_pairs: + - primary: site1-spine1 + secondary: site1-spine2 + - primary: site2-spine1 + secondary: site2-spine2 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1 new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1 @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2 new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2 @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: 
dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1 new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1 @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2 new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2 @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml new file mode 100644 index 00000000..d1838947 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml @@ -0,0 +1,14 @@ +--- +site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 +site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 +site2-spine1 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin 
ansible_network_os=dellemc.os10.os10 +site2-spine2 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10 + +[spine] +site1-spine1 +site1-spine2 +site2-spine1 +site2-spine2 + +[LeafAndSpineSwitch:children] +spine diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml new file mode 100644 index 00000000..1f450079 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +cli: + host: "{{ ansible_host }}" + username: "{{ os10_cli_user | default('admin') }}" + password: "{{ os10_cli_pass | default('admin') }}" + timeout: 300 + +os10_cli_user: xxxx +os10_cli_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml new file mode 100644 index 00000000..aff21dfe --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml @@ -0,0 +1,56 @@ +--- +# Sample playbook to validate network validation role +- name: setup network validation + hosts: localhost + gather_facts: False + connection: local + roles: + - dellemc.os10.os10_network_validation + +# Sample playbook to validate wiring validation +- name: setup play for wiring validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: wiring_validation.yaml + +# Sample playbook to validate bgp validation +- name: setup playbook to validate bgp validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: bgp_validation.yaml + +# Sample 
playbook to validate vlt validation +- name: setup playbook to validate vlt validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: vlt_validation.yaml + +# Sample playbook to validate mtu validation +- name: setup playbook to validate mtu validation + hosts: localhost + gather_facts: False + connection: local + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_network_validation + tasks_from: mtu_validation.yaml diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/README.md b/ansible_collections/dellemc/os10/roles/os10_ntp/README.md new file mode 100644 index 00000000..17e879c6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/README.md @@ -0,0 +1,124 @@ +NTP role +======== + +This role facilitates the configuration of network time protocol (NTP) attributes. It specifically enables configuration of NTP server, NTP source, authentication, and broadcast service. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The NTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_ntp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``server`` | list | Configures the NTP server (see ``server.*``) | os10 | +| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os10 | +| ``server.key`` | integer | Configures the peer authentication key for the NTP server | os10 | +| ``server.prefer`` | boolean | Configures the peer preference | os10 | +| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os10 | +| ``source`` | string | Configures the interface for the source address | os10 | +| ``master`` | integer | Configures the local 
clock to act as the server | os10 | +| ``authenticate`` | boolean | Configures authenticate time sources | os10 | +| ``authentication_key`` | list | Configures authentication key for trusted time sources (see ``authentication_key.*``) | os10 | +| ``authentication_key.key_num`` | integer | Configures authentication key number | os10 | +| ``authentication_key.key_string_type`` | integer: 0,9 | Configures hidden authentication key string if the value is 9, and configures unencrypted authentication key string if the value is 0 | os10 | +| ``authentication_key.key_string`` | string | Configures the authentication key string | os10 | +| ``authentication_key.type`` | string: md5,sha1,sha2-256 | Configures the authentication type | os10 | +| ``authentication_key.state`` | string: absent,present\* | Deletes the authentication key if set to absent | os10 | +| ``trusted_key`` | list | Configures key numbers for trusted time sources (see ``trusted_key.*``) | os10 | +| ``trusted_key.key_num`` | integer | Configures the key number | os10 | +| ``trusted_key.state`` | string: absent,present\* | Deletes the trusted key if set to absent | os10 | +| ``intf`` | dictionary | Configures NTP on the interface (see ``intf.*``) | os10 | +| ``intf.<interface name>`` | dictionary | Configures NTP on the interface (see ``<interface name>.*``) | os10 | +| ``<interface name>.disable`` | boolean | Configures NTP disable on the interface | os10 | +| ``<interface name>.broadcast`` | boolean | Configures NTP broadcast client service on the interface | os10 | +| ``vrf`` | dictionary | Enables NTP on VRF (see ``vrf.*``) | os10 | +| ``vrf.name`` | string | Name of the VRF to enable NTP | os10 | +| ``vrf.state`` | string: absent,present\* | Disables NTP on the VRF if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ + +Example playbook +---------------- + +This example uses the *os10_ntp* role to set the NTP server, source ip, authentication and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When the `os10_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os10_ntp* role. + +By including the role, you automatically get access to all of the tasks to configure NTP attributes. The sample *host_vars* is for os10. + +**Sample hosts file** + + leaf1 ansible_host= <ip_address> + +**Sample host_vars/leaf1** + + host: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_ntp: + source: ethernet 1/1/2 + master: 5 + authenticate: true + authentication_key: + - key_num: 123 + key_string_type: 9 + key_string: test + type: md5 + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + ethernet 1/1/2: + disable: true + broadcast: true + vrf: + name: red + state: present + +**Simple playbook to setup NTP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_ntp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml new file mode 100644 index 00000000..7d2d8eee --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_ntp diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml new file mode 100644 index 00000000..965f50b5 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_ntp diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml new file mode 100644 index 00000000..3befe0cd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml new file mode 100644 index 00000000..202e5601 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml @@ -0,0 +1,41 @@ +--- +# tasks file for os10 + - name: "Generating NTP configuration for os10" + template: + src: os10_ntp.j2 + dest: "{{ build_dir }}/ntp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning NTP configuration for os10" + os10_config: + src: os10_ntp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output + + - name: "Generating NTP VRF configuration for os10" + lineinfile: + path: "{{ build_dir }}/ntp10_{{ hostname }}.conf.part" + line: "{{ lookup('template', 'os10_ntp_vrf.j2') }}" + insertafter: EOF + when: > + (ansible_network_os is defined and + ansible_network_os == "dellemc.os10.os10" and + ((os10_cfg_generate | default('False')) | bool) and + os10_ntp.vrf is defined and + os10_ntp.vrf) + + - name: "Provisioning NTP VRF configuration for os10" + os10_config: + lines: + - command: "{{ lookup('template', 'os10_ntp_vrf.j2') }}" + prompt: "Do you want to continue" + answer: "yes" + when: > + (ansible_network_os is defined and + ansible_network_os == "dellemc.os10.os10" and + os10_ntp.vrf is defined and + os10_ntp.vrf) +# notify: save config os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2 b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2 new file mode 100644 index 
00000000..7524c935 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2 @@ -0,0 +1,125 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure NTP commands for os10 Devices +os10_ntp: + source: ethernet 1/1/2 + master: 5 + authenticate: true + vrf: + name: red + authentication_key: + - key_num: 123 + key_string_type: 7 + key_string: test + type: md5 + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + ethernet 1/1/2: + disable: true + broadcast: true +###############################################} +{% if os10_ntp is defined and os10_ntp %} + {% if os10_ntp.source is defined %} + {% if os10_ntp.source %} +ntp source {{ os10_ntp.source }} + {% else %} +no ntp source + {% endif %} + {% endif %} + {% if os10_ntp.master is defined %} + {% if os10_ntp.master %} +ntp master {{ os10_ntp.master }} + {% else %} +no ntp master + {% endif %} + {% endif %} + {% if os10_ntp.authenticate is defined %} + {% if os10_ntp.authenticate %} +ntp authenticate + {% else %} +no ntp authenticate + {% endif %} + {% endif %} + {% if os10_ntp.server is defined and os10_ntp.server %} + {% for item in os10_ntp.server %} + {% if item.ip is defined and item.ip %} + {% if item.state is defined and item.state == "absent" %} +no ntp server {{ item.ip }} + {% else %} + {% if item.key is defined and item.key %} + {% if item.prefer is defined and item.prefer %} +ntp server {{ item.ip }} key {{ item.key }} prefer + {% else %} +ntp server {{ item.ip }} key {{ item.key }} + {% endif %} + {% else %} + {% if item.prefer is defined and item.prefer %} +ntp server {{ item.ip }} prefer + {% else %} +ntp server {{ item.ip }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os10_ntp.authentication_key is defined and os10_ntp.authentication_key %} + {% for item in 
os10_ntp.authentication_key %} + {% if item.key_num is defined and item.key_num %} + {% if item.state is defined and item.state == "absent" %} +no ntp authentication-key {{ item.key_num }} + {% else %} + {% if item.key_string_type is defined and item.key_string_type >= 0 %} + {% if item.key_string is defined and item.key_string %} + {% if item.type is defined and item.type %} + {% set auth_type = item.type %} + {% else %} + {% set auth_type = 'md5' %} + {% endif%} +ntp authentication-key {{ item.key_num }} {{ auth_type }} {{ item.key_string_type }} {{ item.key_string }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os10_ntp.trusted_key is defined and os10_ntp.trusted_key %} + {% for item in os10_ntp.trusted_key %} + {% if item.key_num is defined and item.key_num %} + {% if item.state is defined and item.state == "absent" %} +no ntp trusted-key {{ item.key_num }} + {% else %} +ntp trusted-key {{ item.key_num }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os10_ntp.intf is defined and os10_ntp.intf %} + {% for key in os10_ntp.intf.keys() %} +interface {{ key }} + {% if os10_ntp.intf[key].disable is defined %} + {% if os10_ntp.intf[key].disable %} + ntp disable + {% else %} + no ntp disable + {% endif %} + {% endif %} + {% if os10_ntp.intf[key].broadcast is defined %} + {% if os10_ntp.intf[key].broadcast %} + ntp broadcast client + {% else %} + no ntp broadcast client + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 new file mode 100644 index 00000000..d2e12a39 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 @@ -0,0 +1,18 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure NTP VRF for os10 Devices +os10_ntp: + 
vrf: + name: red + state: present +###############################################} +{% if os10_ntp is defined and os10_ntp %} + {% if os10_ntp.vrf is defined and os10_ntp.vrf.name is defined %} + {% if os10_ntp.vrf.state is defined and os10_ntp.vrf.state == "absent" %} +no ntp enable vrf {{ os10_ntp.vrf.name }} + {% else %} +ntp enable vrf {{ os10_ntp.vrf.name }} + {% endif%} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml new file mode 100644 index 00000000..cc2a9b09 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml @@ -0,0 +1,25 @@ +--- +# vars file for dellemc.os10.os10_ntp, +# below gives a sample configuration +# Sample variables for OS10 device +os10_ntp: + source: ethernet 1/1/2 + master: 5 + authenticate: true + authentication_key: + - key_num: 123 + key_string_type: 0 + key_string: test + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + ethernet 1/1/2: + disable: true + broadcast: true diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml new file mode 100644 index 00000000..dce69c7b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os10.os10_ntp diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml new file 
mode 100644 index 00000000..e90d53c0 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_ntp diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE b/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md b/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md new file mode 100644 index 00000000..dce141e8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md @@ -0,0 +1,104 @@ +Prefix-list role +================ + +This role facilitates the configuration of a prefix-list. It supports the configuration of an IP prefix-list, and assigns the prefix-list to line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The prefix-list role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` value +- If `os10_cfg_generate` set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_prefix_list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os10 | +| ``name`` | string (required) | Configures the prefix-list name | os10 | +| ``description`` | string | Configures the prefix-list description | os10 | +| ``entries`` | list | Configures rules in the prefix-list (see ``entries.*``) | os10 | +| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os10 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to 
reject packets if set to false | os10 | +| ``entries.net_num`` | string (required) | Specifies the network number | os10 | +| ``entries.mask`` | string (required) | Specifies the mask | os10 | +| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os10 | +| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os10 | +| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os10 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the prefix-list if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_prefix_list* role to configure prefix-list for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_prefix_list* role. + +**Sample hosts file** + + leaf1 ansible_host=<ip_address> + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_prefix_list + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml new file mode 100644 index 00000000..8d218029 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_prefix_list diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml new file mode 100644 index 00000000..72b64726 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_prefix_list diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info new file mode 100644 index 00000000..ccddfc42 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Fri Mar 10 15:35:29 2017', version: v1.0.1} diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml new file mode 100644 index 00000000..287ff507 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml @@ -0,0 +1,16 @@ +# Copyright (c) 2017-2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_prefix_list role facilitates the configuration of prefix list attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml new file mode 100644 index 00000000..449b80ba --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating prefix list configuration for os10" + template: + src: os10_prefix_list.j2 + dest: "{{ build_dir }}/prefixlist10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning prefix list configuration for os10" + os10_config: + src: os10_prefix_list.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2 b/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2 new file mode 100644 index 00000000..fe598331 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2 @@ -0,0 +1,95 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure pl on OS10 devices +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present +#####################################} +{% if (os10_prefix_list is defined and os10_prefix_list) %} + {% for val in os10_prefix_list %} + {% if val.name is defined and val.name %} + {% if 
val.state is defined and val.state == "absent" %} + {% if val.type is defined and val.type == "ipv4" %} +no ip prefix-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +no ipv6 prefix-list {{ val.name }} + {% endif %} + {% else %} + {% if val.type is defined and val.type == "ipv4" %} + {% set ip = "ip" %} + {% elif val.type is defined and val.type == "ipv6" %} + {% set ip = "ipv6" %} + {% endif %} + {% if val.description is defined %} + {% if val.description %} +{{ ip }} prefix-list {{ val.name }} description {{ val.description }} + {% else %} +no {{ ip }} prefix-list {{ val.name }} description {{ val.description }} + {% endif %} + {% endif %} + {% if val.entries is defined and val.entries %} + {% for rule in val.entries %} + {% if rule.number is defined and rule.number %} + {% if rule.state is defined %} + {% if rule.state == "absent" %} +no {{ ip }} prefix-list {{ val.name }} seq {{ rule.number }} + {% else %} + {% if rule.permit is defined %} + {% if rule.permit %} + {% set is_permit = "permit" %} + {% else %} + {% set is_permit = "deny" %} + {% endif %} + {% endif %} + {% if rule.net_num is defined and rule.net_num %} + {% if rule.mask is defined %} + {% if rule.mask or rule.mask == 0 %} + {% if rule.condition_list is defined and rule.condition_list %} + {% set condition_string = [' '] %} + {% set item = "" %} + {% if rule.condition_list | length > 1 %} + {% for condition in rule.condition_list %} + {% if rule.condition_list[0].condition == "ge" and rule.condition_list[1].condition == "le" %} + {% set item = condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' ' %} + {% endif %} + {% if condition_string.insert(0,item) %} {% endif %} + {% endfor %} + {% else %} + {% for condition in rule.condition_list %} + {% if rule.condition_list[0].condition == "ge" or rule.condition_list[0].condition == "le" %} + {% set item = condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' ' +%} + {% endif %} 
+ {% if condition_string.insert(0,item) %} {% endif %} + {% endfor %} + {% endif %} +{{ ip }} prefix-list {{ val.name }} seq {{ rule.number }} {{ is_permit }} {{ rule.net_num }}/{{ rule.mask }}{{ condition_string[0] }} + {% else %} +{{ ip }} prefix-list {{ val.name }} seq {{ rule.number}} {{ is_permit }} {{ rule.net_num }}/{{ rule.mask }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml new file mode 100644 index 00000000..3e8250fd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml @@ -0,0 +1,20 @@ +--- +# vars file for dellemc.os10.os10_prefix_list, +# below gives a sample configuration +# Sample variables for OS10 device +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + 
condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml new file mode 100644 index 00000000..46fb739a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_prefix_list diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml new file mode 100644 index 00000000..bcff7f3f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_prefix_list diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE b/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/README.md b/ansible_collections/dellemc/os10/roles/os10_qos/README.md new file mode 100644 index 00000000..58415970 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/README.md @@ -0,0 +1,90 @@ +QoS role +======== + +This role facilitates the configuration of quality of service (QoS) attributes like policy-map and class-map. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The QoS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_qos keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os10 | +| ``policy_map.name`` | string (required) | Configures the policy-map name | os10 | +| ``policy_map.type`` | string: qos\*, application, control-plane, network-qos, queuing in os10 | Configures the policy-map type | os10 | +| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os10 | +| ``class_map`` | list | Configures the class-map (see ``class_map.*``) | os10 | +| ``class_map.name`` | string (required) | Configures the class-map name | os10 | +| ``class_map.type`` | string: qos\*,application,control-plane,network-qos,queuing | 
Configures the class-map type | os10 | +| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this 
key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_qos* role to configure the policy-map class-map. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_qos* role. By including the role, you automatically get access to all of the tasks to configure QoS features. + +**Sample hosts file** + + leaf1 ansible_host= <ip_address> + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_qos: + policy_map: + - name: testpolicy + type: qos + state: present + class_map: + - name: testclass + type: application + state: present + +**Simple playbook to setup qos — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_qos + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml new file mode 100644 index 00000000..447b4329 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for os10_qos +match_type: + match_all: match-all + match_any: match-any diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml new file mode 100644 index 00000000..43fd82c7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for os10_qos diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml new file mode 100644 index 00000000..0b07e1d9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_qos role facilitates the configuration of qos attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml new file mode 100644 index 00000000..6921f69a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for Dellos10 + - name: "Provisioning Qos configuration for os10" + os10_config: + src: os10_qos.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output + + - name: "Generating Qos configuration for os10" + template: + src: os10_qos.j2 + dest: "{{ build_dir }}/qos10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2 b/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2 new file mode 100644 index 00000000..a94c5415 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2 @@ -0,0 +1,48 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{##################################################### +Purpose: +Configure qos commands for os10 Devices. 
+os10_qos: + policy_map: + - name: testpolicy + type: qos + state: present + class_map: + - name: testclass + type: application + state: present +#####################################################} +{% if os10_qos is defined and os10_qos %} +{% for key in os10_qos.keys() %} + {% if key =="policy_map" %} + {% for vars in os10_qos[key] %} + {% if vars.name is defined and vars.name %} + {% if vars.state is defined and vars.state == "absent" %} +no policy-map {{ vars.name }} + {% else %} + {% if vars.type is defined and vars.type %} +policy-map type {{ vars.type }} {{ vars.name }} + {% else %} +policy-map type qos {{ vars.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% elif key =="class_map" %} + {% for vars in os10_qos[key] %} + {% if vars.name is defined and vars.name %} + {% if vars.state is defined and vars.state == "absent" %} +no class-map {{ vars.name }} + {% else %} + {% if vars.type is defined and vars.type %} +class-map type {{ vars.type }} {{ vars.name }} + {% else %} +class-map type qos {{ vars.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml new file mode 100644 index 00000000..191d94ce --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml @@ -0,0 +1,11 @@ +--- +# Sample variables for OS10 device +os10_qos: + policy_map: + - name: testpolicy + type: qos + state: present + class_map: + - name: testclas + type: qos + state: present diff --git 
a/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml new file mode 100644 index 00000000..4107ee81 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - os10_qos diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml new file mode 100644 index 00000000..ecee178e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for os10_qos diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE b/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/README.md b/ansible_collections/dellemc/os10/roles/os10_raguard/README.md
new file mode 100644
index 00000000..abf7cf4a
--- /dev/null
+++ b/ansible_collections/dellemc/os10/roles/os10_raguard/README.md
@@ -0,0 +1,126 @@
+IPv6 RA Guard role
+===================
+
+This role facilitates the configuration of IPv6 RA Guard attributes. It specifically enables configuration of IPv6 RA Guard feature enable/disable, IPv6 RA Guard policy definition and policy parameter configuration, and attachment of IPv6 RA Guard policy to an interface. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
+
+The IPv6 RA Guard role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
+
+Role variables
+--------------
+
+- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
+- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
+- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
+- Setting an empty value for any variable negates the corresponding configuration
+- Variables and values are case-sensitive
+
+**os10_raguard keys**
+
+
+| Key | Type | Description |Support |
+|--------------------------------------|-------------------------|----------------------------------------------------------|---------|
+| ``enable`` | boolean | Enables IPv6 RA-Guard feature | os10 |
+| ``policy`` | list | Configures the IPv6 RA Guard policy (see ``policy.*``) | os10 |
+| ``policy.state`` | string: absent/present\*| Deletes the policy if set to absent | os10 |
+| ``policy.name`` | string (required) | Configures the IPv6 RA Guard policy name | os10 |
+| ``policy.device_role.value`` | string (required) | Configures the device role for a policy | os10 |
+| 
``policy.device_role.state`` | string: absent,present\*| Deletes the device role if set to absent | os10 | +| ``policy.managed_config_flag.value`` | string | Configures the managed config flag param for a policy | os10 | +| ``policy.managed_config_flag.state`` | string: absent,present\*| Deletes the managed config flag if set to absent | os10 | +| ``policy.other_config_flag.value`` | string | Configures the other config flag param for a policy | os10 | +| ``policy.other_config_flag.state`` | string: absent,present\*| Deletes the other config flag if set to absent | os10 | +| ``policy.mtu.value`` | integer | Configures the MTU param for a policy | os10 | +| ``policy.mtu.state`` | string: absent,present\*| Deletes the MTU if set to absent | os10 | +| ``policy.reachable_time.value`` | integer | Configures the reachable time param for a policy | os10 | +| ``policy.reachable_time.state`` | string: absent,present\*| Deletes the reachable time if set to absent | os10 | +| ``policy.retrans_timer.value`` | integer | Configures the retransmit timer param for a policy | os10 | +| ``policy.retrans_timer.state`` | string: absent,present\*| Deletes the retransmit timer if set to absent | os10 | +| ``policy.router_lifetime.value`` | integer | Configures the router lifetime param for a policy | os10 | +| ``policy.router_lifetime.state`` | string: absent,present\*| Deletes the router lifetime if set to absent | os10 | +| ``policy.router_preference.value`` | string | Configures the router preference param for a policy | os10 | +| ``policy.router_preference.state`` | string: absent,present\*| Deletes the router preference if set to absent | os10 | +| ``policy.match`` | list | Configures the prefix/ACL/MAC list param for a policy | os10 | +| ``policy.match.type`` | string | Configures the prefix/ACL/MAC type for a policy | os10 | +| ``policy.match.name`` | string | Configures the prefix/ACL/MAC name for a policy | os10 | +| ``policy.match.state`` | string: absent,present\*| Deletes 
the prefix/ACL/MAC if set to absent | os10 | +| ``intf`` | dictionary | Configures IPv6 RA Guard on the interface (see``intf.*``) | os10 | +| ``intf.`` | dictionary | Configures RA Guard on the interface (see``.*``)| os10 | +| ``.policy_name`` | String | Configures RA Guard policy name to be attached on an interface | os10 | +| ``.vlan`` | String | Configures VLAN name to which policy to be attached on an interface| os10| +| ``.state`` | String: absent,present\*| Deletes the policy if set to absent an interface | os10| + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| 
``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
+| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
+| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
+
+> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
+
+
+Example playbook
+----------------
+
+This example uses the *os10_raguard* role to configure the IPv6 RA Guard feature enable/disable, IPv6 RA Guard policy definition and policy parameter configuration, and attachment of IPv6 RA Guard policy to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
+
+When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os10_raguard* role. By including the role, you automatically get access to all of the tasks to configure IPv6 RA Guard attributes.
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + host: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_raguard: + enable: true + policy: + - policy_name: test + device_role: + value: router + state: present + managed_config_flag: + value: "on" + state: present + mtu: + value: 1280 + state: present + match: + - type: prefix_list + name: test_prefix + state: present + state: present + intf: + ethernet 1/1/2: + policy_name: test + vlan: 10 + state: present + +**Simple playbook to setup IPv6 RA Guard — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_raguard + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml new file mode 100644 index 00000000..57e6cf6b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_raguard diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml new file mode 100644 index 00000000..5b69a797 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_raguard diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml new file mode 100644 index 00000000..1093615d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_raguard role facilitates the configuration of IPv6 RA Guard attributes in devices running Dell EMC SmartFabric OS10. + company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml new file mode 100644 index 00000000..f2ccf55a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for dellemc.os10.os10_raguard +# tasks file for os10 + - name: "Generating IPv6 RA Guard configuration for os10" + template: + src: os10_raguard.j2 + dest: "{{ build_dir }}/raguard10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning IPv6 RA Guard configuration for os10" + os10_config: + src: os10_raguard.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2 b/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2 new file mode 100644 index 00000000..7abc27d0 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2 @@ -0,0 +1,174 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure IPv6 RA Guard commands for OS10 Devices +os10_raguard: + enable: true + policy: + - policy_name: test + device_role: + value: router + state: present + managed_config_flag: + value: "on" + state: present + 
other_config_flag: + value: "on" + state: present + mtu: + value: 1280 + state: present + reachable_time: + value: 100 + state: present + retrans_timer: + value: 100 + state: present + router_lifetime: + value: 10 + state: present + router_preference: + value: high + state: present + match: + - type: prefix_list + name: test_prefix + state: present + - type: access_list + name: test_access + state: present + - type: mac_list + name: test_mac + state: present + state: present + intf: + ethernet 1/1/2: + policy_name: test + vlan: 10 + state: present + ethernet 1/1/3: + policy_name: test + vlan: all + state: present + ethernet 1/1/4: + policy_name: test + vlan: 10,11,12,15 + state: present +###############################################} +{% if os10_raguard is defined and os10_raguard %} + {% if os10_raguard.enable is defined %} + {% if os10_raguard.enable %} +ipv6 nd ra-guard enable + {% else %} +no ipv6 nd ra-guard enable + {% endif %} + {% endif %} + {% if os10_raguard.policy is defined and os10_raguard.policy %} + {% for item in os10_raguard.policy %} + {% if item.policy_name is defined and item.policy_name %} + {% if item.state is defined and item.state == "absent" %} +no ipv6 nd ra-guard policy {{ item.policy_name }} + {% else %} +ipv6 nd ra-guard policy {{ item.policy_name }} + {% if item.device_role is defined and item.device_role %} + {% if item.device_role.state is defined and item.device_role.state == "absent" %} +no device-role {{ item.device_role.value }} + {% else %} +device-role {{ item.device_role.value }} + {% endif %} + {% endif %} + {% if item.device_role is defined and item.device_role.value == "router" %} + {% if item.managed_config_flag is defined and item.managed_config_flag %} + {% if item.managed_config_flag.state is defined and item.managed_config_flag.state == "absent" %} +no managed-config-flag {{ item.managed_config_flag.value }} + {% else %} +managed-config-flag {{ item.managed_config_flag.value }} + {% endif %} + {% endif %} + {% if 
item.other_config_flag is defined and item.other_config_flag %} + {% if item.other_config_flag.state is defined and item.other_config_flag.state == "absent" %} +no other-config-flag {{ item.other_config_flag.value }} + {% else %} +other-config-flag {{ item.other_config_flag.value }} + {% endif %} + {% endif %} + {% if item.mtu is defined and item.mtu %} + {% if item.mtu.state is defined and item.mtu.state == "absent" %} +no mtu {{ item.mtu.value }} + {% else %} +mtu {{ item.mtu.value }} + {% endif %} + {% endif %} + {% if item.reachable_time is defined and item.reachable_time %} + {% if item.reachable_time.state is defined and item.reachable_time.state == "absent" %} +no reachable-time {{ item.reachable_time.value }} + {% else %} +reachable-time {{ item.reachable_time.value }} + {% endif %} + {% endif %} + {% if item.retrans_timer is defined and item.retrans_timer %} + {% if item.retrans_timer.state is defined and item.retrans_timer.state == "absent" %} +no retrans-timer {{ item.retrans_timer.value }} + {% else %} +retrans-timer {{ item.retrans_timer.value }} + {% endif %} + {% endif %} + {% if item.router_lifetime is defined and item.router_lifetime %} + {% if item.router_lifetime.state is defined and item.router_lifetime.state == "absent" %} +no router-lifetime {{ item.router_lifetime.value }} + {% else %} +router-lifetime {{ item.router_lifetime.value }} + {% endif %} + {% endif %} + {% if item.router_preference is defined and item.router_preference %} + {% if item.router_preference.state is defined and item.router_preference.state == "absent" %} +no router-preference maximum + {% else %} +router-preference maximum {{ item.router_preference.value }} + {% endif %} + {% endif %} + {% if item.match is defined and item.match %} + {% for item1 in item.match %} + {% if item1.type is defined %} + {% if item1.type == "prefix_list" %} + {% if item1.state is defined and item1.state == "absent" %} +no match ra ipv6-prefix-list {{ item1.name }} + {% else %} +match ra 
ipv6-prefix-list {{ item1.name }} + {% endif %} + {% endif %} + {% if item1.type == "access_list" %} + {% if item1.state is defined and item1.state == "absent" %} +no match ra ipv6-access-list {{ item1.name }} + {% else %} +match ra ipv6-access-list {{ item1.name }} + {% endif %} + {% endif %} + {% if item1.type == "mac_list" %} + {% if item1.state is defined and item1.state == "absent" %} +no match ra mac-access-list {{ item1.name }} + {% else %} +match ra mac-access-list {{ item1.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os10_raguard.intf is defined and os10_raguard.intf %} + {% for key in os10_raguard.intf.keys() %} +interface {{ key }} + {% if os10_raguard.intf[key].policy_name is defined and os10_raguard.intf[key].policy_name %} + {% if os10_raguard.intf[key].state is defined and os10_raguard.intf[key].state == "absent" %} +no ipv6 nd ra-guard attach-policy {{ os10_raguard.intf[key].policy_name }} vlan {{ os10_raguard.intf[key].vlan }} + {% else %} +ipv6 nd ra-guard attach-policy {{ os10_raguard.intf[key].policy_name }} vlan {{ os10_raguard.intf[key].vlan }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml new file mode 100644 index 00000000..3d1548c7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml @@ -0,0 +1,56 @@ +--- +# vars file for dellemc.os10.os10_raguard, +# below gives a sample 
configuration +# Sample variables for OS10 device +os10_raguard: + enable: true + policy: + - policy_name: test + device_role: + value: router + state: present + managed_config_flag: + value: "on" + state: present + other_config_flag: + value: "on" + state: present + mtu: + value: 1280 + state: present + reachable_time: + value: 100 + state: present + retrans_timer: + value: 100 + state: present + router_lifetime: + value: 10 + state: present + router_preference: + value: high + state: present + match: + - type: prefix_list + name: test_prefix + state: present + - type: access_list + name: test_access + state: present + - type: mac_list + name: test_mac + state: present + state: present + intf: + ethernet 1/1/2: + policy_name: test + vlan: 10 + state: present + ethernet 1/1/3: + policy_name: test + vlan: all + state: present + ethernet 1/1/4: + policy_name: test + vlan: 10,11,12,15 + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml new file mode 100644 index 00000000..7ae62a2c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os10.os10_raguard diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml new file mode 100644 index 00000000..172b49cf --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_raguard diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE b/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright 
(C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/README.md b/ansible_collections/dellemc/os10/roles/os10_route_map/README.md new file mode 100644 index 00000000..1160ca48 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/README.md @@ -0,0 +1,190 @@ +Route-map role +============== + +This role facilitates the configuration of route-map attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The route-map role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_route_map keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``route_map`` | list | Configures the route-map (see ``route_map.*``) | os10 | +| ``route_map.name`` | string (required) | Configures the route-map name | os10 | +| ``route_map.permit`` | boolean | Configures permit/deny set operations | os10 | +| ``route_map.seq_num`` | integer | Configures the sequence number | os10 | +| ``route_map.continue`` | integer | Configures the next sequence number | os10 | +| ``route_map.set`` | dictionary | Configures route-map to set values in the destination routing protocol (see ``set.*``) | os10 | +| ``set.local_pref`` | integer | Configures the BGP local preference path attribute | os10 | +| ``set.metric`` | string | 
Configures a specific value to add or subtract from the existing metric value ("+ ", "- ", format) | os10 | +| ``set.metric_type`` | string: internal,type-1,type-2 | Configures the metric type for the destination routing protocol | os10 | +| ``set.origin`` | string: igp,egp,incomplete | Configures the BGP origin attribute | os10 | +| ``set.weight`` | integer | Configures the weight for the BGP route | os10 | +| ``set.comm_list`` | dictionary | Configures the BGP community list (see ``comm_list.*``) | os10 | +| ``comm_list.add`` | string | Adds the community attribute of a BGP update | os10 | +| ``comm_list.delete`` | string | Deletes a community attribute of a BGP update | os10 | +| ``set.community`` | string | Configures the community attribute for a BGP route update | os10 | +| ``set.extcomm_list`` | dictionary | Configures the BGP extcommunity list (see ``extcomm_list.*``) | os10 | +| ``extcomm_list.add`` | string | Adds an extended community attribute of a BGP update | os10 | +| ``extcomm_list.delete`` | string | Deletes the extended community attribute of a BGP update | os10 | +| ``set.extcommunity`` | string | Configures the extended community attribute for a BGP route update | os10 | +| ``set.next_hop`` | list | Configures the next-hop address (see ``next_hop.*``) | os10 | +| ``next_hop.type`` | string: ip,ipv6 | Configures the type of the next-hop address | os10 | +| ``next_hop.address`` | string | Configures the next-hop address | os10 | +| ``next_hop.track_id`` | integer | Configures the object track ID | os10 | +| ``next_hop.state`` | string: present\*,absent | Deletes the next-hop address if set to absent | os10 | +| ``route_map.match`` | list | Configures the route-map to match values from the route table (see ``match.*``) | os10 | +| ``match.ip_type`` | string (required): ipv4,ipv6 | Configures the IPv4/IPv6 address to match | os10 | +| ``match.access_group`` | string | Configures the access-group or list to match | os10 | +| 
``match.source_protocol_ebgp`` | string | Configures the source protocol to eBGP to match | os10 | +| ``match.source_protocol_ibgp`` | string | Configures the source protocol to iBGP to match | os10 | +| ``match.source_protocol_evpn`` | string | Configures the source protocol to EVPN to match | os10 | +| ``match.source_protocol_static`` | string | Configures the source protocol to static to match | os10 | +| ``match.source_protocol_connected`` | string | Configures the source protocol to connected to match | os10 | +| ``match.source_protocol_ospf`` | string | Configures the source protocol to OSPF to match | os10 | +| ``match.prefix_list`` | string | Configures the IP prefix-list to match against | os10 | +| ``route_map.state`` | string, choices: present\*,absent | Deletes the route-map if set to absent | os10 | +| ``as_path`` | list | Configures the BGP AS path filter (see ``as_path.*``) | os10 | +| ``as_path.access_list`` | string (required) | Configures the access-list name | os10 | +| ``as_path.permit`` | boolean (required) | Configures an AS path to accept or reject | os10 | +| ``as_path.regex``| string (required) | Configures a regular expression | os10 | +| ``as_path.state`` | string: absent,present\* | Deletes the BGP as path filter if set to absent | os10 | +| ``community_list`` | list | Configures a community list entry (see ``community_list.*``) | os10 | +| ``community_list.type`` | string (required): standard,expanded | Configures the type of community-list entry | os10 | +| ``community_list.name`` | string (required) | Configures the name of community-list entry | os10 | +| ``community_list.permit`` | boolean(required) | Configures the community to accept or reject | os10 | +| ``community_list.regex`` | string (required) | Configures the regular expression for extended community list; mutually exclusive with *community_list.community* | os10 | +| ``community_list.community`` | string (required) | Configures a well-known community or community number 
for standard community list; mutually exclusive with *community_list.regex* | os10 | +| ``community_list.state`` | string: absent,present\* | Deletes the community list entry if set to absent | os10 | +| ``extcommunity_list`` | list | Configures extcommunity-list entry (see ``extcommunity_list.*``) | os10 | +| ``extcommunity_list.type`` | string (required): standard,expanded | Configures the type of extcommunity-list entry | os10 | +| ``extcommunity_list.name`` | string (required) | Configures the name of extcommunity-list entry | os10 | +| ``extcommunity_list.permit`` | boolean(required) | Configures the extcommunity to accept or reject | os10 | +| ``extcommunity_list.regex`` | string (required) | Configures the regular expression for the extended extcommunity list; mutually exclusive with *extcommunity_list.community* | os10 | +| ``extcommunity_list.community`` | string (required) | Configures the extended community for standard community-list; mutually exclusive with *extcommunity_list.regex* | os10 | +| ``extcommunity_list.state`` | string: absent,present\* | Deletes the extcommunity-list entry if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_route_map* role for the route-map, policy-map, and class-map. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_route_map* role. By including the role, you automatically get access to all of the tasks to configure route-map features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: present + community_list: + - type: expanded + name: qq + permit: true + regex: aaa + state: present + - type: standard + name: qqq + permit: false + community: internet + state: present + extcommunity_list: + - type: expanded + name: qq + permit: true + regex: aaa + state: present + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: present + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + source_protocol_ebgp: present + source_protocol_ibgp: present + source_protocol_evpn: present + source_protocol_static: present + source_protocol_ospf: present + source_protocol_connected: present + set: + local_pref: 1200 + metric_type: internal + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: present + +**Simple playbook to setup QoS —leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_route_map + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. 
All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml new file mode 100644 index 00000000..a78d55b8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_route_map diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml new file mode 100644 index 00000000..ef0b4531 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_route_map diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml new file mode 100644 index 00000000..b73ddcef --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_route_map role facilitates the configuration of route map attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - os10 + - dellemc diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml new file mode 100644 index 00000000..62c94bdd --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Provisioning route-map configuration for os10" + os10_config: + src: os10_route_map.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output + + - name: "Generating route map configuration for os10" + template: + src: os10_route_map.j2 + dest: "{{ build_dir }}/routemap10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2 b/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2 new file mode 100644 index 00000000..bfca3d17 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2 @@ -0,0 +1,348 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{##################################################### +Purpose: +Configure route-map commands for os10 Devices. 
+os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: present + community_list: + - type: standard + name: qqq + permit: false + community: internet + state: present + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: present + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + source_protocol_ebgp: "present" + source_protocol_ibgp: "present" + source_protocol_evpn: "present" + source_protocol_ospf: "present" + source_protocol_static: "present" + source_protocol_connected: "present" + set: + local_pref: 1200 + metric_type: internal + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: present +#####################################################} +{% if os10_route_map is defined and os10_route_map %} +{% for key in os10_route_map.keys() %} + {% if key == "route_map" %} + {% for vars in os10_route_map[key] %} + {% if vars.name is defined and vars.name %} + {% if vars.state is defined and vars.state == "absent" %} +no route-map {{ vars.name }} + {% else %} + {% if vars.permit is defined and vars.permit %} + {% if vars.seq_num is defined and vars.seq_num %} +route-map {{ vars.name }} permit {{ vars.seq_num }} + {% else %} +route-map {{ vars.name }} permit 10 + {% endif %} + {% elif vars.permit is defined and not vars.permit %} + {% if vars.seq_num is defined and vars.seq_num %} +route-map {{ vars.name }} deny {{ vars.seq_num }} + {% else %} +route-map {{ vars.name }} deny 10 + {% endif %} + {% else %} + {% if vars.seq_num is defined and vars.seq_num %} +route-map {{ vars.name }} permit {{ vars.seq_num }} + {% else %} +route-map {{ vars.name }} permit 10 + {% endif %} + {% endif %} + {% if vars.set 
is defined and vars.set %} + {% if vars.set.local_pref is defined %} + {% if vars.set.local_pref %} + set local-preference {{ vars.set.local_pref }} + {% else %} + no set local-preference + {% endif %} + {% endif %} + {% if vars.set.metric_type is defined %} + {% if vars.set.metric_type %} + set metric-type {{ vars.set.metric_type }} + {% else %} + no set metric-type internal + {% endif %} + {% endif %} + {% if vars.set.metric is defined %} + {% if vars.set.metric %} + set metric {{ vars.set.metric }} + {% else %} + no set metric + {% endif %} + {% endif %} + {% if vars.set.origin is defined %} + {% if vars.set.origin %} + set origin {{ vars.set.origin }} + {% else %} + no set origin + {% endif %} + {% endif %} + {% if vars.set.community is defined %} + {% if vars.set.community %} + set community {{ vars.set.community }} + {% else %} + no set community internet + {% endif %} + {% endif %} + {% if vars.set.extcommunity is defined %} + {% if vars.set.extcommunity %} + set extcommunity rt {{ vars.set.extcommunity }} + {% else %} + no set extcommunity rt 11:33 + {% endif %} + {% endif %} + {% if vars.set.weight is defined %} + {% if vars.set.weight %} + set weight {{ vars.set.weight }} + {% else %} + no set weight + {% endif %} + {% endif %} + {% if vars.set.comm_list is defined and vars.set.comm_list %} + {% if vars.set.comm_list.add is defined and vars.set.comm_list.add %} + set comm-list {{ vars.set.comm_list.add }} add + {% else %} + no set comm-list aa add + {% endif %} + {% if vars.set.comm_list.delete is defined and vars.set.comm_list.delete %} + set comm-list {{ vars.set.comm_list.delete }} delete + {% else %} + no set comm-list aa delete + {% endif %} + {% endif %} + {% if vars.set.extcomm_list is defined and vars.set.extcomm_list %} + {% if vars.set.extcomm_list.add is defined and vars.set.extcomm_list.add %} + set extcomm-list {{ vars.set.extcomm_list.add }} add + {% else %} + no set extcomm-list aa add + {% endif %} + {% if vars.set.extcomm_list.delete is 
defined and vars.set.extcomm_list.delete %} + set extcomm-list {{ vars.set.extcomm_list.delete }} delete + {% else %} + no set extcomm-list aa delete + {% endif %} + {% endif %} + {% if vars.set.next_hop is defined and vars.set.next_hop %} + {% for item in vars.set.next_hop %} + {% if item.type is defined and item.type %} + {% if item.address is defined and item.address %} + {% if item.state is defined and item.state=="absent" %} + no set {{ item.type }} next-hop {{ item.address }} + {% else %} + {% if item.track_id is defined and item.track_id %} + set {{ item.type }} next-hop {{ item.address }} track-id {{ item.track_id }} + {% else %} + set {{ item.type }} next-hop {{ item.address }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if vars.continue is defined %} + {% if vars.continue %} + continue {{ vars.continue }} + {% else %} + no continue + {% endif %} + {% endif %} + {% if vars.match is defined and vars.match %} + {% for match in vars.match %} + {% if match.ip_type is defined and match.ip_type %} + {% if match.ip_type =="ipv4" %} + {% set ip = "ip" %} + {% else %} + {% set ip = "ipv6" %} + {% endif %} + {% if match.access_group is defined %} + {% if match.access_group %} + match {{ ip }} address {{ match.access_group }} + {% else %} + no match {{ ip }} address a + {% endif %} + {% endif %} + {% if match.prefix_list is defined %} + {% if match.prefix_list %} + match {{ ip }} address prefix-list {{ match.prefix_list }} + {% else %} + no match {{ ip }} address prefix-list a + {% endif %} + {% endif %} + {% endif %} + {% if match.source_protocol_ebgp is defined and match.source_protocol_ebgp %} + {% if match.source_protocol_ebgp == "present" %} + match source-protocol bgp ebgp + {% endif %} + {% if match.source_protocol_ebgp == "absent" %} + no match source-protocol bgp ebgp + {% endif %} + {% endif %} + {% if match.source_protocol_ibgp is defined and match.source_protocol_ibgp %} + {% if 
match.source_protocol_ibgp == "present" %} + match source-protocol bgp ibgp + {% endif %} + {% if match.source_protocol_ibgp == "absent" %} + no match source-protocol bgp ibgp + {% endif %} + {% endif %} + {% if match.source_protocol_evpn is defined and match.source_protocol_evpn %} + {% if match.source_protocol_evpn == "present" %} + match source-protocol bgp evpn + {% endif %} + {% if match.source_protocol_evpn == "absent" %} + no match source-protocol bgp evpn + {% endif %} + {% endif %} + {% if match.source_protocol_ospf is defined and match.source_protocol_ospf %} + {% if match.source_protocol_ospf == "present" %} + match source-protocol ospf + {% endif %} + {% if match.source_protocol_ospf == "absent" %} + no match source-protocol ospf + {% endif %} + {% endif %} + {% if match.source_protocol_static is defined and match.source_protocol_static %} + {% if match.source_protocol_static == "present" %} + match source-protocol static + {% endif %} + {% if match.source_protocol_static == "absent" %} + no match source-protocol static + {% endif %} + {% endif %} + {% if match.source_protocol_connected is defined and match.source_protocol_connected %} + {% if match.source_protocol_connected == "present" %} + match source-protocol connected + {% endif %} + {% if match.source_protocol_connected == "absent" %} + no match source-protocol connected + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% elif key == "as_path" %} + {% for item in os10_route_map[key] %} + {% if item.access_list is defined and item.access_list %} + {% if item.permit is defined %} + {% if item.permit %} + {% set filter = "permit" %} + {% else %} + {% set filter = "deny" %} + {% endif %} + {% if item.regex is defined and item.regex %} + {% if item.state is defined and item.state == "absent" %} +no ip as-path access-list {{ item.access_list }} {{ filter }} {{ item.regex }} + {% else %} +ip as-path access-list {{ item.access_list }} {{ filter }} {{ 
item.regex }} + {% endif %} + {% endif %} + {% else %} + {% if item.state is defined and item.state == "absent" %} +no ip as-path access-list {{ item.access_list }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% elif key == "community_list" %} + {% for item in os10_route_map[key] %} + {% if item.type is defined and item.type %} + {% if item.name is defined and item.name %} + {% if item.permit is defined %} + {% if item.permit %} + {% set filter = "permit" %} + {% else %} + {% set filter = "deny" %} + {% endif %} + {% if item.regex is defined and item.regex %} + {% if item.state is defined and item.state == "absent" %} +no ip community-list standard {{ item.name }} {{ filter }} {{ item.regex }} + {% else %} +ip community-list standard {{ item.name }} {{ filter }} {{ item.regex }} + {% endif %} + {% elif item.community is defined and item.community %} + {% if item.state is defined and item.state == "absent" %} +no ip community-list standard {{ item.name }} {{ filter }} {{ item.community }} + {% else %} +ip community-list {{ item.type }} {{ item.name }} {{ filter }} {{ item.community }} + {% endif %} + {% endif %} + {% else %} + {% if item.state is defined and item.state == "absent" %} +no ip community-list standard {{ item.name }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% elif key == "extcommunity_list" %} + {% for item in os10_route_map[key] %} + {% if item.type is defined and item.type %} + {% if item.name is defined and item.name %} + {% if item.permit is defined %} + {% if item.permit %} + {% set filter = "permit" %} + {% else %} + {% set filter = "deny" %} + {% endif %} + {% if item.regex is defined and item.regex %} + {% if item.state is defined and item.state == "absent" %} +no ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.regex }} + {% else %} +ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.regex }} + {% endif %} + {% elif item.community is defined and item.community %} 
+ {% if item.state is defined and item.state == "absent" %} +no ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.community }} + {% else %} +ip extcommunity-list {{ item.type }} {{ item.name }} {{ filter }} {{ item.community }} + {% endif %} + {% endif %} + {% else %} + {% if item.state is defined and item.state == "absent" %} +no ip extcommunity-list standard {{ item.name }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory new file mode 100644 index 00000000..85a255f9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory @@ -0,0 +1,2 @@ +--- +localhost diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml new file mode 100644 index 00000000..e791b295 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml @@ -0,0 +1,55 @@ +--- +# Sample variables for OS10 device +os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: present + community_list: + - type: standard + name: qqq + permit: false + community: internet + state: present + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: present + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + source_protocol_ebgp: present + source_protocol_ibgp: present + source_protocol_evpn: present + source_protocol_static: present + source_protocol_ospf: present + source_protocol_connected: present + set: + local_pref: 1200 + metric_type: internal + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: 
present + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml new file mode 100644 index 00000000..4302a12d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os10.os10_route_map diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml new file mode 100644 index 00000000..ff4a48ac --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_route_map diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/README.md b/ansible_collections/dellemc/os10/roles/os10_snmp/README.md new file mode 100644 index 00000000..a875a234 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/README.md @@ -0,0 +1,269 @@ +SNMP role +========= + +This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The SNMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_snmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``snmp_contact`` | string | Configures SNMP contact information | os10 | +| ``snmp_location`` | string | Configures SNMP location information | os10 | +| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os10 | +| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os10 | +| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os10 | +| ``snmp_community.access_list`` | dictionary | Configures ACL for the community (see ``snmp_community.access_list.*``) | os10 | 
+| ``snmp_community.access_list.name`` | string | Specifies the name of the ACL for the community | os10 | +| ``snmp_community.access_list.state`` | string: absent,present\* | Deletes the ACL from the community if set to absent | os10 | +| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os10 | +| ``snmp_engine_id`` | string | Configures SNMP local EngineID | os10 | +| ``snmp_remote_engine_id`` | list | Configures SNMP remote engine information (see ``snmp_remote_engine_id.*``) | os10 | +| ``snmp_remote_engine_id.ip`` | string | Configures the IP address of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.engine_id`` | string | Configures the EngineID of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.udpport`` | string | Configures the UDP port of the SNMP remote engine | os10 | +| ``snmp_remote_engine_id.state`` | string: absent,present\* | Deletes the SNMP remote engine information if set to absent | os10 | +| ``snmp_group`` | list | Configures the SNMP group information (see ``snmp_group.*``) | os10 | +| ``snmp_group.name`` | string | Configures the name of the SNMP group | os10 | +| ``snmp_group.version`` | string: 1,2c,3 | Configures the version of the SNMP group | os10 | +| ``snmp_group.security_level`` | string: auth,noauth,priv | Configures the security level of SNMP group for version 3 | os10 | +| ``snmp_group.access_list`` | dictionary | Configures the access list of the SNMP group (see ``snmp_group.access_list.*``)| os10 | +| ``snmp_group.access_list.name`` | string | Specifies the name of the access list for the SNMP group with version 1 or 2c | os10 | +| ``snmp_group.access_list.state`` | string: absent,present\* | Deletes the access list from the SNMP group if set to absent | os10 | +| ``snmp_group.read_view`` | dictionary | Configures the read view of the SNMP group (see ``snmp_group.read_view.*``) | os10 | +| ``snmp_group.read_view.name`` | string | Specifies the 
name of the read view for the SNMP group | os10 | +| ``snmp_group.read_view.state`` | string: absent,present\* | Deletes the read view from the SNMP group if set to absent | os10 | +| ``snmp_group.write_view`` | dictionary | Configures the write view of the SNMP group (see ``snmp_group.write_view``) | os10 | +| ``snmp_group.write_view.name`` | string | Specifies the name of the write view for the SNMP group | os10 | +| ``snmp_group.write_view.state`` | string: absent,present\* | Deletes the write view from the SNMP group if set to absent | os10 | +| ``snmp_group.notify_view`` | dictionary | Configures the notify view of the SNMP group (see ``snmp_group.notify_view.*``) | os10 | +| ``snmp_group.notify_view.name`` | string | Specifies the name of the notify view for the SNMP group | os10 | +| ``snmp_group.notify_view.state`` | string: absent,present\* | Deletes the notify view from the SNMP group if set to absent | os10 | +| ``snmp_group.state`` | string: absent,present\* | Deletes the SNMP group if set to absent | os10 | +| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os10 | +| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os10 | +| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host for version 1 or 2c | os10 | +| ``snmp_host.udpport`` | string | Configures the UDP number of the SNMP trap host (0 to 65535) | os10 | +| ``snmp_host.version`` | string: 1,2c,3 (required) | Specifies the SNMP version of the host (1 or 2c or 3 in os10) | os10 | +| ``snmp_host.security_level`` | string: auth,noauth,priv | Configures the security level of the SNMP host for version 3 | os10 | +| ``snmp_host.security_name`` | string | Configures the security name of the SNMP host for version 3 | os10 | +| ``snmp_host.notification_type`` | string: traps,informs | Configures the notification type of the SNMP host | os10 | +| ``snmp_host.trap_categories`` | dictionary | 
Enables or disables different trap categories for the SNMP host (see ``snmp_host.trap_categories.*``) | os10 | +| ``snmp_host.trap_categories.dom`` | boolean: true,false | Enables or disables dom category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.entity`` | boolean: true,false | Enables or disables entity category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.envmon`` | boolean: true,false | Enables or disables envmon category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.lldp`` | boolean: true,false | Enables or disables lldp category traps for the SNMP host | os10 | +| ``snmp_host.trap_categories.snmp`` | boolean: true,false | Enables or disables snmp category traps for the SNMP host | os10 | +| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os10 | +| ``snmp_source_interface`` | string | Configures the source interface for SNMP | os10 | +| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os10 | +| ``snmp_traps.name`` | string | Enables SNMP traps | os10 | +| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os10 | +| ``snmp_user`` | list | Configures the SNMP user information (see ``snmp_user.*``) | os10 | +| ``snmp_user.name`` | string | Specifies the name of the SNMP user | os10 | +| ``snmp_user.group_name`` | string | Specifies the group of the SNMP user | os10 | +| ``snmp_user.version`` | string: 1,2c,3 | Configures the version for the SNMP user | os10 | +| ``snmp_user.access_list`` | string | Configures the access list for the SNMP user with version 1 or 2c | os10 | +| ``snmp_user.authentication`` | dictionary | Configures the authentication information for the SNMP user with version 3 (see ``snmp_user.authentication.*``) | os10 | +| ``snmp_user.authentication.localized`` | boolean: true,false | Configures the password to be in localized key format or not | os10 | +| 
``snmp_user.authentication.algorithm`` | string: md5, sha | Configures the authentication algorithm for the SNMP user | os10 | +| ``snmp_user.authentication.password`` | string | Configures the authentication password for the SNMP user; if localized is true it should be a hexadecimal string prefixed with 0x and quoted | os10 | +| ``snmp_user.authentication.encryption`` | dictionary | Configures the encryption parameters for the SNMP user | os10 | +| ``snmp_user.authentication.encryption.algorithm`` | string: aes,des | Configures the encryption algorithm for the SNMP user | os10 | +| ``snmp_user.authentication.encryption.password`` | string | Configures encryption password for the SNMP user; if localized is true it should be a hexadecimal string prefixed with 0x and quoted | os10 | +| ``snmp_user.remote`` | dictionary | Configures the remote SNMP entity the user belongs to (see ``snmp_user.remote.*``) | os10 | +| ``snmp_user.remote.ip`` | string | Configures the IP address of the remote entity for the SNMP user | os10 | +| ``snmp_user.remote.udpport`` | string | Configures the UDP port of the remote entity for the SNMP user | os10 | +| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os10 | +| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os10 | +| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os10 | +| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os10 | +| ``snmp_view.include`` | boolean: true,false | Specifies if the MIB family should be included or excluded from the view | os10 | +| ``snmp_view.state`` | string: absent,present\* | Deletes the SNMP view if set to absent | os10 | +| ``snmp_vrf`` | string | Configures the VRF for SNMP | os10 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value 
if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_snmp: + snmp_contact: test + snmp_location: Chennai + snmp_source_interface: loopback 10 + snmp_vrf: test + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: present + state: present + snmp_engine_id: 123456789 + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + udpport: 162 + state: present + snmp_traps: + - name: all + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + trap_categories: + dom: true + lldp: true + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: true + snmp: true + state: present + snmp_group: + - name: group_1 + version: "2c" + state: present + access_list: + name: test_acl + state: present + 
read_view: + name: view_1 + state: present + write_view: + name: view_2 + state: present + notify_view: + name: view_3 + state: present + - name: group_2 + version: 3 + security_level: priv + state: present + read_view: + name: view_1 + state: absent + notify_view: + name: view_3 + state: present + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + remote: + ip: 1.1.1.1 + udpport: 200 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: present + - name: user_4 + group_name: group_1 + version: 3 + state: present + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present + +**Simple playbook to setup SNMP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_snmp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml new file mode 100644 index 00000000..81a11877 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_snmp diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml new file mode 100644 index 00000000..1a8a3142 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_snmp diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml new file mode 100644 index 00000000..efbf4e40 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml new file mode 100644 index 00000000..7ed03578 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating SNMP configuration for os10" + template: + src: os10_snmp.j2 + dest: "{{ build_dir }}/snmp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning SNMP configuration for os10" + os10_config: + src: os10_snmp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2 b/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2 new file mode 100644 index 00000000..f9581709 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2 @@ -0,0 +1,441 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure snmp commands for os10 Devices +os10_snmp: + snmp_contact: test + snmp_location: Chennai + snmp_vrf: test + snmp_source_interface: loopback 10 + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: present + state: present + snmp_traps: + - name: all + state: present + snmp_engine_id: 123456789 + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + udpport: 162 + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + 
snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: true + snmp: true + state: present + snmp_group: + - name: group_1 + version: "2c" + state: present + access_list: + name: test_acl + state: present + read_view: + name: view_1 + state: present + write_view: + name: view_2 + state: present + notify_view: + name: view_3 + state: present + - name: group_2 + version: 3 + security_level: priv + state: present + read_view: + name: view_1 + state: absent + notify_view: + name: view_3 + state: present + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + remote: + ip: 1.1.1.1 + udpport: 200 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: present + - name: user_4 + group_name: group_1 + version: 3 + state: present + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present + +###############################################} +{% if os10_snmp is defined and os10_snmp %} + {% if os10_snmp.snmp_community is defined %} + {% set value = os10_snmp.snmp_community %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.access_mode is defined and item.access_mode %} + {% if item.state is defined and item.state == "absent" 
%} +no snmp-server community {{ item.name }} {{ item.access_mode }} + {% else %} + {% if item.access_list is defined and item.access_list and item.access_list.name is defined and item.access_list.name %} + {% if item.access_list.state is defined and item.access_list.state == "absent" %} +no snmp-server community {{ item.name }} {{ item.access_mode }} acl {{ item.access_list.name }} + {% else %} +snmp-server community {{ item.name }} {{ item.access_mode }} acl {{ item.access_list.name }} + {% endif %} + {% else %} +snmp-server community {{ item.name }} {{ item.access_mode }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_contact is defined %} + {% set value = os10_snmp.snmp_contact %} + {% if value %} +snmp-server contact {{ value }} + {% else %} +no snmp-server contact + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_engine_id is defined %} + {% set value = os10_snmp.snmp_engine_id %} + {% if value %} +snmp-server engineID local {{ value }} + {% else %} +no snmp-server engineID local + {% endif %} + {% endif %} + + {# Remove users before removing remote engine #} + {% if os10_snmp.snmp_user is defined and os10_snmp.snmp_user %} + {% set value = os10_snmp.snmp_user %} + {% for item in value %} + {% if item.name is defined and item.name and item.group_name is defined and item.group_name %} + {% if item.state is defined and item.state == "absent" %} + {% set user_remote_option = "" %} + {% if item.remote is defined and item.remote %} + {% if item.remote.ip is defined and item.remote.ip %} + {% if item.remote.udpport is defined and item.remote.udpport %} + {% set user_remote_option = " remote " + item.remote.ip + " udp-port " + item.remote.udpport|string %} + {% endif %} + {% endif %} + {% endif %} +no snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if 
os10_snmp.snmp_remote_engine_id is defined %} + {% set value = os10_snmp.snmp_remote_engine_id %} + {% for item in value %} + {% if item.host is defined and item.host %} + {% if item.state is defined and item.state == "absent" %} + {% if item.udpport is defined and item.udpport %} +no snmp-server engineID remote {{ item.host }} udp-port {{ item.udpport }} + {% else %} +no snmp-server engineID remote {{ item.host }} + {% endif %} + {% else %} + {% if item.engine_id is defined and item.engine_id %} + {% if item.udpport is defined and item.udpport %} +snmp-server engineID remote {{ item.host }} udp-port {{ item.udpport }} {{ item.engine_id }} + {% else %} +snmp-server engineID remote {{ item.host }} udp-port 162 {{ item.engine_id }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if os10_snmp.snmp_traps is defined %} + {% set value = os10_snmp.snmp_traps %} + {% if value %} + {% for val in value %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} + {% if val.name == "all" %} + {% set trap_list = ['snmp authentication','snmp linkDown','snmp linkUp','envmon fan','envmon power-supply','envmon temperature'] %} + {% for name in trap_list %} +no snmp-server enable traps {{ name }} + {% endfor %} + {% else %} +no snmp-server enable traps {{ val.name }} + {% endif %} + {% else %} + {% if val.name == "all" %} + {% set trap_list = ['snmp authentication','snmp linkDown','snmp linkUp','envmon fan','envmon power-supply','envmon temperature'] %} + {% for name in trap_list %} +snmp-server enable traps {{ name }} + {% endfor %} + {% else %} +snmp-server enable traps {{ val.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_group is defined and os10_snmp.snmp_group %} + {% set value = os10_snmp.snmp_group %} + {% for item in value %} + {% if item.name is defined and item.name and item.version is defined and item.version %} 
+ {% set group_value = item.name + " " + item.version|string %} + {% if item.security_level is defined and item.security_level %} + {% if item.version|string != "1" and item.version|string != "2c" %} + {% set group_value = group_value + " " + item.security_level %} + {% endif %} + {% endif %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server group {{ group_value }} + {% else %} + {% set group_options = [] %} + {% if item.version|string == "1" or item.version|string == "2c" %} + {% if item.access_list is defined and item.access_list and item.access_list.name is defined and item.access_list.name %} + {% if item.access_list.state is defined and item.access_list.state == "absent" %} +no snmp-server group {{ group_value }} access {{ item.access_list.name }} + {% else %} + {{ group_options.append("access "+item.access_list.name) }} + {% endif %} + {% endif %} + {% endif %} + {% if item.notify_view is defined and item.notify_view and item.notify_view.name is defined and item.notify_view.name %} + {% if item.notify_view.state is defined and item.notify_view.state == "absent" %} +no snmp-server group {{ group_value }} notify {{ item.notify_view.name }} + {% else %} + {{ group_options.append("notify "+item.notify_view.name)}} + {% endif %} + {% endif %} + {% if item.read_view is defined and item.read_view and item.read_view.name is defined and item.read_view.name %} + {% if item.read_view.state is defined and item.read_view.state == "absent" %} +no snmp-server group {{ group_value }} read {{ item.read_view.name }} + {% else %} + {{ group_options.append("read "+item.read_view.name) }} + {% endif %} + {% endif %} + {% if item.write_view is defined and item.write_view and item.write_view.name is defined and item.write_view.name %} + {% if item.write_view.state is defined and item.write_view.state == "absent" %} +no snmp-server group {{ group_value }} write {{ item.write_view.name }} + {% else %} + {{ group_options.append("write 
"+item.write_view.name)}} + {% endif %} + {% endif %} +snmp-server group {{ group_value }} {{ group_options|join(" ") }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if os10_snmp.snmp_host is defined and os10_snmp.snmp_host %} + {% set value = os10_snmp.snmp_host %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% set host_port_option = "" %} + {% if item.udpport is defined and item.udpport %} + {% set host_port_option = " udp-port " + item.udpport|string %} + {% endif %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server host {{ item.ip }}{{ host_port_option }} + {% else %} + {% set host_notif_type = "traps" %} + {% if item.notification_type is defined and item.notification_type %} + {% set host_notif_type = item.notification_type %} + {% endif %} + {% if item.version is defined and item.version %} + {% set host_version = "" %} + {% if item.version|string == "1" or item.version|string == "2c" %} + {% if item.communitystring is defined and item.communitystring %} + {% set host_version = item.version|string + " " + item.communitystring %} + {% endif %} + {% elif item.security_level is defined and item.security_level %} + {% if item.security_name is defined and item.security_name %} + {% set host_version = item.version|string + " " + item.security_level + " " + item.security_name %} + {% endif %} + {% endif %} + {% set host_trap_categories = [] %} + {% if item.trap_categories is defined and item.trap_categories %} + {% for cat_key, cat_value in item.trap_categories.items() %} + {% if cat_value %} + {% if cat_key == "dom" %} + {{ host_trap_categories.append("dom")}} + {% elif cat_key == "entity" %} + {{ host_trap_categories.append("entity") }} + {% elif cat_key == "envmon" %} + {{ host_trap_categories.append("envmon") }} + {% elif cat_key == "lldp" %} + {{ host_trap_categories.append("lldp") }} + {% elif cat_key == "snmp" %} + {{ host_trap_categories.append("snmp") }} + {% endif %} + {% endif %} + {% 
endfor %} + {% endif %} + {% if host_version %} +snmp-server host {{ item.ip }} {{ host_notif_type }} version {{ host_version }}{{ host_port_option }} {{ host_trap_categories|join(" ") }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if os10_snmp.snmp_location is defined %} + {% set value = os10_snmp.snmp_location %} + {% if value %} +snmp-server location {{ value }} + {% else %} +no snmp-server location + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_source_interface is defined %} + {% set value = os10_snmp.snmp_source_interface %} + {% if value %} +snmp-server source-interface {{ value.split() | join() }} + {% else %} +no snmp-server source-interface + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_user is defined and os10_snmp.snmp_user %} + {% set value = os10_snmp.snmp_user %} + {% for item in value %} + {% if item.name is defined and item.name and item.group_name is defined and item.group_name %} + {% if item.version is defined and item.version %} + {% if item.state is defined and item.state == "absent" %} + {# snmp user removal is handled above remote engind id #} + {% else %} + {% set user_remote_option = "" %} + {% if item.remote is defined and item.remote %} + {% if item.remote.ip is defined and item.remote.ip %} + {% if item.remote.udpport is defined and item.remote.udpport %} + {% set user_remote_option = " remote " + item.remote.ip + " udp-port " + item.remote.udpport|string %} + {% endif %} + {% endif %} + {% endif %} + {% if item.version|string == "1" or item.version|string == "2c" %} + {% set user_acl_option = "" %} + {% if item.access_list is defined and item.access_list %} + {% set user_acl_option = "access " + item.access_list %} + {% endif %} +snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }} {{ user_acl_option }} + {% else %} + {% set user_auth_option = "" %} + {% if item.authentication is defined and item.authentication %} + {% if 
item.authentication.localized is defined and item.authentication.localized %} + {% set user_auth_option = " localized" %} + {% endif %} + {% if item.authentication.algorithm is defined and item.authentication.algorithm %} + {% if item.authentication.password is defined and item.authentication.password %} + {% set user_auth_option = user_auth_option + " auth " + item.authentication.algorithm + " " + item.authentication.password %} + {% if item.authentication.encryption is defined and item.authentication.encryption %} + {% if item.authentication.encryption.algorithm is defined and item.authentication.encryption.algorithm %} + {% if item.authentication.encryption.password is defined and item.authentication.encryption.password %} + {% set user_auth_option = user_auth_option + " priv " + item.authentication.encryption.algorithm + " " + item.authentication.encryption.password %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} +snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }}{{ user_auth_option }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if os10_snmp.snmp_view is defined %} + {% set value = os10_snmp.snmp_view %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.oid_subtree is defined and item.oid_subtree %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server view {{ item.name }} {{ item.oid_subtree }} + {% else %} + {% if item.include is defined %} + {% if item.include %} +snmp-server view {{ item.name }} {{ item.oid_subtree }} included + {% elif not item.include %} +snmp-server view {{ item.name }} {{ item.oid_subtree }} excluded + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if os10_snmp.snmp_vrf is defined %} + {% set value = os10_snmp.snmp_vrf %} + {% if value %} +snmp-server vrf {{ 
value }} + {% else %} +no snmp-server vrf default + {% endif %} + {% endif %} + +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml new file mode 100644 index 00000000..bafcc210 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml @@ -0,0 +1,29 @@ +--- +# vars file for dellemc.os10.os10_snmp, +# below gives a sample configuration +# Sample variables for OS10 device +os10_snmp: + snmp_contact: test + snmp_location: Chennai + snmp_community: + - name: public + access_mode: ro + state: present + snmp_traps: + - name: all + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: false + state: absent + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml new file mode 100644 index 00000000..6b4b4e7e --- /dev/null 
+++ b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_snmp diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml new file mode 100644 index 00000000..407dad8e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_snmp diff --git a/ansible_collections/dellemc/os10/roles/os10_system/LICENSE b/ansible_collections/dellemc/os10/roles/os10_system/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/README.md b/ansible_collections/dellemc/os10/roles/os10_system/README.md new file mode 100644 index 00000000..119138af --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/README.md @@ -0,0 +1,126 @@ +System role +=========== + +This role facilitates the configuration of global system attributes. It specifically enables configuration of hostname and hashing algorithm. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The System role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_system keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``hostname`` | string | Configures a hostname to the device (no negate command) | os10 | +| ``hardware_forwarding`` | string: scaled-l2,scaled-l3-routes,scaled-l3-hosts | Configures hardware forwarding mode | os10 | +| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os10 | +| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os10 | +| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os10 | +| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os10 | +| ``algo.state`` | string: 
absent,present\* | Deletes the hashing algorithm if set to absent | os10 | +| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os10 | +| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os10 | +| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm; | os10 | +| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os10 | +| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os10 | +| ``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm | os10 | +| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os10 | +| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os10 | +| ``load_balance.mac_selection`` | list | Configures MAC key fields to use in hashing algorithm (see ``mac_selection.*``) | os10 | +| ``mac_selection.field`` | string | Configures MAC key fields to use in hashing algorithm | os10 | +| ``mac_selection.state`` | string: absent,present\* | Deletes the MAC key fields if set to absent | os10 | +| ``load_balance.tcp_udp_selection`` | list | Configures TCP UDP ports for load balancing configurations (see ``tcp_udp_selection.*``) | os10 | +| ``tcp_udp_selection.field`` | string | Configures TCP UDP port fields to use in hashing algorithm | os10 | +| ``tcp_udp_selection.state`` | string: absent,present\* | Deletes the TCP UDP ports if set to absent | os10 | +| ``min_ra`` | string | Configures global RA minimum interval value, applicable to all interfaces across VRFs | os10 | +| ``max_ra`` | string | Configures global RA maximum interval value, applicable to all interfaces across VRFs | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value 
if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_system role* to completely set the NTP server, hostname, enable password, management route, hash algorithm, clock, line terminal, banner and reload type. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The system role writes a simple playbook that only references the *os10_system* role. By including the role, you automatically get access to all of the tasks to configure system features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_system: + hostname: os10 + hardware_forwarding: scaled-l3-hosts + hash_algo: + algo: + - name: lag + mode: crc + state: present + - name: ecmp + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: source-ip + state: present + ipv6_selection: + - field: source-ip + state: present + mac_selection: + - field: source-mac + state: present + tcp_udp_selection: + - field: l4-source-port + state: present + max_ra: 15 + min_ra: 10 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_system + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml new file mode 100644 index 00000000..55924055 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_system diff --git a/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml new file mode 100644 index 00000000..b7913129 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_system diff --git a/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml new file mode 100644 index 00000000..588850b6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_system role facilitates the configuration of system attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml new file mode 100644 index 00000000..5b61c862 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating system configuration for os10" + template: + src: os10_system.j2 + dest: "{{ build_dir }}/system10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning system configuration for os10" + os10_config: + src: os10_system.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2 b/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2 new file mode 100644 index 00000000..95edc2eb --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2 @@ -0,0 +1,130 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# + +Purpose: +Configure system commands for os10 Devices + +os10_system: + hostname: os10 + hardware_forwarding: scaled-l3-routes + hash_algo: + algo: + - name: lag + mode: crc + state: present + - name: ecmp + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: source-ip + state: present + ipv6_selection: + - field: source-ip + state: present + mac_selection: + - field: source-mac + state: present + tcp_udp_selection: + - field: l4-source-port + state: present + 
max_ra: 15 + min_ra: 10 + +###############################################} +{% if os10_system is defined and os10_system %} +{% if os10_system.hostname is defined and os10_system.hostname %} +hostname {{ os10_system.hostname }} +{% endif %} +{% if os10_system.max_ra is defined %} + {% if os10_system.max_ra %} +ipv6 nd max-ra-interval {{ os10_system.max_ra }} + {% else %} +no ipv6 nd max-ra-interval + {% endif %} +{% endif %} +{% if os10_system.min_ra is defined %} + {% if os10_system.min_ra %} +ipv6 nd min-ra-interval {{ os10_system.min_ra }} + {% else %} +no ipv6 nd min-ra-interval + {% endif %} +{% endif %} +{% for key,value in os10_system.items() %} + {% if key == "hardware_forwarding" %} + {% if value %} +hardware forwarding-table mode {{ value }} + {% else %} +no hardware forwarding-table mode + {% endif %} + {% elif key == "hash_algo" and value %} + {% if value.algo is defined and value.algo %} + {% for item in value.algo %} + {% if item.name is defined and item.name %} + {% if item.mode is defined and item.mode %} + {% if item.state is defined and item.state == "absent" %} +no hash-algorithm {{ item.name }} {{ item.mode }} + {% else %} +hash-algorithm {{ item.name }} {{ item.mode }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% elif key == "load_balance" and value %} + {% if value.ingress_port is defined %} + {% if value.ingress_port %} +load-balancing ingress-port enable + {% else %} +no load-balancing ingress-port enable + {% endif %} + {% endif %} + {% if value.ip_selection is defined and value.ip_selection %} + {% for listitem in value.ip_selection %} + {% if listitem.field is defined and listitem.field %} + {% if listitem.state is defined and listitem.state == "absent" %} +no load-balancing ip-selection {{ listitem.field }} + {% else %} +load-balancing ip-selection {{ listitem.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.ipv6_selection is defined and value.ipv6_selection %} + {% for 
listitem in value.ipv6_selection %} + {% if listitem.field is defined and listitem.field %} + {% if listitem.state is defined and listitem.state == "absent" %} +no load-balancing ipv6-selection {{ listitem.field }} + {% else %} +load-balancing ipv6-selection {{ listitem.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.mac_selection is defined and value.mac_selection %} + {% for listitem in value.mac_selection %} + {% if listitem.field is defined and listitem.field %} + {% if listitem.state is defined and listitem.state == "absent" %} +no load-balancing mac-selection {{ listitem.field }} + {% else %} +load-balancing mac-selection {{ listitem.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.tcp_udp_selection is defined and value.tcp_udp_selection %} + {% for listitem in value.tcp_udp_selection %} + {% if listitem.field is defined and listitem.field %} + {% if listitem.state is defined and listitem.state == "absent" %} +no load-balancing tcp-udp-selection {{ listitem.field }} + {% else %} +load-balancing tcp-udp-selection {{ listitem.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 
+spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml new file mode 100644 index 00000000..ea4bc20f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml @@ -0,0 +1,31 @@ +--- +# vars file for dellemc.os10.os10_system, +# below gives a sample configuration +# Sample variables for OS10 device +os10_system: + hostname: os10 + hardware_forwarding: scaled-l3-hosts + hash_algo: + algo: + - name: lag + mode: crc + state: present + - name: ecmp + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: source-ip + state: present + ipv6_selection: + - field: source-ip + state: present + mac_selection: + - field: source-mac + state: present + tcp_udp_selection: + - field: l4-source-port + state: present + max_ra: 15 + min_ra: 10 diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml new file mode 100644 index 00000000..8674f097 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_system diff --git a/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml new file mode 100644 index 00000000..4a69de59 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_system, diff --git a/ansible_collections/dellemc/os10/roles/os10_template/LICENSE b/ansible_collections/dellemc/os10/roles/os10_template/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/LICENSE @@ -0,0 +1,674 
@@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/README.md b/ansible_collections/dellemc/os10/roles/os10_template/README.md new file mode 100644 index 00000000..d7faf013 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/README.md @@ -0,0 +1,75 @@ +Template role +============== + +This role provides access to structured data from show commands. This role facilitates the TEXTFSM parsing engine. TextFSM is a template based state machine . It takes the raw string input from the CLI of network devices, run them through a TEXTFSM template and return structured text in the form of a Python dictionary. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Template role is highly customizable, and it works with separate template definitions which contain variables and rules with regular expressions. This library is very helpful to parse any text-based CLI output from network devices. The Template role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- Variables and values are case-sensitive + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_template* role to parse any text-based CLI output. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. All the supported CLI commands are imported as tasks in tasks/main.yml. + +For the *os10_template* role plugins to be used, you may need to specify the actual path of role in *ansible.cfg* file. + +**Sample ansible.cfg** + + action_plugins = ../../plugins/modules/ + + +**Sample hosts file** + + leaf1 ansible_host= ansible_network_os=dellemc.os10.os10 ansible_ssh_user=xxxxx ansible_ssh_pass=xxxxx + + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_template + +**Example playbook to run specific show command — leaf.yaml** + + + --- + - name: PARSE SHOW IP INTERFACE BRIEF + hosts: leaf1 + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + + + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml new file mode 100644 index 00000000..4df1a6b4 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + role_name: os10_template + author: Dell EMC Networking Engineering + description: The os10_template role facilitates to provide the structured output from CLI in devices running on Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml new file mode 100644 index 00000000..9b41a6c2 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml @@ -0,0 +1,24 @@ +--- + - name: os10 dellemc.os10.os10_template test + import_tasks: show_ip_interface_brief.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_port-channel_summary.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_lldp_neighbors.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_ip_vrf.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_ip_bgp_summary.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_vlan.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_vlt_err_disabled_ports.yaml + + - name: os10 dellemc.os10.os10_template test + import_tasks: show_spanning_tree_compatibility_mode.yaml diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml new file mode 100644 index 00000000..d2ee2ac7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW IP BGP SUMMARY + os10_command: + commands: + - show ip bgp summary + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_bgp_summary + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_ip_bgp_summary.template') }}" + content: "{{ output.stdout[0] }}" + name: bgp_facts + 
register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml new file mode 100644 index 00000000..f2996775 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW_IP_INTERFACE_BRIEF + os10_command: + commands: + - show ip interface brief + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_interface_brief + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_ip_interface_brief.template') }}" + content: " {{ output.stdout[0] }}" + name: ip_interface_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml new file mode 100644 index 00000000..616416ed --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW IP VRF + os10_command: + commands: + - show ip vrf + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_vrf + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_ip_vrf.template') }}" + content: "{{ output.stdout[0] }}" + name: vrf_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml new file mode 
100644 index 00000000..1fedfe4b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW LLDP NEIGHBORS + os10_command: + commands: + - show lldp neighbors + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_lldp_neighbors + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_lldp_neighbors.template') }}" + content: "{{ output.stdout[0] }}" + name: lldp_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml new file mode 100644 index 00000000..2d26c14e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW PORT-CHANNEL SUMMARY + os10_command: + commands: + - show port-channel summary + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_port-channel_summary + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_port-channel_summary.template') }}" + content: "{{ output.stdout[0] }}" + name: port_channel_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml new file mode 100644 index 00000000..a3c69524 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml @@ 
-0,0 +1,21 @@ +--- + - name: CAPTURE SHOW SPANNING TREE COMPATIBILITY MODE + os10_command: + commands: + - command: show spanning-tree compatibility-mode + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_spanning_tree_compatibility_mode + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_spanning_tree_compatibility_mode.template') }}" + content: "{{ output.stdout[0] }}" + name: spanning_tree_comp_mode_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml new file mode 100644 index 00000000..ee3c988e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml @@ -0,0 +1,21 @@ +--- + - name: CAPTURE SHOW VLAN + os10_command: + commands: + - show vlan + register: output + + - name: DISPLAY THE OUTPUT + debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_vlan + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_vlan.template') }}" + content: "{{ output.stdout[0] }}" + name: vlan_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml new file mode 100644 index 00000000..0e0f8b3d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml @@ -0,0 +1,20 @@ +--- + - name: CAPTURE SHOW VLT ERR DISABLED PORTS + dellos10_command: + commands: ['show vlt all error-disabled-ports'] + register: output + + - name: DISPLAY THE OUTPUT + 
debug: var=output.stdout + + - name: INSTALL TEXTFSM + import_tasks: textfsm.yaml + + - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_vlt_err_dis_ports + textfsm_parser: + src: "{{ lookup('file', './templates/os10_show_vlt_err_disabled_ports.template') }}" + content: "{{ output.stdout[0] }}" + name: vlt_err_dis_facts + register: result + vars: + - ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml new file mode 100644 index 00000000..99394b44 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml @@ -0,0 +1,3 @@ +#Install Textfsm + - pip: + name: textfsm diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template new file mode 100644 index 00000000..52ddc289 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template @@ -0,0 +1,16 @@ +Value Filldown RouterId (\d+\.\d+\.\d+\.\d+) +Value Filldown LocalAs (\d+) +Value Filldown BFD (enabled) +Value Neighbor (\S+) +Value AS (\d+) +Value MsgRcvd (\d+) +Value MsgSent (\d+) +Value Status (\S+) +Value State (\S+) + +Start + ^BGP router identifier ${RouterId} local AS number ${LocalAs} + ^Global BFD is ${BFD} + ^(?!Neighbor)${Neighbor}\s+${AS}\s+${MsgRcvd}\s+${MsgSent}\s+${Status}\s+${State} -> Record + +EOF diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template new file mode 100644 index 00000000..20d01717 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template @@ -0,0 +1,9 @@ +Value INTERFACE_NAME (\S+\s\S+) 
+Value IP_ADDRESS (\S+) +Value OK (YES|NO) +Value METHOD (DHCP|manual|unset) +Value STATUS (up|down|admin down) +Value PROTOCOL (up|down|admin down) + +Start + ^${INTERFACE_NAME}\s+${IP_ADDRESS}\s+${OK}\s+${METHOD}\s+${STATUS}\s+${PROTOCOL} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template new file mode 100644 index 00000000..c7e4d754 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template @@ -0,0 +1,7 @@ +Value VRFName (\S+) +Value Interfaces (\S+) + +Start + ^(?!VRF-Name)${VRFName}\s+${Interfaces} -> Record + ^(?!VRF-Name)${VRFName} -> Record + ^\s+${Interfaces} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template new file mode 100644 index 00000000..3c935313 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template @@ -0,0 +1,7 @@ +Value LocPortID (\S+) +Value RemHostName (\S+) +Value RemPortId (\S+) +Value RemChassisId ([a-fA-F0-9:]{17}) + +Start + ^${LocPortID}\s+${RemHostName}\s+${RemPortId}\s+${RemChassisId} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template new file mode 100644 index 00000000..1e77b92e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template @@ -0,0 +1,9 @@ +Value GROUP (\d+) +Value PORT_CHANNEL (\S+\s+[(D)|(U)]+) +Value TYPE ([Eth|Gig|ten|For]+) +Value PROTOCOL ([DYNAMIC|STATIC]+) +Value List MEMBER_PORTS (\s.*) + +Start + 
^${GROUP}\s+${PORT_CHANNEL}\s+${TYPE}\s+${PROTOCOL}\s+${MEMBER_PORTS} -> Record + ^${GROUP}\s+${PORT_CHANNEL}\s+${TYPE}\s+${PROTOCOL} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template new file mode 100644 index 00000000..bf365e33 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template @@ -0,0 +1,6 @@ +Value Interface_name ([a-zA-Z\-]+\s*[\d\/\:]+) +Value Instance (VLAN\s+(\d+)) +Value Compatibility_mode (\S+\s*) + +Start + ^${Interface_name}\s+${Instance}\s+${Compatibility_mode} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template new file mode 100644 index 00000000..f71e9573 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template @@ -0,0 +1,12 @@ +Value Codes (\*|\@|\M|\R|\s) +Value NUM (\d+) +Value Status (Active|Inactive) +Value Description (\S+|\s+) +Value Q (A|T) +Value Ports (\S+) + +Start + ^${Codes}\s+${NUM}\s+${Status}\s+${Description}\s+${Q}\s+${Ports} -> Record + ^${Codes}\s+${NUM}\s+${Status}\s+${Description} -> Record + ^${Codes}\s+${NUM}\s+${Status} -> Record + ^\s+${Q}\s+${Ports} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template new file mode 100644 index 00000000..340b7a2b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template @@ -0,0 +1,5 @@ +Value VLT_PORT_CHANNEL_ID (\d+) +Value PORT_CHANNEL (\S+\s*) + +Start + 
^${VLT_PORT_CHANNEL_ID}\s+${PORT_CHANNEL} -> Record diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all b/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all new file mode 100644 index 00000000..902b3301 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all @@ -0,0 +1,3 @@ +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_network_os: dellemc.os10.os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml new file mode 100644 index 00000000..a76e0817 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[LeafAndSpineSwitch:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml new file mode 100644 index 00000000..ca1c43cc --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml @@ -0,0 +1,5 @@ +--- +- hosts: LeafAndSpineSwitch + connection: network_cli + roles: + - dellemc.os10.os10_template diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml new file mode 100644 index 00000000..6d49466d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml @@ -0,0 +1,9 @@ +--- +- hosts: LeafAndSpineSwitch + connection: network_cli + collections: + - dellemc.os10 + tasks: + - import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE b/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/README.md b/ansible_collections/dellemc/os10/roles/os10_uplink/README.md new file mode 100644 index 00000000..8ffeb0e7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/README.md @@ -0,0 +1,109 @@ +Uplink role +=========== + +This role facilitates the configuration of uplink failure detection feature attributes. It specifically enables configuration of association between upstream and downstream interfaces known as uplink-state group. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Uplink role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_uplink keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``uplink_state_group`` | list | Configures the uplink state group (see ``uplink_state_group.*``) | os10 | +| ``uplink_state_group.id`` | integer | Configures the uplink state group instance | os10 | +| ``uplink_state_group.enable`` | boolean: True,False | Enables the uplink state group instance | os10 | +| ``uplink_state_group.defer_time`` | integer | Configures defer timer for the uplink state group | os10 | +| ``uplink_state_group.uplink_type`` | list | Configures the upstream and downstream attribute (see ``uplink_type.*``) | 
os10 | +| ``uplink_type.type`` | string: upstream,downstream | Configures the uplink type | os10 | +| ``uplink_type.intf`` | string | Configures the uplink interface | os10 | +| ``uplink_type.state`` | string: absent,present\* | Removes the uplink stream if set to absent | os10 | +| ``uplink_state_group.downstream`` | dictionary | Configures downstream information for the uplink state group (see ``downstream.*``) | os10 | +| ``downstream.disable_links`` | integer | Configures number of downstream links to be disabled. String 'all' can be used to disable all downstream links | os10 | +| ``downstream.auto_recover`` | boolean: True,False | Enables or disables auto recover for downstream interfaces | os10 | +| ``uplink_state_group.state`` | string: absent,present\* | Removes the uplink state group instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +******************** + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_uplink role* to completely set the uplink state group instance, and upstream, downstream interfaces. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The uplink role writes a simple playbook that only references the *os10_uplink* role. By including the role, you automatically get access to all of the tasks to configure uplink features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet1/1/2-1/1/5" + state: "present" + state: "present" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + - id: 2 + enable: True + state: "present" + +> **NOTE**: Interfaces should be created using the *os10_interface* role. + +**Simple playbook to setup uplink — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_uplink + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml new file mode 100644 index 00000000..441d767e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_uplink diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml new file mode 100644 index 00000000..7abb0012 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_uplink diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml new file mode 100644 index 00000000..0bc56196 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + role_name: os10_uplink + author: Dell EMC Networking Engineering + description: The os10_uplink role facilitates the configuration of uplink attributes in devices running Dell EMC SmartFabric OS10. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml new file mode 100644 index 00000000..6500ea3c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating uplink configuration for os10" + template: + src: os10_uplink.j2 + dest: "{{ build_dir }}/uplink10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning uplink configuration for os10" + os10_config: + src: os10_uplink.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2 b/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2 new file mode 100644 index 00000000..64a237d8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2 @@ -0,0 +1,102 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +#Purpose: +Configure uplink commands for os10 Devices + +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet1/1/2-1/1/5" + state: "present" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + state: "present" + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet1/1/2-1/1/5" + state: "present" 
+ downstream: + disable_links: 10 + auto_recover: false + state: "present" + +###############################################} +{% if os10_uplink is defined and os10_uplink %} + {% if os10_uplink.uplink_state_group is defined and os10_uplink.uplink_state_group %} + {% for uplink_val in os10_uplink.uplink_state_group %} + {% if uplink_val.id is defined %} + {% if uplink_val.state is defined and uplink_val.state == "absent" %} +no uplink-state-group {{ uplink_val.id }} + {% else %} +uplink-state-group {{ uplink_val.id }} + {% if uplink_val.enable is defined %} + {% if uplink_val.enable == True %} + enable + {% else %} + no enable + {% endif %} + {% endif %} + {% if uplink_val.downstream is defined and uplink_val.downstream %} + {% if uplink_val.downstream.auto_recover is defined %} + {% if uplink_val.downstream.auto_recover %} + downstream auto-recover + {% else %} + no downstream auto-recover + {% endif %} + {% endif %} + {% if uplink_val.downstream.disable_links is defined %} + {% if uplink_val.downstream.disable_links %} + downstream disable-links {{ uplink_val.downstream.disable_links }} + {% else %} + no downstream disable-links + {% endif %} + {% endif %} + {% endif %} + {% if uplink_val.uplink_type is defined and uplink_val.uplink_type %} + {% for uplink in uplink_val.uplink_type %} + {% if uplink.type is defined and uplink.type %} + {% if uplink.state is defined and uplink.state == "absent" %} + {% if uplink.intf is defined and uplink.intf %} + {% if uplink.type == "downstream" %} + no downstream {{ uplink.intf }} + {% elif uplink.type == "upstream" %} + no upstream {{ uplink.intf }} + {% endif %} + {% endif %} + {% else %} + {% if uplink.intf is defined and uplink.intf %} + {% if uplink.type == "downstream" %} + downstream {{ uplink.intf }} + {% elif uplink.type == "upstream" %} + upstream {{ uplink.intf }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if uplink_val.defer_time is defined %} + {% if 
uplink_val.defer_time %} + defer-time {{ uplink_val.defer_time }} + {% else %} + no defer-time + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml new file mode 100644 index 00000000..90afe008 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml @@ -0,0 +1,16 @@ +--- +# vars file for dellemc.os10.os10_uplink, +# below gives a sample configuration +# Sample variables for OS10 device +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet1/1/2-1/1/5" + state: "present" + state: "present" diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml new file mode 100644 index 00000000..92c99613 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml @@ -0,0 +1,5 
@@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_uplink diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml new file mode 100644 index 00000000..a376eeb1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_uplink diff --git a/ansible_collections/dellemc/os10/roles/os10_users/LICENSE b/ansible_collections/dellemc/os10/roles/os10_users/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/README.md b/ansible_collections/dellemc/os10/roles/os10_users/README.md new file mode 100644 index 00000000..09d55f1d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/README.md @@ -0,0 +1,89 @@ +Users role +========== + +This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The Users role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_users list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os10 | +| ``password`` | string | Configures the password set for the username; password length must be at least eight characters | os10 | +| ``role`` | string | Configures the role assigned to the user | os10 | +| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value 
if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_users* role to configure global system user attributes. It creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file. It writes a simple playbook that only references the *os10_users* role. By including the role, you automatically get access to all of the tasks to configure user features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: present + - username: u1 + password: a1a2a3a4!@#$ + role: netadmin + state: present + +**Simple playbook to setup users — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_users + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml new file mode 100644 index 00000000..668eefac --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_users diff --git a/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml new file mode 100644 index 00000000..e73b341b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_users diff --git a/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml new file mode 100644 index 00000000..c73b755e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_users role facilitates the configuration of user attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml new file mode 100644 index 00000000..eb870a13 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating users configuration for os10" + template: + src: os10_users.j2 + dest: "{{ build_dir }}/users10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning users configuration for os10" + os10_config: + src: os10_users.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2 b/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2 new file mode 100644 index 00000000..080f6a59 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2 @@ -0,0 +1,27 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure users commands for os10 Devices +os10_users: + - username: test + password: test + role: sysadmin + state: present +###############################################} +{% if os10_users is defined and os10_users %} + {% for item in os10_users %} + {% if item.username is defined and item.username %} + {% if item.state is defined and item.state == "absent" %} +no username {{ item.username }} + {% else %} + {% if item.password is defined and item.password %} + {% if item.role is defined and item.role %} 
+username {{ item.username }} password {{ item.password }} role {{ item.role }} + {% else %} +username {{ item.username }} password {{ item.password }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml new file mode 100644 index 00000000..0bbc0633 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml @@ -0,0 +1,13 @@ +--- +# vars file for dellemc.os10.os10_users, +# below gives a sample configuration +# Sample variables for OS10 device +os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: present + - username: u1 + password: a1a2a3a4!@#$ + role: netadmin + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml new file mode 100644 index 00000000..26812451 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- 
hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_users diff --git a/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml new file mode 100644 index 00000000..49177b5b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_users diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/README.md b/ansible_collections/dellemc/os10/roles/os10_vlan/README.md new file mode 100644 index 00000000..71a7adf9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/README.md @@ -0,0 +1,123 @@ +VLAN role +========= + +This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration +- `os10_vlan` (dictionary) holds the key with the VLAN ID key and default-vlan key. 
+- VLAN ID key should be in format "vlan ID" (1 to 4094) +- Variables and values are case-sensitive + +**os10_vlan** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``default_vlan_id`` | integer | Configures the vlan-id as the default VLAN for an existing VLAN | os10 | + +**VLAN ID keys** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``description`` | string | Configures a single line description for the VLAN | os10 | +| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os10 | +| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os10 | +| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os10 | +| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os10 | +| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os10 | +| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os10 | +| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os10 | +| ``virtual_gateway_ip`` | string | Configures an anycast gateway IPv4 address for VLAN interfaces| os10 | +| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 | +| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars directories* or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value 
if none is specified. + + +## Example playbook + +This example uses the *os10_vlan* role to setup the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated to it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vlan* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_vlan: + default_vlan_id: 2 + vlan 100: + description: "Blue" + tagged_members: + - port: ethernet 1/1/32 + state: present + - port: ethernet 1/1/31 + state: present + untagged_members: + - port: ethernet 1/1/30 + state: present + - port: ethernet 1/1/29 + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" + vlan 10: + description: "vlan with anycast GW" + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + state: "present" + +> **NOTE**: Interfaces should be created using the *os10_interface* role. + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vlan + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml new file mode 100644 index 00000000..7510d594 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_vlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml new file mode 100644 index 00000000..acd66992 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_vlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml new file mode 100644 index 00000000..03e7a180 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml new file mode 100644 index 00000000..f4d69bc9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating VLAN configuration for os10" + template: + src: os10_vlan.j2 + dest: "{{ build_dir }}/vlan10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning VLAN configuration for os10" + os10_config: + src: os10_vlan.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2 b/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2 new file mode 100644 index 00000000..f07f10b3 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2 @@ -0,0 +1,129 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{########################################## +Purpose: +Configure VLAN Interface commands for os10 Devices +os10_vlan: + default_vlan_id : 4 + vlan 100: + description: "red" + tagged_members: + - port: ethernet 1/1/32 + state: present + - port: ethernet 1/1/31 + state: absent + untagged_members: + - port: ethernet 1/1/30 + state: present + - port: ethernet 1/1/29 + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" + vlan 10: + description: "vlan with anycast GW" + 
ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + state: "present" + +#########################################} +{% if os10_vlan is defined and os10_vlan %} +{% for key,value in os10_vlan.items() %} + {% if key == "default_vlan_id" %} + {% if value %} +default vlan-id {{ value }} + {% else %} +no default vlan-id + {% endif %} + {% else %} + + {% set vlan_id = key.split(" ") %} + {% set vlan_vars = os10_vlan[key] %} + + {% if vlan_vars.state is defined and vlan_vars.state == "absent" %} +no interface vlan{{ vlan_id[1] }} + {% else %} +interface vlan{{ vlan_id[1] }} + {% if vlan_vars.description is defined %} + {% if vlan_vars.description %} + {% if vlan_vars.description|wordcount > 1 %} + description "{{ vlan_vars.description }}" + {% else %} + description {{ vlan_vars.description }} + {% endif %} + {% else %} + no description + {% endif %} + {% endif %} + + {% if vlan_vars.ip_and_mask is defined %} + {% if vlan_vars.ip_and_mask %} + ip address {{ vlan_vars.ip_and_mask }} + {% else %} + no ip address + {% endif %} + {% endif %} + + {% if vlan_vars.virtual_gateway_ip is defined %} + {% if vlan_vars.virtual_gateway_ip %} + ip virtual-router address {{ vlan_vars.virtual_gateway_ip }} + {% else %} + no ip virtual-router address + {% endif %} + {% endif %} + + {% if vlan_vars.virtual_gateway_ipv6 is defined %} + {% if vlan_vars.virtual_gateway_ipv6 %} + ipv6 virtual-router address {{ vlan_vars.virtual_gateway_ipv6 }} + {% else %} + no ipv6 virtual-router address + {% endif %} + {% endif %} + + {# Keep member configs in the end as it switches to member interface context #} + {% if vlan_vars.untagged_members is defined %} + {% for ports in vlan_vars.untagged_members %} + {% if ports.port is defined and ports.port %} + {% if 'range' in ports.port %} +interface {{ ports.port }} + {% else %} +interface {{ ports.port.split() | join() }} + {% endif %} + {% if ports.state is defined and ports.state == "absent" %} + no 
switchport access vlan + {% else %} + switchport access vlan {{ vlan_id[1] }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if vlan_vars.tagged_members is defined %} + {% for ports in vlan_vars.tagged_members %} + {% if ports.port is defined and ports.port %} + {% if 'range' in ports.port %} +interface {{ ports.port }} + {% else %} +interface {{ ports.port.split() | join() }} + {% endif %} + {% if ports.state is defined and ports.state == "absent" %} + no switchport trunk allowed vlan {{ vlan_id[1] }} + {% else %} + switchport mode trunk + switchport trunk allowed vlan {{ vlan_id[1] }} + {% endif %} + {% if ports.access_vlan is defined and ports.access_vlan == "false" %} + no switchport access vlan + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml new file mode 100644 index 00000000..78e24738 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml @@ -0,0 +1,31 
@@ +--- +# vars file for dellemc.os10.os10_vlan, +# below gives a example configuration +# Sample variables for OS10 device +os10_vlan: + default_vlan_id: 2 + vlan 100: + description: "Blue" + tagged_members: + - port: ethernet 1/1/32 + state: present + - port: ethernet 1/1/31 + state: present + untagged_members: + - port: ethernet 1/1/30 + state: present + - port: ethernet 1/1/29 + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" + vlan 10: + description: "vlan with anycast GW" + ip_and_mask: "10.1.1.1/24" + virtual_gateway_ip: "10.1.1.254" + virtual_gateway_ipv6: "10:1:1::254" + state: "present" diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml new file mode 100644 index 00000000..13442eff --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_vlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml new file mode 100644 index 00000000..c856f302 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_vlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/README.md b/ansible_collections/dellemc/os10/roles/os10_vlt/README.md new file mode 100644 index 00000000..85ed917a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/README.md @@ -0,0 +1,108 @@ +VLT role +======== + +This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VLT role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables . + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vlt keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os10 | +| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os10 | +| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os10 | +| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os10 | +| ``discovery_intf`` | string | Configures the discovery interface for the VLT domain (range of 
interfaces)| os10 | +| ``discovery_intf_state`` | string: absent,present | Deletes the discovery interfaces for the VLT domain if set to absent | os10 | +| ``peer_routing`` | boolean | Configures VLT peer routing | os10 | +| ``priority`` | integer (default:32768) | Configures VLT priority | os10 | +| ``vlt_mac`` | string | Configures the VLT MAC address | os10 | +| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os10 | +| ``vlt_peers.`` | dictionary | Configures the VLT peer port-channel (`Po value`) | os10 | +| ``vlt_peers..peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os10 | +| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Dependencies +------------ + +The *os10_vlt* role is built on modules included in the core Ansible code. These modules were added in ansible version 2.2.0. + +Example playbook +---------------- + +This example uses the *os10_vlt* role to setup a VLT-domain. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vlt* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + + os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + backup_destination_vrf: + discovery_intf: 1/1/12 + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: present + +> **NOTE**: Discovery interface must not be in switchport mode and can be configured using the *os10_interface* role. + +**Simple playbook to set up system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vlt + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml new file mode 100644 index 00000000..daa35993 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_vlt diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml new file mode 100644 index 00000000..910f1fa8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_vlt diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml new file mode 100644 index 00000000..c3164f7d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_vlt role facilitates the configuration of VLT attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml new file mode 100644 index 00000000..63fa380a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating VLT configuration for os10" + template: + src: os10_vlt.j2 + dest: "{{ build_dir }}/vlt10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning VLT configuration for os10" + os10_config: + src: os10_vlt.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2 b/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2 new file mode 100644 index 00000000..4915ff71 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2 @@ -0,0 +1,108 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure VLT commands for os10 Devices. 
+os10_vlt: + domain: 1 + backup_destination: "192.168.1.1" + destination_type: "ipv4" + backup_destination_vrf: "management" + discovery_intf: 1/1/12 + discovery_intf_state: present + peer_routing: True + priority: 1 + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: present +################################} +{% if os10_vlt is defined and os10_vlt %} + {% if os10_vlt.domain is defined and os10_vlt.domain %} + {% if os10_vlt.state is defined and os10_vlt.state == 'absent' %} +no vlt-domain {{ os10_vlt.domain }} + {% else %} +vlt-domain {{ os10_vlt.domain }} + {% if os10_vlt.backup_destination is defined %} + {% if os10_vlt.backup_destination %} + {% if os10_vlt.destination_type is defined %} + {% if os10_vlt.destination_type == "ipv6" %} + backup destination ipv6 {{ os10_vlt.backup_destination }} + {% elif os10_vlt.destination_type == "ipv4" %} + {% if os10_vlt.backup_destination_vrf is defined and os10_vlt.backup_destination_vrf %} + backup destination {{ os10_vlt.backup_destination }} vrf {{ os10_vlt.backup_destination_vrf }} + {% else %} + backup destination {{ os10_vlt.backup_destination }} + {% endif %} + {% endif %} + {% endif %} + {% else %} + no backup destination + {% endif %} + {% endif %} + {% if os10_vlt.discovery_intf_state is defined and os10_vlt.discovery_intf_state == "absent" %} + {% if os10_vlt.discovery_intf is defined and os10_vlt.discovery_intf %} + no discovery-interface ethernet{{ os10_vlt.discovery_intf }} + {% endif %} + {% else %} + {% if os10_vlt.discovery_intf is defined and os10_vlt.discovery_intf %} + discovery-interface ethernet{{ os10_vlt.discovery_intf }} + {% endif %} + {% endif %} + {% if os10_vlt.backup_destination is defined %} + {% if os10_vlt.backup_destination %} + {% if os10_vlt.destination_type is defined %} + {% if os10_vlt.destination_type == 'ipv6' %} + backup destination ipv6 {{ os10_vlt.backup_destination }} + {% elif os10_vlt.destination_type == 'ipv4' %} + {% if os10_vlt.backup_destination_vrf 
is defined and os10_vlt.backup_destination_vrf %} + backup destination {{ os10_vlt.backup_destination }} vrf {{ os10_vlt.backup_destination_vrf }} + {% else %} + backup destination {{ os10_vlt.backup_destination }} + {% endif %} + {% endif %} + {% endif %} + {% else %} + no backup destination + {% endif %} + {% endif %} + {% if os10_vlt.priority is defined %} + {% if os10_vlt.priority %} + primary-priority {{ os10_vlt.priority }} + {% else %} + no primary-priority + {% endif %} + {% endif %} + {% if os10_vlt.peer_routing is defined %} + {% if os10_vlt.peer_routing %} + peer-routing + {% else %} + no peer-routing + {% endif %} + {% endif %} + {% if os10_vlt.vlt_mac is defined %} + {% if os10_vlt.vlt_mac %} + vlt-mac {{ os10_vlt.vlt_mac }} + {% else %} + no vlt-mac + {% endif %} + {% endif %} + + {% endif %} + {% endif %} + {% if os10_vlt.vlt_peers is defined and os10_vlt.vlt_peers %} + {% for key in os10_vlt.vlt_peers.keys() %} + {% set channel_id = key.split(" ") %} + {% set peer_vars = os10_vlt.vlt_peers[key] %} +interface port-channel{{ channel_id[1] }} + {% if peer_vars.peer_lag is defined %} + {% if peer_vars.peer_lag %} + vlt-port-channel {{ peer_vars.peer_lag}} + {% else %} + no vlt-port-channel + {% endif %} + {% endif %} + + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 
ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml new file mode 100644 index 00000000..1c15f159 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml @@ -0,0 +1,17 @@ +--- +# vars file for dellemc.os10.os10_vlt, +# below gives a example configuration +# Sample variables for OS10 device +os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + backup_destination_vrf: + discovery_intf: 1/1/12 + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: present diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml new file mode 100644 index 00000000..8f950f01 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_vlt diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml new file mode 100644 index 00000000..aee0f95b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_vlt diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/README.md b/ansible_collections/dellemc/os10/roles/os10_vrf/README.md new file mode 100644 index 00000000..464efc5b --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/README.md @@ -0,0 +1,143 @@ +VRF role +======== + +This role facilitates to configure the basics of virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VRF role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the variable `ansible_network_os` that can take the `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vrf keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrfdetails`` | list | Configures the list of VRF instances (see ``instances.*``) | os10 | +| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os10 | +| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os10 | +| ``vrfdetails.ip_route_import`` | string | Configures VRF IP subcommands | os10 | +| ``ip_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ip_route_import.route_map_value`` | string | Configures the route-map value | os10 | +| ``ip_route_import.state`` | string | 
Deletes the IP configuration if set to absent | os10 | +| ``vrfdetails.ip_route_export`` | string | Configures VRF IP subcommands | os10 | +| ``ip_route_export.community_value`` | string | Configures the route community value | os10 | +| ``ip_route_export.route_map_value`` | string | Configures the route-map value | os10 | +| ``ip_route_export.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.ipv6_route_import`` | string | Configures VRF IPv6 subcommands | os10 | +| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ipv6_route_import.route_map_value`` | string | Configures the route-map value | os10 | +| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.ipv6_route_export`` | string | Configures VRF IPv6 subcommands | os10 | +| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 | +| ``ipv6_route_export.route_map_value`` | string | Configures the route-map value | os10 | +| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 | +| ``vrfdetails.map_ip_interface`` | list | Specifies a list of valid interface names | os10 | +| ``map_ip_interface.intf_id`` | string | Specifies a valid interface name | os10 | +| ``map_ip_interface.state`` | string | Deletes VRF association in the interface if set to absent | os10 | +| ``upd_src_ip_loopback_id`` | string | Configures the source IP for any leaked route in VRF from the provided loopback ID, delete if empty string| os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Dependencies +------------ + +The *os10_vrf* role is built on modules included in the core Ansible code. 
These modules were added in ansible version 2.2.0. + +Example playbook +---------------- + +This example uses the *os10_vrf* role to set up a VRF and associate it to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that references the *os10_vrf* role. +*upd_src_ip_loopback_id* has a dependency on the association of the interface in a VRF, and the *os10_vrf* role needs to be invoked twice with different input dictionaries, one for the creation and one for *upd_src_ip_loopback_id*. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + os10_vrf: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + ip_route_import: + community_value: "10:20" + state: "present" + route_map_value: "test4" + ip_route_export: + community_value: "30:40" + state: "present" + route_map_value: "test3" + ipv6_route_import: + community_value: "40:50" + state: "absent" + route_map_value: "test2" + ipv6_route_export: + community_value: "60:70" + state: "absent" + route_map_value: "test2" + map_ip_interface: + - intf_id : "loopback11" + state : "present" + + os_vrf_upd_src_loopback: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + upd_src_ip_loopback_id: 11 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vrf + +**Simple playbook with `upd_src_ip_loopback_id` — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vrf + - hosts: leaf1 + vars: + 
os10_vrf: "{{ os_vrf_upd_src_loopback }}" + roles: + - dellemc.os10.os10_vrf + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml new file mode 100644 index 00000000..e00abdd9 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_vrf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml new file mode 100644 index 00000000..7bd70e18 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_vrf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml new file mode 100644 index 00000000..db8f619d --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_vrf role facilitates the configuration of VRF attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml new file mode 100644 index 00000000..ef651579 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating vrf configuration for os10" + template: + src: os10_vrf.j2 + dest: "{{ build_dir }}/vrf10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning VRF configuration for os10" + os10_config: + src: os10_vrf.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2 b/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2 new file mode 100644 index 00000000..e77f6c14 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2 @@ -0,0 +1,122 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure VRF on os10 Devices. 
+os10_vrf: + vrfdetails: + -vrf_name: "os10vrf" + state: "present" + ip_route_import: + community_value: 10:15 + state: "absent" + route_map_value: test1 + ip_route_export: + commnity_value: 20:30 + state: "present" + route_map_value: test2 + ipv6_route_import: + community_value: 10:15 + state: "present" + route_map_value: test3 + ipv6_route_export: + commnity_value: 20:30 + state: "present" + route_map_value: test4 + map_ip_interface: + -intf_id : loopback6 + state : "present" + upd_src_ip_loopback_id: 5 + -vrf_name: "os10vrf1" + state: "absent" +################################} +{% if (os10_vrf is defined and os10_vrf) %} +{% if os10_vrf.vrfdetails is defined %} + {% for vrf in os10_vrf.vrfdetails %} + {% if vrf.vrf_name is defined %} + {% if vrf.vrf_name %} + {% if vrf.state is defined and vrf.state == 'absent' %} +no ip vrf {{ vrf.vrf_name }} + {% else %} +ip vrf {{ vrf.vrf_name }} + {% if vrf.ip_route_import is defined and vrf.ip_route_import %} + {% set route_vars = vrf.ip_route_import %} + {% if route_vars.community_value is defined and route_vars.community_value %} + {% if route_vars.state == 'present' %} + {% if route_vars.route_map_value is defined and route_vars.route_map_value %} + ip route-import {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }} + {% else %} + ip route-import {{ route_vars.community_value }} + {% endif %} + {% else %} + no ip route-import {{ route_vars.community_value }} + {% endif %} + {% endif %} + {% endif %} + {% if vrf.ipv6_route_import is defined and vrf.ipv6_route_import %} + {% set route_vars = vrf.ipv6_route_import %} + {% if route_vars.community_value is defined and route_vars.community_value %} + {% if route_vars.state == 'present' %} + {% if route_vars.route_map_value is defined and route_vars.route_map_value %} + ipv6 route-import {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }} + {% else %} + ipv6 route-import {{ route_vars.community_value }} + {% endif %} + {% else %} 
+ no ipv6 route-import {{ route_vars.community_value }} + {% endif %} + {% endif %} + {% endif %} + {% if vrf.ip_route_export is defined and vrf.ip_route_export %} + {% set route_vars = vrf.ip_route_export %} + {% if route_vars.community_value is defined and route_vars.community_value %} + {% if route_vars.state == 'present' %} + {% if route_vars.route_map_value is defined and route_vars.route_map_value %} + ip route-export {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }} + {% else %} + ip route-export {{ route_vars.community_value }} + {% endif %} + {% else %} + no ip route-export {{ route_vars.community_value }} + {% endif %} + {% endif %} + {% endif %} + {% if vrf.ipv6_route_export is defined and vrf.ipv6_route_export %} + {% set route_vars = vrf.ipv6_route_export %} + {% if route_vars.community_value is defined and route_vars.community_value %} + {% if route_vars.state == 'present' %} + {% if route_vars.route_map_value is defined and route_vars.route_map_value %} + ipv6 route-export {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }} + {% else %} + ipv6 route-export {{ route_vars.community_value }} + {% endif %} + {% else %} + no ipv6 route-export {{ route_vars.community_value }} + {% endif %} + {% endif %} + {% endif %} + {% if vrf.map_ip_interface is defined and vrf.map_ip_interface %} + exit + {% for map_ip_interface in vrf.map_ip_interface %} + {% if map_ip_interface.intf_id is defined and map_ip_interface.intf_id %} +interface {{ map_ip_interface.intf_id }} + {% if map_ip_interface.state is defined and map_ip_interface.state == "absent" %} + no ip vrf forwarding + {% else %} + ip vrf forwarding {{ vrf.vrf_name }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if vrf.upd_src_ip_loopback_id is defined %} + {% if vrf.upd_src_ip_loopback_id %} + update-source-ip loopback{{ vrf.upd_src_ip_loopback_id}} + {% else %} + no update-source-ip loopback + {% endif %} + {% endif %} + {% endif %} + {% 
endif %} + {% endif %} + {% endfor %} +{% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml new file mode 100644 index 00000000..b8a265d8 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml @@ -0,0 +1,33 @@ +--- +# vars file for dellemc.os10.os10_vrf, +# below gives a sample configuration +# Sample variables for OS10 device +os10_vrf: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + ip_route_import: + community_value: "10:20" + state: "present" + route_map_value: "test1" + ip_route_export: + community_value: "30:40" + state: "present" + route_map_value: "test2" + ipv6_route_import: + community_value: "40:50" + state: "absent" + route_map_value: "test3" + ipv6_route_export: + community_value: "60:70" + state: "absent" + route_map_value: "test4" + map_ip_interface: + - intf_id: "loopback11" + state: "present" + +os_vrf_upd_src_loopback: + vrfdetails: + - vrf_name: "os10vrf" + state: "present" + upd_src_ip_loopback_id: 11 diff --git 
a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml new file mode 100644 index 00000000..6093a28f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_vrf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml new file mode 100644 index 00000000..5ed35d63 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars for dellemc.os10.os10_vrf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md b/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md new file mode 100644 index 00000000..299166bf --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md @@ -0,0 +1,139 @@ +VRRP role +========= + +This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VRRP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os10_vrrp` (dictionary) holds a dictionary with the interface name key +- Interface name can correspond to any of the valid OS10 interface with a unique interface identifier name +- Physical interfaces names must be in * * format (for example *fortyGigE 1/1*) +- Variables and values are case-sensitive + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os10 | +| ``version`` | dictionary | Configures VRRP version | os10 | +| ``vrrp_active_active_mode`` | dictionary | Configures VRRP active-active mode | os10 | +| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | os10 | +| 
``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os10 | +| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os10 | +| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os10 | +| ``vrrp_group.virtual_address`` | string | Configures a virtual-address to the VRRP group (A.B.C.D format) | os10 | +| ``virtual_address.ip`` | string | Configures a virtual ip address (A.B.C.D format) | os10 | +| ``virtual_address.state`` | string: present\*,absent | Configures/unconfigures a virtual-address (A.B.C.D format) | os10 | +| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os10 | +| ``vrrp_group.priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100) | os10 | +| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075; default 100) and in multiple of 25; centisecs gets converted into seconds in version 2 | os10 | +| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track.*``) | os10 | +| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track.interface* | os10 | +| ``track_interface.interface`` | string | Configures the track interface of the VRRP group ( format) | os10 | +| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os10 | +| ``track_interface.state`` | string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os10 | +| ``vrrp_group.track_interface.state`` | string: present*,absent | Deletes all track interfaces from the VRRP group if set to absent | os10 | +| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to 
absent | os10 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to 
communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_vrrp* role to configure VRRP commands at the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vrrp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + ethernet1/1/1: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: true + +> **NOTE**: Interface VRRP cannot exist with L2 modes and can be configured using the *os10_interface* role. 
**Simple playbook to set up the system — leaf.yaml**
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml new file mode 100644 index 00000000..3d8a1a6c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating VRRP configuration for os10" + template: + src: os10_vrrp.j2 + dest: "{{ build_dir }}/vrrp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning VRRP configuration for os10" + os10_config: + src: os10_vrrp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2 b/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2 new file mode 100644 index 00000000..a1c75b6a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2 @@ -0,0 +1,154 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################## +Purpose: +Configure VRRP commands for os10 Devices +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + ethernet1/1/1: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + 
state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: True +#########################################} +{% if os10_vrrp is defined and os10_vrrp %} +{% for key,value in os10_vrrp.items() %} +{% if key == "vrrp" %} + {% if value.delay_reload is defined %} + {% if value.delay_reload >=0 %} +vrrp delay reload {{ value.delay_reload }} + {% else %} +vrrp delay reload {{ value.delay_reload }} + {% endif %} + {% endif %} + {% if value.version is defined %} + {% if value.version %} +vrrp version {{ value.version }} + {% else %} +no vrrp version + {% endif %} + {% endif %} +{% else %} +interface {{ key }} + {% if value %} + {% if key.startswith("vlan") %} + {% if value.vrrp_active_active_mode is defined and value.vrrp_active_active_mode %} + vrrp mode active-active + {% else %} + no vrrp mode active-active + {% endif %} + {% endif %} + {% if value.vrrp_group is defined and value.vrrp_group %} + {% for group in value.vrrp_group %} + {% if group.group_id is defined and group.group_id %} + {% if group.state is defined and group.state == "absent" %} + {% if group.type is defined and group.type == "ipv6" %} + no vrrp-ipv6-group {{ group.group_id }} + {% else %} + no vrrp-group {{ group.group_id }} + {% endif %} + {% else %} + {% if group.type is defined and group.type == "ipv6" %} + vrrp-ipv6-group {{ group.group_id }} + {% else %} + vrrp-group {{ group.group_id }} + {% endif %} + {% if group.adv_interval_centisecs is defined %} + {% if group.adv_interval_centisecs %} + advertise-interval centisecs {{ group.adv_interval_centisecs }} + {% else %} + no advertise-interval centisecs + {% endif %} + {% endif %} + {% if group.adv_interval_secs is defined %} + {% if group.adv_interval_secs %} + advertise-interval secs {{ group.adv_interval_secs }} + {% else %} + no advertise-interval secs + {% endif %} 
+ {% endif %} + {% if group.track_interface is defined and group.track_interface %} + {% for track_item in group.track_interface %} + {% if track_item.state is defined and track_item.state == "absent" %} + {% if track_item.resource_id is defined and track_item.resource_id %} + no track {{ track_item.resource_id }} + {% endif %} + {% else %} + {% if track_item.resource_id is defined and track_item.resource_id %} + {% if track_item.priority_cost is defined and track_item.priority_cost %} + track {{ track_item.resource_id }} priority-cost {{ track_item.priority_cost }} + {% else %} + track {{ track_item.resource_id }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if group.virtual_address is defined and group.virtual_address %} + {% for virtual_interface in group.virtual_address %} + {% if virtual_interface.state is defined and virtual_interface.state == "absent" %} + {% if virtual_interface.ip is defined and virtual_interface.ip %} + no virtual-address {{ virtual_interface.ip }} + {% endif %} + {% else %} + {% if virtual_interface.ip is defined and virtual_interface.ip %} + virtual-address {{ virtual_interface.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if group.preempt is defined %} + {% if group.preempt %} + preempt + {% else %} + no preempt + {% endif %} + {% endif %} + {% if group.priority is defined %} + {% if group.priority %} + priority {{ group.priority }} + {% else %} + no priority + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 
ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml new file mode 100644 index 00000000..97475170 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml @@ -0,0 +1,45 @@ +--- +# vars file for dellemc.os10.os10_vrrp, +# below gives a example configuration +# Sample variables for OS10 device +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + ethernet1/1/1: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: true diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml new file mode 100644 index 00000000..2ed5ab8e --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: 
+ - dellemc.os10.os10_vrrp diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml new file mode 100644 index 00000000..aa78d677 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_vrrp diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md b/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md new file mode 100644 index 00000000..09b23bb3 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md @@ -0,0 +1,259 @@ +VxLAN role +======== + +This role facilitates the configuration of virtual extensible LAN (VxLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The VxLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os10_vxlan keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``anycast_gateway_mac`` | string | Configures an anycast gateway IP address for a VxLAN virtual network | os10 | +| ``loopback`` | dictionary | Configures the loopback interface (see ``loopback.*``) | os10 | +| ``loopback.loopback_id`` | integer | Configures the loopback interface number (0 to 16383) | os10 | +| ``loopback.description`` | string | Configures the interface description | os10 | +| ``loopback.ip_address`` | string | Configure the IP address | os10 | +| ``loopback.state`` | string: absent,present\* | Removes loopback interface 
if set to absent | os10 | +| ``nve`` | dictionary | Configures network virtualization edge (see ``nve.*``) | os10 | +| ``nve.source_interface`` | integer | Configures source loopback interface | os10 | +| ``nve.controller`` | dictionary | Configures controller; supports only one controller connection at a time (see ``controller.*``) | os10 | +| ``controller.name`` | string: NSX, ovsdb | Configures the NVE controller | os10 | +| ``controller.max_backoff`` | integer | Configures max_backoff value (setting an empty value negates the corresponding configuration) | os10 | +| ``controller.control_cfg`` | list | Configures the controller IP and port (see ``control_cfg.*``) | os10 | +| ``control_cfg.ip_addr`` | string | Configures the controller IP | os10 | +| ``control_cfg.port`` | integer | Configures the controller port | os10 | +| ``control_cfg.state`` | string: absent,present\* | Removes the controller IP and port configuration if set to absent | os10 | +| ``controller.state`` | string: absent,present\* | Removes the controller if set to absent | os10 | +| ``nve.state`` | string: absent,present\* | Removes the NVE if set to absent | os10 | +| ``evpn`` | dictionary | Enables EVPN in control plane (see ``evpn.*``) | os10 | +| ``evpn.autoevi`` | boolean: True, False | Configures auto-EVI; no further manual configuration is allowed in auto-EVI mode | os10 | +| ``evpn.rmac`` | string | Configures router MAC address | os10 | +| ``evpn.evi`` | list | Configures EVPN instance (see ``evi.*``)| os10 | +| ``evpn.dis_rt_asn`` | boolean | Enables/disables AS number usage in route target | os10 | +| ``evpn.vrf`` | dictionary | Enables VRF for EVPN| os10 | +| ``vrf.name`` | string | Configures VRF name | os10 | +| ``vrf.state`` | string(present,absent) | Configures/removes VRF for EVPN | os10 | +| ``vrf.vni`` | integer | Configures VNI for the VRF | os10 | +| ``vrf.rd`` | string | Configures RD for the VRF | os10 | +| ``vrf.route_target`` | dictionary | Enables route target for the 
VRF | os10 | +| ``route_target.type`` | string (manual, auto) | Configures the route target type | os10 | +| ``route_target.asn_value`` | string | Configure AS number | os10 | +| ``route_target.state`` | string (present,absent) | Configures/unconfigures the route target | os10 | +| ``route_target.route_target_type`` | string | Configures the route target type | os10 | +| ``vrf.adv_ipv4`` | dictionary | Enables IPv4 advertisement VRF | os10 | +| ``adv_ipv4.type`` | string | Configures IPv4 advertisement type | os10 | +| ``adv_ipv4.rmap_name`` | string | Configures route-map for advertisement | os10 | +| ``adv_ipv4.unconfig`` | boolean | Configures/unconfigures route-map for advertisement | os10 | +| ``evi.id`` | integer | Configures the EVPN instance ID (1 to 65535) | os10 | +| ``evi.rd`` | string | Configures the route distinguisher | os10 | +| ``evi.vni`` | dictionary | Configures VNI value (see ``vni.*``) | os10 | +| ``vni.id`` | integer | Configures VNI value; configure the same VNI value configured for the VxLAN virtual network | os10 | +| ``vni.state`` | string: absent,present\* | Removes the VNI if set to absent | os10 | +| ``evi.route_target`` | list | Configures route target (see ``route_target.*``) | os10 | +| ``route_target.type`` | string: manual,auto | Configures the route target (auto mode auto-configures an import and export value for EVPN routes) | os10 | +| ``route_target.asn_value`` | string | Configures the route target ASN value | os10 | +| ``route_target.route_target_type`` | string: import,export,both | Configures the route target type | os10 | +| ``route_target.state`` | string: absent,present\* | Removes the route target if set to absent | os10 | +| ``evi.state`` | string: absent,present\* | Removes EVPN instance ID if set to absent | os10 | +| ``evpn.state`` | string: absent,present\* | Removes the EVPN configuration if set to absent | os10 | +| ``virtual_network`` | dictionary | Configures the virtual network attributes (see 
``virtual_network.*``) | os10 | +| ``virtual_network.untagged_vlan`` | integer | Configures the reserved untagged VLAN ID (1 to 4093) | os10 | +| ``virtual_network.virtual_net`` | list | Configures the virtual network attributes for VxLAN tunneling (see ``virtual_net.*``) | os10 | +| ``virtual_net.id`` | integer | Configures a virtual network ( virtual-network ID, from 1 to 65535) | os10 | +| ``virtual_net.description`` | string | Configures the description for virtual network | os10 | +| ``virtual_net.vlt_vlan_id`` | integer | Configures the VLTi VLAN ID | os10 | +| ``virtual_net.member_interface`` | list | Configures the trunk member interface attributes to the virtual network (see ``member_interface.*``) | os10 | +| ``member_interface.ifname`` | string | Configures interface name to provision the virtual network member interface | os10 | +| ``member_interface.type`` | string: tagged,untagged | Configures the type to provision the virtual network member interface | os10 | +| ``member_interface.vlanid`` | integer | Configures the VLAN ID to provision the virtual network member interface | os10 | +| ``member_interface.state`` | string: absent,present\* | Removes the virtual network member interface if set to absent | os10 | +| ``virtual_net.vxlan_vni`` | dictionary | Configures the VxLAN attributes to virtual network (see ``vxlan_vni.*``) | os10 | +| ``vxlan_vni.id`` | integer | Configures the VxLAN ID to a virtual network | os10 | +| ``vxlan_vni.remote_endpoint`` | list | Configures the IP address of a remote tunnel endpoint in a VxLAN network (see ``remote_endpoint.*``) | os10 | +| ``remote_endpoint.ip`` | string | Configures the IP address of a remote tunnel endpoint (1.1.1.1) | os10 | +| ``remote_endpoint.state`` | string: absent,present\* | Removes the remote tunnel endpoint in a VxLAN network if set to absent | os10 | +| ``vxlan_vni.state`` | string: absent,present\* | Removes the VxLAN ID if set to absent | os10 | +| ``virtual_net.state`` | string: 
absent,present\* | Removes a virtual network if set to absent | os10 | +| ``vlan_association`` | list | Configures the VLAN association with virtual network (see ``vlan_association.*``) | os10 | +| ``vlan_association.vlan_id`` | integer | Specifies the VLAN ID | os10 | +| ``vlan_association.virtual_net`` | integer | Specifies the virtual network ID which is to be associated with VLAN | os10 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege
escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_vxlan* role to configure the VxLAN network, source IP address on VxLAN tunnel endpoint and virtual networks. It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. The hosts file should define the ansible_network_os variable with the corresponding Dell EMC OS10 name. + +When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vxlan* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/os10 + + os10_vxlan: + anycast_gateway_mac: "00:22:33:44:55:66" + loopback: + loopback_id: 10 + description: "HARDWARE_VXLAN" + ip_address: "10.8.0.1/32" + state: "present" + nve: + source_interface: 10 + controller: + name: "ovsdb" + max_backoff: 2000 + control_cfg: + - ip_addr: "1.2.3.4" + port: 30 + state: "present" + state: "present" + state: "present" + evpn: + autoevi: False + evi: + - id: 111 + rd: "auto" + vni: + id: 111 + state: "present" + route_target: + - type: "manual" + asn_value: "111:111" + route_target_type: "both" + state: "present" + - type: "manual" + asn_value: "11:11" + route_target_type: "export" + state: "present" + state: "present" + - id: 222 + rd: "2.2.2.2:222" + vni: + id: 222 + state: "present" + route_target: + - type: "auto" + asn_value: + route_target_type: + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + - name: "blue" + state: "absent" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + state: "present" + virtual_network: + untagged_vlan: 1001 + virtual_net: + - id: 111 + description: "NSX_Cluster_VNI_111" + vlt_vlan_id: 11 + member_interface: + - ifname: "ethernet 1/1/15" + type: "tagged" + vlanid: 15 + state: "present" + - ifname: "port-channel 12" + type: "tagged" + vlanid: 11 + state: "present" + vxlan_vni: + id: 111 + remote_endpoint: + - ip: "1.1.1.1" + state: "present" + - ip: "11.11.11.11" + state: "present" + - ip: "111.111.111.111" + state: "present" + state: "present" + state: "present" + - id: 222 + description: 
"NSX_Cluster_VNI_222" + vlt_vlan_id: 22 + member_interface: + - ifname: "ethernet 1/1/16" + type: "tagged" + vlanid: 16 + state: "present" + vxlan_vni: + id: 222 + remote_endpoint: + - ip: "2.2.2.2" + state: "present" + - ip: "22.22.22.22" + state: "present" + state: "present" + state: "present" + vlan_association: + - vlain_id: 111 + virtual_net: 111 + +> **NOTE**: Member interfaces should be in switchport trunk mode which can be configured using the *os10_interface* role. + +**Simple playbook to configure VxLAN — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os10.os10_vxlan + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml new file mode 100644 index 00000000..adeae550 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_vxlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml new file mode 100644 index 00000000..a6b6bc57 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_vxlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml new file mode 100644 index 00000000..87908f3c --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml @@ -0,0 +1,20 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+--- +galaxy_info: + role_name: os10_vxlan + author: Dell EMC Networking Engineering + description: > + The os10_vxlan role facilitates the configuration of nve evpn and virtual network attributes in devices + running Dell EMC SmartFabric OS10. + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml new file mode 100644 index 00000000..f90be2d7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating vxlan configuration for os10" + template: + src: os10_vxlan.j2 + dest: "{{ build_dir }}/vxlan10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning vxlan configuration for os10" + os10_config: + src: os10_vxlan.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2 b/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2 new file mode 100644 index 00000000..b56da068 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2 @@ -0,0 +1,434 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +#Purpose: +Configure VXLAN commands for os10 Devices +os10_vxlan: + anycast_gateway_mac: "00:22:33:44:55:66" + loopback: + loopback_id: 10 + description: "HARDWARE_VXLAN" + ip_address: "10.8.0.1/32" + state: "present" + nve: + source_interface: 10 + controller: + name: "ovsdb" + max_backoff: 2000 + 
control_cfg: + - ip_addr: "1.2.3.4" + port: 30 + state: "present" + state: "present" + state: "present" + evpn: + autoevi: False + evi: + - id: 111 + rd: "auto" + vni: + id: 111 + state: "present" + route_target: + - type: "manual" + asn_value: "111:111" + route_target_type: "both" + state: "present" + - type: "manual" + asn_value: "11:11" + route_target_type: "export" + state: "present" + state: "present" + - id: 222 + rd: "2.2.2.2:222" + vni: + id: 222 + state: "present" + route_target: + - type: "auto" + asn_value: + route_target_type: + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + - name: "blue" + state: "absent" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + state: "present" + virtual_network: + untagged_vlan: 1001 + virtual_net: + - id: 111 + description: "NSX_Cluster_VNI_111" + vlt_vlan_id: 11 + member_interface: + - ifname: "ethernet 1/1/15" + type: "tagged" + vlanid: 15 + state: "present" + - ifname: "port-channel 12" + type: "tagged" + vlanid: 11 + state: "present" + vxlan_vni: + id: 111 + remote_endpoint: + - ip: "1.1.1.1" + state: "present" + - ip: "11.11.11.11" + state: "present" + - ip: "111.111.111.111" + state: "present" + state: "present" + state: "present" + - id: 222 + description: "NSX_Cluster_VNI_222" + vlt_vlan_id: 22 + member_interface: + - ifname: "ethernet 1/1/16" + type: "tagged" + vlanid: 16 + state: "present" + vxlan_vni: + id: 222 + remote_endpoint: + - ip: "2.2.2.2" + state: "present" + - ip: "22.22.22.22" + state: "present" + state: "present" + state: "present" + vlan_association: + - vlain_id: 111 + virtual_net: 111 +###############################################} +{% if os10_vxlan is defined and os10_vxlan %} + {% if os10_vxlan.anycast_gateway_mac is defined %} + {% if os10_vxlan.anycast_gateway_mac %} +ip 
virtual-router mac-address {{ os10_vxlan.anycast_gateway_mac }} + {% else %} +no ip virtual-router mac-address + {% endif %} + {% endif %} + {% if os10_vxlan.loopback is defined %} + {% set loopback = os10_vxlan.loopback %} + {% if loopback.state is defined and loopback.state == "absent" %} + {% if loopback.loopback_id is defined and loopback.loopback_id %} +no interface loopback {{ loopback.loopback_id }} + {% endif %} + {% else %} + {% if loopback.loopback_id is defined and loopback.loopback_id %} +interface loopback {{ loopback.loopback_id }} + {% endif %} + {% if loopback.description is defined %} + {% if loopback.description %} + description {{ loopback.description }} + {% else %} + no description {{ loopback.description }} + {% endif %} + {% endif %} + {% if loopback.ip_address is defined %} + {% if loopback.ip_address %} + ip address {{ loopback.ip_address }} + {% else %} + no ip address + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + {% if os10_vxlan.nve is defined and os10_vxlan.nve %} + {% set nve = os10_vxlan.nve %} + {% if nve.state is defined and nve.state == "absent" %} +no nve + {% else %} +nve + {% if nve.source_interface is defined %} + {% if nve.source_interface >= 0 %} + source-interface loopback{{ nve.source_interface }} + {% else %} + no source-interface + {% endif %} + {% endif %} + {% if nve.controller is defined %} + {% set controller = os10_vxlan.nve.controller %} + {% if controller.state is defined and controller.state == "absent" %} + no controller + {% else %} + {% if controller.name is defined and controller.name %} + controller {{ controller.name }} + {% endif %} + {% if controller.max_backoff is defined %} + {% if controller.max_backoff %} + max-backoff {{ controller.max_backoff }} + {% else %} + no max-backoff + {% endif %} + {% endif %} + {% if controller.control_cfg is defined and controller.control_cfg %} + {% for ctrl_cfg in controller.control_cfg %} + {% if ctrl_cfg.ip_addr is defined and ctrl_cfg.ip_addr %} + {% if 
ctrl_cfg.port is defined and ctrl_cfg.port %} + {% if ctrl_cfg.state is defined and ctrl_cfg.state == "absent" %} + {% if controller.name == "ovsdb" %} + no ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} ssl + {% else %} + no ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} + {% endif %} + {% else %} + {% if controller.name == "ovsdb" %} + ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} ssl + {% else %} + ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + {% if os10_vxlan.evpn is defined and os10_vxlan.evpn %} + {% set evpn = os10_vxlan.evpn %} + {% if evpn.state is defined and evpn.state == "absent" %} +no evpn + {% else %} +evpn + {% if evpn.rmac is defined %} + {% if evpn.rmac %} + router-mac {{ evpn.rmac }} + {% else %} + no router-mac + {% endif %} + {% endif %} + {% if evpn.dis_rt_asn is defined and evpn.dis_rt_asn == "true" %} + disable-rt-asn + {% else %} + no disable-rt-asn + {% endif %} + {% if evpn.evi is defined and evpn.evi %} + {% for evi in evpn.evi %} + {% if evi.id is defined and evi.id %} + {% if evi.state is defined and evi.state == "absent" %} + no evi {{ evi.id }} + {% else %} + evi {{ evi.id }} + {% if evi.vni is defined and evi.vni %} + {% if evi.vni.id is defined and evi.vni.id %} + {% if evi.vni.state is defined and evi.vni.state == "absent" %} + no vni {{ evi.vni.id }} + {% else %} + vni {{ evi.vni.id }} + {% endif %} + {% endif %} + {% endif %} + {% if evi.rd is defined %} + {% if evi.rd %} + rd {{ evi.rd }} + {% else %} + no rd + {% endif %} + {% endif %} + {% if evi.route_target is defined and evi.route_target %} + {% for rt in evi.route_target %} + {% if rt.type is defined and rt.type == "manual" %} + {% if rt.asn_value is defined and rt.asn_value %} + {% if rt.state is defined and rt.state == "absent" %} + no route-target {{ rt.asn_value }} + {% else %} + {% if rt.route_target_type 
is defined and rt.route_target_type %} + route-target {{ rt.asn_value }} {{ rt.route_target_type }} + {% endif %} + {% endif %} + {% endif %} + {% elif rt.type is defined and rt.type == "auto" %} + {% if rt.state is defined and rt.state == "absent" %} + no route-target auto + {% else %} + route-target auto + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if evpn.vrf is defined %} + {% for evpn_vrf in evpn.vrf %} + {% if evpn_vrf.state is defined and evpn_vrf.state == "absent" and evpn_vrf.name %} + no vrf {{ evpn_vrf.name }} + {% elif evpn_vrf.name %} + vrf {{ evpn_vrf.name }} + {% if evpn_vrf.vni is defined %} + {% if evpn_vrf.vni %} + vni {{ evpn_vrf.vni }} + {% else %} + no vni + {% endif %} + {% endif %} + {% if evpn_vrf.rd is defined %} + {% if evpn_vrf.rd %} + rd {{ evpn_vrf.rd }} + {% else %} + no rd + {% endif %} + {% endif %} + {% if evpn_vrf.route_target is defined and evpn_vrf.route_target %} + {% for rt in evpn_vrf.route_target %} + {% if rt.type is defined and rt.type == "manual" %} + {% if rt.asn_value is defined and rt.asn_value %} + {% if rt.state is defined and rt.state == "absent" %} + no route-target {{ rt.asn_value }} + {% else %} + {% if rt.route_target_type is defined and rt.route_target_type %} + route-target {{ rt.asn_value }} {{ rt.route_target_type }} + {% endif %} + {% endif %} + {% endif %} + {% elif rt.type is defined and rt.type == "auto" %} + {% if rt.state is defined and rt.state == "absent" %} + no route-target auto + {% else %} + route-target auto + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if evpn_vrf.adv_ipv4 is defined and evpn_vrf.adv_ipv4 %} + {% for rt in evpn_vrf.adv_ipv4 %} + {% if rt.unconfig is defined and rt.unconfig == "true" and rt.type is defined and rt.rmap_name is defined %} + no advertise ipv4 {{ rt.type }} route-map {{ rt.rmap_name }} + {% elif rt.type is defined and rt.rmap_name is defined %} + advertise ipv4 {{ rt.type }} 
route-map {{ rt.rmap_name }} + {% elif rt.unconfig is defined and rt.unconfig == "true" and rt.type is defined %} + no advertise ipv4 {{ rt.type }} + {% elif rt.type is defined %} + advertise ipv4 {{ rt.type }} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if evpn.autoevi is defined %} + {% if evpn.autoevi == True %} + auto-evi + {% else %} + no auto-evi + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + {% if os10_vxlan.virtual_network is defined and os10_vxlan.virtual_network %} + {% set vir_net = os10_vxlan.virtual_network %} + {% if vir_net.untagged_vlan is defined %} + {% if vir_net.untagged_vlan %} +virtual-network untagged-vlan {{ vir_net.untagged_vlan }} + {% else %} +no virtual-network untagged-vlan + {% endif %} + {% endif %} + {% if vir_net.virtual_net is defined and vir_net.virtual_net %} + {% for v_net in vir_net.virtual_net %} + {% if v_net.id is defined and v_net.id %} + {% if v_net.state is defined and v_net.state == "absent" %} +no interface virtual-network {{ v_net.id }} +no virtual-network {{ v_net.id }} + {% else %} +virtual-network {{ v_net.id }} + {% if v_net.description is defined %} + {% if v_net.description %} + description {{ v_net.description }} + {% else %} + no description + {% endif %} + {% endif %} + {% if v_net.vlt_vlan_id is defined %} + {% if v_net.vlt_vlan_id %} + vlti-vlan {{ v_net.vlt_vlan_id }} + {% else %} + no vlti-vlan + {% endif %} + {% endif %} + {% if v_net.member_interface is defined and v_net.member_interface %} + {% for member_intf in v_net.member_interface %} + {% if member_intf.ifname is defined and member_intf.ifname %} + {% if member_intf.type is defined %} + {% if member_intf.type == "tagged" %} + {% if member_intf.vlanid is defined and member_intf.vlanid %} + {% if member_intf.state is defined and member_intf.state == "absent" %} + no member-interface {{ member_intf.ifname }} vlan-tag {{ member_intf.vlanid }} + {% else %} + member-interface {{ 
member_intf.ifname }} vlan-tag {{ member_intf.vlanid }} + {% endif %} + {% endif %} + {% elif member_intf.type == "untagged" %} + {% if member_intf.state is defined and member_intf.state == "absent" %} + no member-interface {{ member_intf.ifname }} untagged + {% else %} + member-interface {{ member_intf.ifname }} untagged + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if v_net.vxlan_vni is defined and v_net.vxlan_vni %} + {% set vxlan_vni = v_net.vxlan_vni %} + {% if vxlan_vni.id is defined and vxlan_vni.id %} + {% if vxlan_vni.state is defined and vxlan_vni.state == "absent" %} + no vxlan-vni {{ vxlan_vni.id }} + {% else %} + vxlan-vni {{ vxlan_vni.id }} + {% if vxlan_vni.remote_endpoint is defined and vxlan_vni.remote_endpoint %} + {% for remote_endpt in vxlan_vni.remote_endpoint %} + {% if remote_endpt.ip is defined and remote_endpt.ip %} + {% if remote_endpt.state is defined and remote_endpt.state == "absent" %} + no remote-vtep {{ remote_endpt.ip }} + {% else %} + remote-vtep {{ remote_endpt.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if os10_vxlan.vlan_association is defined and os10_vxlan.vlan_association %} + {% for vlan in os10_vxlan.vlan_association %} + {% if vlan.vlan_id is defined and vlan.vlan_id %} +interface vlan{{ vlan.vlan_id }} + {% if vlan.virtual_net is defined %} + {% if vlan.virtual_net %} + virtual-network {{ vlan.virtual_net }} + {% else %} + no virtual-network + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 
ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml new file mode 100644 index 00000000..d326e635 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml @@ -0,0 +1,112 @@ +--- +os10_vxlan: + anycast_gateway_mac: "00:22:33:44:55:66" + loopback: + loopback_id: 10 + description: "HARDWARE_VXLAN" + ip_address: "10.8.0.1/32" + state: "present" + nve: + source_interface: 10 + controller: + name: "ovsdb" + max_backoff: 2000 + control_cfg: + - ip_addr: "1.2.3.4" + port: 30 + state: "present" + state: "present" + state: "present" + evpn: + autoevi: False + evi: + - id: 111 + rd: "auto" + vni: + id: 111 + state: "present" + route_target: + - type: "manual" + asn_value: "111:111" + route_target_type: "both" + state: "present" + - type: "manual" + asn_value: "11:11" + route_target_type: "export" + state: "present" + state: "present" + - id: 222 + rd: "2.2.2.2:222" + vni: + id: 222 + state: "present" + route_target: + - type: "auto" + asn_value: + route_target_type: + state: "present" + state: "present" + vrf: + - name: "test" + vni: 1000 + adv_ipv4: + - type: "connected" + state: "present" + - type: "bgp" + state: "present" + route_target: + - type: "manual" + asn_value: "65530:65534" + route_target_type: "both" + state: "present" + - name: "blue" + state: "absent" + rmac: 00:11:11:11:11:11 + dis_rt_asn: "true" + state: "present" + 
virtual_network: + untagged_vlan: 1001 + virtual_net: + - id: 111 + description: "NSX_Cluster_VNI_111" + vlt_vlan_id: 11 + member_interface: + - ifname: "ethernet 1/1/15" + type: "tagged" + vlanid: 15 + state: "present" + - ifname: "port-channel 12" + type: "tagged" + vlanid: 11 + state: "present" + vxlan_vni: + id: 111 + remote_endpoint: + - ip: "1.1.1.1" + state: "present" + - ip: "11.11.11.11" + state: "present" + - ip: "111.111.111.111" + state: "present" + state: "present" + state: "present" + - id: 222 + description: "NSX_Cluster_VNI_222" + vlt_vlan_id: 22 + member_interface: + - ifname: "ethernet 1/1/16" + type: "tagged" + vlanid: 16 + state: "present" + vxlan_vni: + id: 222 + remote_endpoint: + - ip: "2.2.2.2" + state: "present" + - ip: "22.22.22.22" + state: "present" + state: "present" + state: "present" + vlan_association: + - vlan_id: 111 + virtual_net: 111 diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml new file mode 100644 index 00000000..f5e4a6c1 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: os10host + connection: network_cli + roles: + - dellemc.os10.os10_vxlan diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml new file mode 100644 index 00000000..0373f0aa --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_vxlan diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/README.md b/ansible_collections/dellemc/os10/roles/os10_xstp/README.md new file mode 100644 index 00000000..0dd919b2 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/README.md @@ -0,0 +1,196 @@ +# xSTP role + +This role facilitates the configuration of xSTP attributes. It supports multiple version of spanning-tree protocol (STP), rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. + +The xSTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value +- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- `os10_xstp` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the device +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**hostname keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|----------------------| +| ``type`` | string (required) | Configures the type of spanning-tree mode specified that can vary according to the device including RSTP, rapid-PVST, and MST | os10 | +| ``enable`` | boolean: 
true,false | Enables/disables the spanning-tree protocol specified in the type variable | os10 | +| ``mac_flush_timer`` | integer | Configures the mac_flush_timer value (0 to 500) | os10 | +| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os10 | +| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os10 | +| ``rstp.max_age`` | integer | Configures the max_age timer for RSTP (6 to 40) | os10 | +| ``rstp.hello_time`` | integer | Configures the hello-time for RSTP (1 to 10) | os10 | +| ``rstp.forward_time`` | integer | Configures the forward-time for RSTP (4 to 30) | os10 | +| ``rstp.force_version`` | string: stp | Configures the force version for the BPDUs transmitted by RSTP | os10 | +| ``rstp.mac_flush_threshold`` | integer | Configures the MAC flush threshold for RSTP (1 to 65535) | os10 | +| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os10 | +| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os10 | +| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os10 | +| ``vlan.max_age`` | integer | Configures the max_age timer for a VLAN (6 to 40) | os10 | +| ``vlan.hello_time`` | integer | Configures the hello-time for a VLAN (1 to 10) | os10 | +| ``vlan.forward_time`` | integer | Configures the forward-time for a VLAN (4 to 30) | os10 | +| ``vlan.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated VLAN range_or_id | os10 | +| ``vlan.mac_flush_threshold`` | integer | Configures the MAC flush threshold for a VLAN (1 to 65535) | os10 | +| ``vlan.root`` | string: primary,secondary | Designates the primary or secondary root for the associated VLAN range_or_id; mutually exclusive with *vlan.bridge_priority* | os10 | +| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in 
multiples of 4096); mutually exclusive with *vlan.root* | os10 | +| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os10 | +| ``mstp.max_age`` | integer | Configures the max_age timer for MSTP (6 to 40) | os10 | +| ``mstp.max_hops`` | integer | Configures the max-hops for MSTP (6 to 40) | os10 | +| ``mstp.hello_time`` | integer | Configures the hello-time for MSTP (1 to 10) | os10 | +| ``mstp.forward_time`` | integer | Configures the forward-time for MSTP (4 to 30) | os10 | +| ``mstp.force_version`` | string: stp,rstp | Configures the force-version for the BPDUs transmitted by MSTP | os10 | +| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os10 | +| ``mstp_instances.number_or_range`` | integer | Configures the multiple spanning-tree instance number| os10 | +| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os10 | +| ``mstp_instances.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated MSTP instance | os10 | +| ``mstp_instances.mac_flush_threshold`` | integer | Configures the MAC flush-threshold for an MSTP instance (1 to 65535) | os10 | +| ``mstp_instances.root`` | string: primary,secondary | Designates the primary or secondary root for the associated MSTP instance; mutually exclusive with *mstp_instances.bridge_priority* | os10 | +| ``mstp.mst_config`` | dictionary | Configures multiple spanning-tree (see ``mstp.mst_config.*``); supported | os10 | +| ``mst_config.name`` | string | Configures the name which is specified for the MSTP | os10 | +| ``mst_config.revision`` | integer | Configures the revision number for MSTP | os10 | +| ``mst_config.cfg_list`` | list | Configures the multiple spanning-tree list (see ``mst_config.cfg_list.*``) | os10 | +| ``cfg_list.number`` | integer | Specifies the MSTP instance number | os10 | 
+| ``cfg_list.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to an instance number | os10 | +| ``cfg_list.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os10 | +| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os10 | +| ``intf ``| dictionary | Configures the interface name (see ``intf..*``) | os10 | +| ``intf..edge_port`` | boolean: true,false | Configures the EdgePort as dynamic if set to true | os10 | +| ``intf..bpdu_filter``| boolean: true,false | Enables/disables bpdufilter at the interface | os10 | +| ``intf..bpdu_guard``| boolean: true,false | Enables/disables bpduguard at the interface | os10 | +| ``intf..guard``| string: loop,root,none | Configures guard on the interface | os10 | +| ``intf..enable`` | boolean: true,false | Enables/disables spanning-tree at the interface level | os10 | +| ``intf..link_type``| string: auto,point-to-point,shared | Configures the link type at the interface | os10 | +| ``intf..rstp`` | dictionary | Configures the RSTP interface name (see ``intf..rstp.*``) | os10 | +| ``rstp.priority``| integer | Configures the RSTP priority value at the interface | os10 | +| ``rstp.cost`` | integer | Configures the RSTP cost value at the interface | os10 | +| ``intf..msti`` | list | Configures the MSTi interface name (see ``intf..msti``) | os10 | +| ``msti.instance_number`` | integer or range | Specifies the MSTP instance number or range | os10 | +| ``msti.priority`` | integer | Specifies the priority value to be configured at the interface | os10 | +| ``msti.cost`` | integer | Specifies the cost value to be configured at the interface | os10 | +| ``intf..vlan`` | list | Configures the VLAN interface name (see ``intf..vlan``) | os10 | +| ``vlan.range_or_id`` | integer or range | Specifies the VLAN ID or range | os10 | +| ``vlan.priority`` | integer | Specifies the priority value to be configured at the interface | 
os10 | +| ``vlan.cost`` | integer | Specifies the cost value to be configured at the interface | os10 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOM`E environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| 
``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + + +Example playbook +---------------- + +This example uses the *os10_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path. + +It writes a simple playbook that only references the *os10_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP. + +**Sample hosts file** + + spine1 ansible_host= + +**Sample host_vars/spine1** + + hostname: spine1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os10.os10 + build_dir: ../temp/temp_os10 + +**Sample vars/main.yml** + + os10_xstp: + type: rstp + enable: true + path_cost: true + mac_flush_timer: 4 + rstp: + max_age: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + bridge_priority: 4096 + mac_flush_threshold: 5 + pvst: + vlan: + - range_or_id: 10 + max_age: 6 + enable: true + hello_time: 7 + forward_time: 7 + bridge_priority: 4096 + mac_flush_threshold: 9 + mstp: + max_age: 6 + max_hops: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + mstp_instances: + - number_or_range: 1 + enable: true + mac_flush_threshold: 9 + bridge_priority: 4096 + mst_config: + name: cfg1 + revision: 5 + cfg_list: + - number: 1 + vlans: 10,12 + vlans_state: present + intf: + ethernet 1/1/8: + edge_port: true + bpdu_filter: true + bpdu_guard: true + guard: loop + enable: true + link_type: point-to-point + msti: + - instance_number: 1 + priority: 32 + cost: 1 + rstp: + 
priority: 32 + cost: 7 + vlan: + - range_or_id: 6 + priority: 16 + cost: 8 + + +**Simple playbook to setup system — spine.yml** + + - hosts: spine + roles: + - dellemc.os10.os10_xstp + +**Run** + + ansible-playbook -i hosts spine.yml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml new file mode 100644 index 00000000..daaf0f8f --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os10.os10_xstp diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml new file mode 100644 index 00000000..645522da --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os10.os10_xstp diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml new file mode 100644 index 00000000..8a63b500 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os10_xstp role facilitates the configuration of STP attributes in devices running Dell EMC SmartFabric OS10. 
+ company: Dell Technologies + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os10 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os10 diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml new file mode 100644 index 00000000..e14eb0f7 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os10 + - name: "Generating xSTP configuration for os10" + template: + src: os10_xstp.j2 + dest: "{{ build_dir }}/xstp10_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool) +# notify: save config os10 + register: generate_output + + - name: "Provisioning xSTP configuration for os10" + os10_config: + src: os10_xstp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") +# notify: save config os10 + register: output diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2 b/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2 new file mode 100644 index 00000000..dc7456ba --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2 @@ -0,0 +1,398 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{############################################### +PURPOSE: Configure xSTP commands for OS10 devices +os10_xstp: + type: rstp + enable: true + mac_flush_timer: 4 + rstp: + max_age: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + bridge_priority: 4096 + mac_flush_threshold: 5 + pvst: + vlan: + - range_or_id: 10 + max_age: 6 + enable: true + hello_time: 7 + forward_time: 7 + bridge_priority: 4096 + mac_flush_threshold: 9 + mstp: + max_age: 6 + max_hops: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + mstp_instances: + - number_or_range: 1 + enable: true + 
mac_flush_threshold: 9 + bridge_priority: 4096 + mst_config: + name: cfg1 + revision: 5 + cfg_list: + - number: 1 + vlans: 10,12 + vlans_state: present + intf: + ethernet 1/1/8: + edge_port: true + bpdu_filter: true + bpdu_guard: true + guard: loop + enable: true + link_type: point-to-point + msti: + - instance_number: 1 + priority: 32 + cost: 1 + rstp: + priority: 32 + cost: 7 + vlan: + - range_or_id: 6 + priority: 16 + cost: 8 +#################################################} +{% if os10_xstp is defined and os10_xstp %} +{% set xstp_vars = os10_xstp %} +{% if xstp_vars.type is defined %} + {% if xstp_vars.type %} +spanning-tree mode {{ xstp_vars.type }} + {% else %} +no spanning-tree mode r + {% endif %} +{% endif %} +{% if xstp_vars.enable is defined %} + {% if xstp_vars.enable %} +no spanning-tree disable + {% else %} +spanning-tree disable + {% endif %} +{% endif %} +{% if xstp_vars.mac_flush_timer is defined %} + {% if xstp_vars.mac_flush_timer == 0 or xstp_vars.mac_flush_timer %} +spanning-tree mac-flush-timer {{ xstp_vars.mac_flush_timer }} + {% else %} +no spanning-tree mac-flush-timer + {% endif %} +{% endif %} + +{% if xstp_vars.rstp is defined and xstp_vars.rstp %} + {% set val = xstp_vars.rstp %} + {% if val.bridge_priority is defined %} + {% if val.bridge_priority == 0 or val.bridge_priority %} +spanning-tree rstp priority {{ val.bridge_priority }} + {% else %} +no spanning-tree rstp priority + {% endif %} + {% endif %} + {% if val.forward_time is defined %} + {% if val.forward_time %} +spanning-tree rstp forward-time {{ val.forward_time }} + {% else %} +no spanning-tree rstp forward-time + {% endif %} + {% endif %} + {% if val.hello_time is defined %} + {% if val.hello_time %} +spanning-tree rstp hello-time {{ val.hello_time }} + {% else %} +no spanning-tree rstp hello-time + {% endif %} + {% endif %} + {% if val.max_age is defined %} + {% if val.max_age %} +spanning-tree rstp max-age {{ val.max_age }} + {% else %} +no spanning-tree rstp max-age + 
{% endif %} + {% endif %} + {% if val.mac_flush_threshold is defined %} + {% if val.mac_flush_threshold %} +spanning-tree rstp mac-flush-threshold {{ val.mac_flush_threshold }} + {% else %} +no spanning-tree rstp mac-flush-threshold + {% endif %} + {% endif %} + {% if val.force_version is defined %} + {% if val.force_version %} +spanning-tree rstp force-version {{ val.force_version }} + {% else %} +no spanning-tree rstp force-version + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.pvst is defined and xstp_vars.pvst %} + {% set val = xstp_vars.pvst %} + {% if val.vlan is defined and val.vlan %} + {% for vlan in val.vlan %} + {% if vlan.range_or_id is defined and vlan.range_or_id %} + {% if vlan.bridge_priority is defined %} + {% if vlan.bridge_priority == 0 or vlan.bridge_priority %} +spanning-tree vlan {{ vlan.range_or_id }} priority {{ vlan.bridge_priority }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} priority + {% endif %} + {% endif %} + {% if vlan.enable is defined %} + {% if vlan.enable %} +no spanning-tree vlan {{ vlan.range_or_id }} disable + {% else %} +spanning-tree vlan {{ vlan.range_or_id }} disable + {% endif %} + {% endif %} + + {% if vlan.forward_time is defined %} + {% if vlan.forward_time %} +spanning-tree vlan {{ vlan.range_or_id }} forward-time {{ vlan.forward_time }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} forward-time + {% endif %} + {% endif %} + {% if vlan.hello_time is defined %} + {% if vlan.hello_time %} +spanning-tree vlan {{ vlan.range_or_id }} hello-time {{ vlan.hello_time }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} hello-time + {% endif %} + {% endif %} + {% if vlan.max_age is defined %} + {% if vlan.max_age %} +spanning-tree vlan {{ vlan.range_or_id }} max-age {{ vlan.max_age }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} max-age + {% endif %} + {% endif %} + {% if vlan.mac_flush_threshold is defined %} + {% if vlan.mac_flush_threshold %} +spanning-tree 
vlan {{ vlan.range_or_id }} mac-flush-threshold {{ vlan.mac_flush_threshold }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} mac-flush-threshold + {% endif %} + {% endif %} + {% if vlan.root is defined %} + {% if vlan.root %} +spanning-tree vlan {{ vlan.range_or_id }} root {{ vlan.root }} + {% else %} +no spanning-tree vlan {{ vlan.range_or_id }} root p + {% endif %} + {% endif %} + + {% endif %} + {% endfor %} + {% endif %} +{% endif %} +{% if xstp_vars.mstp is defined and xstp_vars.mstp %} + {% set val = xstp_vars.mstp %} + {% if val.forward_time is defined %} + {% if val.forward_time %} +spanning-tree mst forward-time {{ val.forward_time }} + {% else %} +no spanning-tree mst forward-time + {% endif %} + {% endif %} + {% if val.hello_time is defined %} + {% if val.hello_time %} +spanning-tree mst hello-time {{ val.hello_time }} + {% else %} +no spanning-tree mst hello-time + {% endif %} + {% endif %} + {% if val.max_age is defined %} + {% if val.max_age %} +spanning-tree mst max-age {{ val.max_age }} + {% else %} +no spanning-tree mst max-age + {% endif %} + {% endif %} + {% if val.max_hops is defined %} + {% if val.max_hops %} +spanning-tree mst max-hops {{ val.max_hops }} + {% else %} +no spanning-tree mst max-hops + {% endif %} + {% endif %} + {% if val.force_version is defined %} + {% if val.force_version %} +spanning-tree mst force-version {{ val.force_version }} + {% else %} +no spanning-tree mst force-version + {% endif %} + {% endif %} + + {% if val.mstp_instances is defined and val.mstp_instances %} + {% for instance in val.mstp_instances %} + {% if instance.number_or_range is defined and instance.number_or_range %} + {% if instance.bridge_priority is defined %} + {% if instance.bridge_priority ==0 or instance.bridge_priority %} +spanning-tree mst {{ instance.number_or_range }} priority {{ instance.bridge_priority }} + {% else %} +no spanning-tree mst {{ instance.number_or_range }} priority + {% endif %} + {% endif %} + {% if instance.enable 
is defined %} + {% if instance.enable %} +no spanning-tree mst {{ instance.number_or_range }} disable + {% else %} +spanning-tree mst {{ instance.number_or_range }} disable + {% endif %} + {% endif %} + {% if instance.mac_flush_threshold is defined %} + {% if instance.mac_flush_threshold %} +spanning-tree mst {{ instance.number_or_range }} mac-flush-threshold {{ instance.mac_flush_threshold }} + {% else %} +no spanning-tree mst {{ instance.number_or_range }} mac-flush-threshold + {% endif %} + {% endif %} + {% if instance.root is defined %} + {% if instance.root %} +spanning-tree mst {{ instance.number_or_range }} root {{ instance.root }} + {% else %} +no spanning-tree mst {{ instance.number_or_range }} root p + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.mst_config is defined and val.mst_config %} +spanning-tree mst configuration + {% if val.mst_config.name is defined %} + {% if val.mst_config.name %} + name {{ val.mst_config.name }} + {% else %} + no name + {% endif %} + {% endif %} + {% if val.mst_config.revision is defined %} + {% if val.mst_config.revision %} + revision {{ val.mst_config.revision }} + {% else %} + no revision + {% endif %} + {% endif %} + {% for instance in val.mst_config.cfg_list %} + {% if instance.number is defined and instance.number %} + {% if instance.vlans is defined and instance.vlans %} + {% if instance.vlans_state is defined and instance.vlans_state == "absent" %} + no instance {{ instance.number }} vlan {{ instance.vlans }} + {% else %} + instance {{ instance.number }} vlan {{ instance.vlans }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} +{% if xstp_vars.intf is defined and xstp_vars.intf %} + {% for intr in xstp_vars.intf.keys() %} + {% set intf_vars = xstp_vars.intf[intr] %} +interface {{ intr }} + {% if intf_vars.edge_port is defined %} + {% if not intf_vars.edge_port %} + no spanning-tree port type edge + {% else %} + spanning-tree port type edge + {% 
endif %} + {% endif %} + {% if intf_vars.bpdu_filter is defined %} + {% if intf_vars.bpdu_filter %} + spanning-tree bpdufilter enable + {% else %} + spanning-tree bpdufilter disable + {% endif %} + {% endif %} + {% if intf_vars.bpdu_guard is defined %} + {% if intf_vars.bpdu_guard %} + spanning-tree bpduguard enable + {% else %} + spanning-tree bpduguard disable + {% endif %} + {% endif %} + {% if intf_vars.guard is defined %} + {% if intf_vars.guard %} + spanning-tree guard {{ intf_vars.guard }} + {% else %} + spanning-tree guard none + {% endif %} + {% endif %} + {% if intf_vars.enable is defined %} + {% if intf_vars.enable %} + no spanning-tree disable + {% else %} + spanning-tree disable + {% endif %} + {% endif %} + {% if intf_vars.link_type is defined %} + {% if intf_vars.link_type %} + spanning-tree link-type {{ intf_vars.link_type }} + {% else %} + no spanning-tree link-type + {% endif %} + {% endif %} + {% if intf_vars.rstp is defined and intf_vars.rstp %} + {% if intf_vars.rstp.priority is defined %} + {% if intf_vars.rstp.priority %} + spanning-tree rstp priority {{ intf_vars.rstp.priority }} + {% else %} + no spanning-tree rstp priority + {% endif %} + {% endif %} + {% if intf_vars.rstp.cost is defined %} + {% if intf_vars.rstp.cost %} + spanning-tree rstp cost {{ intf_vars.rstp.cost }} + {% else %} + no spanning-tree rstp cost + {% endif %} + {% endif %} + {% endif %} + {% if intf_vars.msti is defined and intf_vars.msti %} + {% for inst in intf_vars.msti %} + {% if inst.instance_number is defined and inst.instance_number==0 or inst.instance_number %} + {% if inst.priority is defined %} + {% if inst.priority %} + spanning-tree msti {{ inst.instance_number }} priority {{ inst.priority }} + {% else %} + no spanning-tree msti {{ inst.instance_number }} priority 1 + {% endif %} + {% endif %} + {% if inst.cost is defined %} + {% if inst.cost %} + spanning-tree msti {{ inst.instance_number }} cost {{ inst.cost }} + {% else %} + no spanning-tree msti {{ 
inst.instance_number }} cost 1 + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if intf_vars.vlan is defined and intf_vars.vlan %} + {% for inst in intf_vars.vlan %} + {% if inst.range_or_id is defined and inst.range_or_id %} + {% if inst.priority is defined %} + {% if inst.priority %} + spanning-tree vlan {{ inst.range_or_id }} priority {{ inst.priority }} + {% else %} + no spanning-tree vlan {{ inst.range_or_id }} priority 1 + {% endif %} + {% endif %} + {% if inst.cost is defined %} + {% if inst.cost %} + spanning-tree vlan {{ inst.range_or_id }} cost {{ inst.cost }} + {% else %} + no spanning-tree vlan {{ inst.range_or_id }} cost 1 + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} +{% endif %} + +{% endif %} diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml new file mode 100644 index 00000000..b1ff63e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml @@ -0,0 +1,22 @@ +--- +spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10" +spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10" + +leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10" +leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10" +leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10" +leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10" + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml new file mode 100644 index 00000000..7b55af45 --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml @@ -0,0 +1,74 @@ +--- +# vars file for dellemc.os10.os10_xstp, +# below gives a 
sample configuration +# Sample variables for OS10 device +os10_xstp: + type: rstp + enable: true + path_cost: true + mac_flush_timer: 4 + rstp: + max_age: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + bridge_priority: 4096 + mac_flush_threshold: 5 + pvst: + vlan: + - range_or_id: 10 + max_age: 6 + enable: true + hello_time: 7 + forward_time: 7 + bridge_priority: 4096 + mac_flush_threshold: 9 + mstp: + max_age: 6 + max_hops: 6 + hello_time: 7 + forward_time: 7 + force_version: stp + mstp_instances: + - number_or_range: 1 + enable: true + mac_flush_threshold: 9 + bridge_priority: 4096 + mst_config: + name: cfg1 + revision: 5 + cfg_list: + - number: 1 + vlans: 10,12 + vlans_state: present + intf: + ethernet 1/1/8: + edge_port: true + bpdu_filter: true + bpdu_guard: true + guard: loop + enable: true + link_type: point-to-point + msti: + - instance_number: 1 + priority: 32 + cost: 1 + rstp: + priority: 32 + cost: 7 + vlan: + - range_or_id: 6 + priority: 16 + cost: 8 + +# Sample variables for OS10 devices to configure root in pvst and mst +#--- +#os10_xstp: +# pvst: +# vlan: +# - range_or_id: 10 +# root: primary +# mstp: +# mstp_instances: +# - number_or_range: 1 +# root: secondary diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml new file mode 100644 index 00000000..438212ca --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os10.os10_xstp diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml new file mode 100644 index 00000000..b3eb721a --- /dev/null +++ b/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os10.os10_xstp diff --git 
a/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network b/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network new file mode 100644 index 00000000..69b59b3f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network @@ -0,0 +1 @@ +os10 diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: 
prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', 
params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep 
}}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2 new file mode 100644 index 00000000..aa8ad40a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2 @@ -0,0 +1,15 @@ +os10_aaa: + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + state: present + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: false + diff 
--git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2 new file mode 100644 index 00000000..89728ab9 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2 @@ -0,0 +1,24 @@ +os10_aaa: + radius_server: + retransmit: 6 + timeout: 9 + host: + - ip: 10.10.10.10 + key: 0 + value: "abc" + auth_port: 3 + state: present + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + state: present + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: present + re_authenticate: false + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2 new file mode 100644 index 00000000..1950e499 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2 @@ -0,0 +1,24 @@ +os10_aaa: + radius_server: + retransmit: + timeout: + host: + - ip: 10.10.10.10 + key: + value: "abc" + auth_port: + state: present + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + state: present + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: "absent" + re_authenticate: false + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2 new file mode 100644 index 00000000..b7a86a2d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2 @@ -0,0 +1,23 @@ +os10_aaa: + radius_server: + retransmit: 6 + timeout: 9 + host: + - ip: 10.10.10.10 + key: 0 + value: "abc" + auth_port: 3 + state: absent + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + state: absent + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + state: "absent" + re_authenticate: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2 new file mode 100644 index 00000000..8beb9cc1 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2 @@ -0,0 +1,20 @@ +os10_aaa: + radius_server: + retransmit: 6 + timeout: 9 + host: + - ip: 10.10.10.10 + key: 0 + value: "abc" + auth_port: 3 + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + aaa_authentication: + login: + - console: true + type: group radius group tacacs+ local + re_authenticate: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2 new file mode 100644 index 00000000..d9fa0fe4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2 @@ -0,0 +1,23 @@ +os10_aaa: + radius_server: + retransmit: + timeout: + host: + - ip: 10.10.10.10 + key: 0 + value: "abc" + auth_port: 3 + state: absent + tacacs_server: + host: + - ip: 10.10.10.10 + key: 0 + value: "aaaa" + auth_port: 3 + state: absent + aaa_authentication: + login: + - console: 
true + type: group radius group tacacs+ local + state: "absent" + re_authenticate: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml new file mode 100644 index 00000000..3e7e9546 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_aaa diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: 
teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2 new file mode 100644 index 00000000..f3a5d7b8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2 @@ -0,0 +1,23 @@ +idempotent: true + +os10_acl: + - name: ssh + type: ipv4 + description: acl + remark: + - description: 1 + number: 2 + state: present + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + destination: any + dest_condition: eq 4 + other_options: count + state: present + lineterminal: + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2 new file mode 100644 index 00000000..db65b97f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2 @@ -0,0 +1,17 @@ +idempotent: false + +os10_acl: + - name: ssh + type: ipv4 + description: acl + stage_ingress: + - name: "{{ os10_interface_1 }}" + state: present + - name: "{{ os10_interface_2 }}" + state: present + stage_egress: + - name: "{{ os10_interface_3 }}" + state: present + lineterminal: + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2 new file mode 100644 index 00000000..0d9f1834 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2 @@ -0,0 +1,31 
@@ +idempotent: false + +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + remark: + - description: acl remark + number: 3 + state: present + entries: + - number: 15 + permit: false + protocol: udp + source: any + src_condition: gt 4 + destination: any + dest_condition: lt 5 + other_options: fragment + state: present + stage_ingress: + - name: "{{ os10_interface_1 }}" + state: present + - name: "{{ os10_interface_2 }}" + state: present + stage_egress: + - name: "{{ os10_interface_3 }}" + state: present + lineterminal: + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2 new file mode 100644 index 00000000..afa89bbf --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2 @@ -0,0 +1,30 @@ +idempotent: false +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + remark: + - description: acl remark + number: 3 + state: absent + entries: + - number: 15 + permit: false + protocol: udp + source: any + src_condition: gt 4 + destination: any + dest_condition: lt 5 + other_options: fragment + state: absent + stage_ingress: + - name: "{{ os10_interface_1 }}" + state: absent + - name: "{{ os10_interface_2 }}" + state: absent + stage_egress: + - name: "{{ os10_interface_3 }}" + state: absent + lineterminal: + state: absent + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2 new file mode 100644 index 00000000..518758d1 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2 @@ -0,0 +1,34 @@ +idempotent: false + +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + remark: + - description: acl remark + number: 3 + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + destination: any + dest_condition: eq 4 + other_options: count + - number: 15 + permit: false + protocol: udp + source: any + src_condition: gt 4 + destination: any + dest_condition: lt 5 + other_options: fragment + stage_ingress: + - name: "{{ os10_interface_1 }}" + - name: "{{ os10_interface_2 }}" + stage_egress: + - name: "{{ os10_interface_3 }}" + lineterminal: + state: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2 new file mode 100644 index 00000000..9bce3bf0 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2 @@ -0,0 +1,7 @@ +idempotent: false + +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2 new file mode 100644 index 00000000..9381300e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2 @@ -0,0 +1,24 @@ +idempotent: false +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + remark: + - description: acl remark + number: 3 + entries: + - number: 14 + permit: true + protocol: tcp + source: any + src_condition: neq 6 + 
destination: any + dest_condition: eq 4 + other_options: count + stage_ingress: + - name: "{{ os10_interface_1 }}" + - name: "{{ os10_interface_2 }}" + stage_egress: + - name: "{{ os10_interface_3 }}" + lineterminal: + state: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2 new file mode 100644 index 00000000..2fcc4b18 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2 @@ -0,0 +1,29 @@ +os10_acl: + - name: ssh + type: ipv4 + description: acl ssh + remark: + - description: acl remark + number: 3 + state: absent + entries: + - number: 15 + permit: false + protocol: udp + source: any + src_condition: gt 4 + destination: any + dest_condition: lt 5 + other_options: fragment + state: absent + stage_ingress: + - name: "{{ os10_interface_1 }}" + state: absent + - name: "{{ os10_interface_2 }}" + state: absent + stage_egress: + - name: "{{ os10_interface_3 }}" + state: absent + lineterminal: + state: absent + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml new file mode 100644 index 00000000..b40350ab --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - dellemc.os10.os10_acl + +idempotent_roles: + - dellemc.os10.os10_acl diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | 
map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: 
testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2 new file mode 100644 index 00000000..947bca96 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2 @@ -0,0 +1,191 @@ +idempotent: false +os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: true + always_compare_med: true + default_loc_pref: 1000 + as_notation: asdot + enforce_first_as: true + non_deterministic_med: true + outbound_optimization: true + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + client_to_client: true + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: 
present + summary_only: true + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 20::/64 + limit: 4 + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 10 + threshold: 40 + warning: true + state: present + default_originate: + route_map: aa + state: present + distribute_list: + in: XX + in_state: present + out: YY + out_state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + 
remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + sender_loop_detect: true + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 new file mode 100644 index 00000000..0e4f173c --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 @@ -0,0 +1,185 @@ +idempotent: false +os10_bgp: + asn: 12 + router_id: + maxpath_ibgp: + maxpath_ebgp: + graceful_restart: false + log_neighbor_changes: false + fast_ext_fallover: false + always_compare_med: false + default_loc_pref: + as_notation: asdot + enforce_first_as: false + non_deterministic_med: false + outbound_optimization: false + confederation: + identifier: 25 + peers: 23 24 + peers_state: absent + route_reflector: + 
client_to_client: false + cluster_id: + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: absent + summary_only: false + dampening: + value: 15 750 2000 60 + route_map: qq + state: absent + ibgp_redist_internal: + state: absent + default_metric: + distance_bgp: + value: 3 4 6 + state: absent + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: absent + summary_only: false + ibgp_redist_internal: + state: absent + best_path: + as_path: ignore + as_path_state: absent + ignore_router_id: false + med: + - attribute: confed + state: absent + neighbor: + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: absent + address_family: + - type: l2vpn + activate: false + state: absent + admin: up + state: absent + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: absent + peergroup_type: ibgp + adv_interval: 40 + fall_over: absent + password: bgppassword + route_reflector_client: false + adv_start: 100 + adv_start_state: absent + conn_retry_timer: 20 + remove_pri_as: absent + address_family: + - type: ipv4 + activate: false + state: absent + max_prefix: + count: 10 + threshold: 40 + warning: false + state: absent + default_originate: + route_map: aa + state: absent + distribute_list: + in: XX + in_state: absent + out: YY + out_state: absent + send_community: + - type: standard + state: absent + state: absent + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: false + allow_as_in: 5 + next_hop_self: false + soft_reconf: false + add_path: both 3 + route_map: + - name: qq + filter: in + state: absent + state: absent + state: absent + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: absent + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + 
ibgp_peergroup_state: absent + sender_loop_detect: false + password: bgppassword + address_family: + - type: ipv4 + activate: false + sender_loop_detect: false + state: absent + allow_as_in: 5 + next_hop_self: false + soft_reconf: false + - type: l2vpn + activate: false + sender_loop_detect: false + state: absent + send_community: + - type: standard + state: absent + admin: up + state: absent + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: absent + address_family: + - type: ipv4 + activate: false + state: absent + max_prefix: + count: 20 + threshold: 90 + warning: false + state: absent + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: absent + - subnet: 20::/64 + limit: 4 + subnet_state: absent + - name: ibgp_pg + type: peergroup + weight: 10 + state: absent + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: absent + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: absent + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 new file mode 100644 index 00000000..1ec5c308 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 @@ -0,0 +1,153 @@ +idempotent: false +os10_bgp: + asn: 12 + vrfs: + - name: "GREEN" + router_id: 50.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + fast_ext_fallover: true + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + client_to_client: true + cluster_id: 1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + 
route_map: qq + state: present + ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + description: "U_site2 vlan" + send_community: + - type: extended + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + address_family: + - type: ipv4 + activate: true + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + 
send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present + - name: "BLUE" + router_id: 6.6.6.6 + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2 new file mode 100644 index 00000000..69d90add --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2 @@ -0,0 +1,147 @@ +idempotent: false +os10_bgp: + asn: 12 + vrfs: + - name: "GREEN" + router_id: + maxpath_ibgp: + maxpath_ebgp: + graceful_restart: false + log_neighbor_changes: false + fast_ext_fallover: false + always_compare_med: false + default_loc_pref: 1000 + route_reflector: + client_to_client: false + cluster_id: + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: absent + dampening: + value: 15 750 2000 60 + route_map: qq + state: absent + ibgp_redist_internal: + state: absent + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: absent + summary_only: false + ibgp_redist_internal: + state: absent + best_path: + as_path: ignore + as_path_state: absent + ignore_router_id: false + med: + - attribute: confed + state: absent + - attribute: missing-as-worst + state: absent + neighbor: + - type: ipv4 + interface: vlan10 + description: "U_site2 vlan" + send_community: + - type: extended + state: absent 
+ admin: up + state: absent + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: absent + peergroup_type: ibgp + adv_interval: 40 + fall_over: absent + route_reflector_client: false + address_family: + - type: ipv4 + activate: false + state: absent + send_community: + - type: standard + state: absent + state: absent + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + activate: false + allow_as_in: 5 + next_hop_self: false + soft_reconf: false + add_path: both 3 + route_map: + - name: qq + filter: in + state: absent + state: absent + state: absent + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: absent + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: absent + address_family: + - type: ipv4 + activate: false + sender_loop_detect: false + state: absent + allow_as_in: 5 + next_hop_self: false + soft_reconf: false + send_community: + - type: standard + state: absent + admin: up + state: absent + - name: peer1 + type: peergroup + bfd: yes + state: absent + - name: ebgp_pg + type: peergroup + bfd: yes + state: absent + - name: ibgp_pg + type: peergroup + weight: 10 + state: absent + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: absent + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: absent + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: absent + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: absent + state: present + - name: "BLUE" + router_id: + state: absent + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2 new file mode 100644 index 00000000..cd530af4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2 @@ -0,0 +1,314 @@ +idempotent: true +os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + always_compare_med: true + default_loc_pref: 1000 + as_notation: asdot + non_deterministic_med: true + outbound_optimization: true + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + address_family: + - type: ipv4 + state: present + max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 20::/64 + limit: 4 + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + 
address_family: + - type: l2vpn + state: present + admin: up + state: present + - type: ipv4 + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + address_family: + - type: ipv4 + state: present + max_prefix: + count: 10 + threshold: 40 + warning: true + state: present + default_originate: + route_map: aa + state: present + distribute_list: + in: XX + in_state: present + out: YY + out_state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + state: present + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs: + - name: "GREEN" + router_id: 50.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + cluster_id: 1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + 
ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + description: U_site2 vlan + send_community: + - type: extended + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + address_family: + - type: ipv4 + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static 
+ route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2 new file mode 100644 index 00000000..1fa12ee2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2 @@ -0,0 +1,313 @@ +idempotent: false +os10_bgp: + asn: 12 + router_id: 90.1.1.4 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + log_neighbor_changes: true + always_compare_med: true + default_loc_pref: 1000 + as_notation: asdot + non_deterministic_med: true + outbound_optimization: true + confederation: + identifier: 25 + peers: 23 24 + peers_state: present + route_reflector: + cluster_id: 4294967295 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + summary_only: true + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + default_metric: 10 + distance_bgp: + value: 3 4 6 + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present 
+ address_family: + - type: ipv4 + state: present + max_prefix: + count: 20 + threshold: 90 + warning: true + state: present + listen: + - subnet: 4.4.4.4/32 + limit: 4 + subnet_state: present + - subnet: 20::/64 + limit: 4 + subnet_state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan20 + send_community: + - type: extended + state: present + address_family: + - type: l2vpn + activate: true + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2-spine1" + remote_asn: 11 + ip: 192.168.10.1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + password: bgppassword + route_reflector_client: true + adv_start: 100 + adv_start_state: present + conn_retry_timer: 20 + remove_pri_as: present + address_family: + - type: ipv4 + activate: true + state: present + max_prefix: + count: 10 + threshold: 40 + warning: true + state: present + default_originate: + route_map: aa + state: present + distribute_list: + in: XX + in_state: present + out: YY + out_state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.12.3 + address_family: + - type: ipv4 + activate: true + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + sender_loop_detect: true + password: bgppassword + address_family: + - type: ipv4 + activate: true + sender_loop_detect: true + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + - type: l2vpn + activate: true + state: present + send_community: + - type: 
standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + vrfs: + - name: "GREEN" + router_id: 50.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + always_compare_med: true + default_loc_pref: 1000 + route_reflector: + cluster_id: 1 + address_family_ipv4: + aggregate_address: + - ip_and_mask: 1.1.1.1/16 + state: present + dampening: + value: 15 750 2000 60 + route_map: qq + state: present + ibgp_redist_internal: + state: present + address_family_ipv6: + aggregate_address: + - ip_and_mask: 2001:4898:5808:ffa0::/126 + state: present + summary_only: true + ibgp_redist_internal: + state: present + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-worst + state: present + neighbor: + - name: peer1 + type: peergroup + bfd: yes + state: present + - name: ebgp_pg + type: peergroup + bfd: yes + state: present + - name: ibgp_pg + type: peergroup + weight: 10 + state: present + - type: ipv4 + interface: vlan10 + description: U_site2 vlan + send_community: + - type: extended + state: present + admin: up + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 11 + ip: 192.168.20.1 + name: peer1 + peergroup: peer1 + peergroup_state: present + peergroup_type: ibgp + adv_interval: 40 + fall_over: present + route_reflector_client: true + address_family: + - type: ipv4 + state: present + send_community: + - type: standard + state: present + state: present + - type: ipv4 + description: "U_site2 spine1" + remote_asn: 13 + local_as: 10 + weight: 10 + ip: 192.168.15.3 + address_family: + - type: ipv4 + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + add_path: both 3 + route_map: + - name: qq + filter: in + state: present + state: present + 
state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + state: present + - type: ipv6 + description: "U_site2-spine1-Auto Discovered peers" + auto_peer: unnumbered-auto + ibgp_peergroup: ibgp_pg + ibgp_peergroup_state: present + address_family: + - type: ipv4 + state: present + allow_as_in: 5 + next_hop_self: true + soft_reconf: true + send_community: + - type: standard + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + address_type: ipv4 + state: present + - route_type: connected + route_map_name: bb + address_type: ipv4 + state: present + - route_type: l2vpn + route_map_name: cc + address_type: ipv4 + state: present + bfd_all_neighbors: + interval: 200 + min_rx: 200 + multiplier: 3 + role: active + state: present + state: present + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2 new file mode 100644 index 00000000..ccf217d3 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2 @@ -0,0 +1,13 @@ +os10_vrf: + vrfdetails: + - vrf_name: GREEN + state: present + - vrf_name: BLUE + state: present +os10_vlan: + vlan 10: + description: "red" + state: present + vlan 20: + description: "yellow" + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2 new file mode 100644 index 00000000..1c0d524e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2 @@ -0,0 +1,13 @@ +os10_vrf: + vrfdetails: + - vrf_name: GREEN + state: absent + - vrf_name: BLUE + state: absent +os10_vlan: + 
vlan 10: + description: "red" + state: absent + vlan 20: + description: "yellow" + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml new file mode 100644 index 00000000..63123891 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml @@ -0,0 +1,7 @@ +test_roles: + - dellemc.os10.os10_vrf + - dellemc.os10.os10_vlan + - dellemc.os10.os10_bgp + +idempotent_roles: + - dellemc.os10.os10_bgp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml new file mode 100644 index 00000000..8c11e106 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml @@ -0,0 +1,16 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml new file mode 100644 index 00000000..879a3d3f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml @@ -0,0 +1,19 @@ +--- +- debug: msg="START cli/bad_operator.yaml" + +- name: test bad operator + os10_command: + commands: + - show version + - show interface ethernet 1/1/1 + wait_for: + - "result[0] contains 'Description : blah'" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- debug: msg="END cli/bad_operator.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml new file mode 100644 index 00000000..1aa70582 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml @@ -0,0 +1,19 @@ +--- +- debug: msg="START cli/contains.yaml" + +- name: test contains operator + os10_command: + commands: + - show version + - show interface ethernet 1/1/1 + wait_for: + - "result[0] contains OS10 Enterprise" + - "result[1] contains Ethernet " + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- debug: msg="END cli/contains.yaml" diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml new file mode 100644 index 00000000..0eff6170 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml @@ -0,0 +1,26 @@ +--- +- debug: msg="START cli/invalid.yaml" + +- name: run invalid command + os10_command: + commands: ['show foo'] + register: result + ignore_errors: yes + +- assert: + that: + - "'Error: Unrecognized command' in result.msg" + +- name: run commands that include invalid command + os10_command: + commands: + - show version + - show foo + register: result + ignore_errors: yes + +- assert: + that: + - "'Error: Unrecognized command' in result.msg" + +- debug: msg="END cli/invalid.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml new file mode 100644 index 00000000..0c85c0f8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml @@ -0,0 +1,27 @@ +--- +- debug: msg="START cli/output.yaml" + +- name: get output for single command + os10_command: + commands: ['show version'] + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for multiple commands + os10_command: + commands: + - show version + - show interface Eth 1/1/1 + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" + +- debug: msg="END cli/output.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml new file mode 100644 index 
00000000..65e5a82e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml @@ -0,0 +1,18 @@ +--- +- debug: msg="START cli/timeout.yaml" + +- name: test bad condition + os10_command: + commands: + - show version + wait_for: + - "result[0] contains bad_value_string" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- debug: msg="END cli/timeout.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml new file mode 100644 index 00000000..d675462d --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml @@ -0,0 +1,15 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, 
tags: ['cli'] } diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml new file mode 100644 index 00000000..d376e6eb --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml @@ -0,0 +1,38 @@ +--- +- debug: msg="START cli/sublevel.yaml" + +- name: setup test + os10_config: + lines: + - 'no ip access-list test' + match: none + +- name: configure sub level command + os10_config: + lines: ['seq 5 permit ip any any count byte'] + parents: ['ip access-list test'] + register: result + +- assert: + that: + - "result.changed == true" + - "'ip access-list test' in result.updates" + - "'seq 5 permit ip any any count byte' in result.updates" + +- name: configure sub level command idempotent check + os10_config: + lines: ['seq 5 permit ip any any count byte'] + parents: ['ip access-list test'] + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: + - 'no ip access-list test' + match: none + +- debug: msg="END cli/sublevel.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml new file mode 100644 index 00000000..ad598f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml @@ -0,0 +1,58 @@ +--- +- debug: msg="START cli/sublevel_block.yaml" + +- name: setup + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + parents: ['router bgp 10'] + before: ['no router bgp'] + after: ['exit'] + match: none + +- name: configure sub level command using block resplace + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - 
neighbor 1.1.1.4 + parents: ['router bgp 10'] + replace: block + after: ['exit'] + register: result + +- assert: + that: + - "result.changed == true" + - "'router bgp 10' in result.updates" + - "'neighbor 1.1.1.1' in result.updates" + - "'neighbor 1.1.1.2' in result.updates" + - "'neighbor 1.1.1.3' in result.updates" + - "'neighbor 1.1.1.4' in result.updates" + +- name: check sub level command using block replace + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + parents: ['router bgp 10'] + replace: block + after: ['exit'] + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: + - no router bgp + match: none + +- debug: msg="END cli/sublevel_block.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml new file mode 100644 index 00000000..0093e4c7 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml @@ -0,0 +1,62 @@ +--- +- debug: msg="START cli/sublevel_exact.yaml" + +- name: setup + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + - neighbor 1.1.1.5 + parents: ['router bgp 10'] + before: ['no router bgp'] + after: ['exit'] + match: none + +- name: configure sub level command using exact match + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + parents: ['router bgp 10'] + after: ['exit'] + match: exact + register: result + +- assert: + that: + - "result.changed == true" + - "'router bgp 10' in result.updates" + - "'neighbor 1.1.1.1' in result.updates" + - "'neighbor 1.1.1.2' in result.updates" + - "'neighbor 1.1.1.3' in result.updates" + - "'neighbor 1.1.1.4' in result.updates" + - "'neighbor 1.1.1.5' 
not in result.updates" + +- name: check sub level command using exact match + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + - neighbor 1.1.1.5 + parents: ['router bgp 10'] + after: ['exit'] + match: exact + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: + - no router bgp + match: none + +- debug: msg="END cli/sublevel_exact.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml new file mode 100644 index 00000000..38865340 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml @@ -0,0 +1,59 @@ +--- +- debug: msg="START cli/sublevel_strict.yaml" + +- name: setup + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + - neighbor 1.1.1.5 + parents: ['router bgp 10'] + before: ['no router bgp'] + after: ['exit'] + match: none + +- name: configure sub level command using strict match + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.2 + - neighbor 1.1.1.3 + - neighbor 1.1.1.4 + parents: ['router bgp 10'] + match: strict + register: result + +- assert: + that: + - "result.changed == false" + +- name: check sub level command using strict match + os10_config: + lines: + - neighbor 1.1.1.1 + - neighbor 1.1.1.3 + - neighbor 1.1.1.2 + parents: ['router bgp 10'] + after: ['exit'] + match: strict + register: result + +- assert: + that: + - "result.changed == true" + - "'router bgp 10' in result.updates" + - "'neighbor 1.1.1.1' not in result.updates" + - "'neighbor 1.1.1.2' in result.updates" + - "'neighbor 1.1.1.3' in result.updates" + - "'neighbor 1.1.1.4' not in result.updates" + - "'neighbor 1.1.1.5' not in result.updates" + +- name: teardown + 
os10_config: + lines: + - no router bgp + match: none + +- debug: msg="END cli/sublevel_strict.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml new file mode 100644 index 00000000..c90037b1 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml @@ -0,0 +1,33 @@ +--- +- debug: msg="START cli/toplevel.yaml" + +- name: setup + os10_config: + lines: ['hostname {{ inventory_hostname_short }}'] + match: none + +- name: configure top level command + os10_config: + lines: ['hostname foo'] + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + +- name: configure top level command idempotent check + os10_config: + lines: ['hostname foo'] + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: ['hostname {{ inventory_hostname_short }}'] + match: none + +- debug: msg="END cli/toplevel.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml new file mode 100644 index 00000000..7a50790f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml @@ -0,0 +1,40 @@ +--- +- debug: msg="START cli/toplevel_after.yaml" + +- name: setup + os10_config: + lines: + - "snmp-server contact ansible" + - "hostname {{ inventory_hostname_short }}" + match: none + +- name: configure top level command with before + os10_config: + lines: ['hostname foo'] + after: ['snmp-server contact bar'] + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + - "'snmp-server contact bar' in result.updates" 
+ +- name: configure top level command with before idempotent check + os10_config: + lines: ['hostname foo'] + after: ['snmp-server contact foo'] + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: + - "no snmp-server contact" + - "hostname {{ inventory_hostname_short }}" + match: none + +- debug: msg="END cli/toplevel_after.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml new file mode 100644 index 00000000..3af72fa8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml @@ -0,0 +1,40 @@ +--- +- debug: msg="START cli/toplevel_before.yaml" + +- name: setup + os10_config: + lines: + - "snmp-server contact ansible" + - "hostname {{ inventory_hostname_short }}" + match: none + +- name: configure top level command with before + os10_config: + lines: ['hostname foo'] + before: ['snmp-server contact bar'] + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + - "'snmp-server contact bar' in result.updates" + +- name: configure top level command with before idempotent check + os10_config: + lines: ['hostname foo'] + before: ['snmp-server contact foo'] + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os10_config: + lines: + - "no snmp-server contact" + - "hostname {{ inventory_hostname_short }}" + match: none + +- debug: msg="END cli/toplevel_before.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml new file mode 100644 index 00000000..14ee21c3 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml @@ -0,0 +1,35 @@ +--- +- debug: msg="START cli/toplevel_nonidempotent.yaml" + +- name: setup + os10_config: + lines: ['hostname {{ inventory_hostname_short }}'] + match: none + +- name: configure top level command + os10_config: + lines: ['hostname foo'] + match: strict + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + +- name: configure top level command idempotent check + os10_config: + lines: ['hostname foo'] + match: strict + register: result + +- assert: + that: + - "result.changed == true" + +- name: teardown + os10_config: + lines: ['hostname {{ inventory_hostname_short }}'] + match: none + +- debug: msg="END cli/toplevel_nonidempotent.yaml" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null 
+++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: 
teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2 new file mode 100644 index 00000000..52d49ff2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2 @@ -0,0 +1,4 @@ +idempotent: true +os10_ecmp: + ecmp_group_max_paths: 3 + trigger_threshold: 50 diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2 new file mode 100644 index 00000000..6c0bb03f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2 @@ -0,0 +1,4 @@ +idempotent: true +os10_ecmp: + ecmp_group_max_paths: 29 + trigger_threshold: 86 diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2 new file mode 100644 index 00000000..0187b2c1 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2 @@ -0,0 +1,4 @@ +idempotent: false +os10_ecmp: + ecmp_group_max_paths: + trigger_threshold: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2 new file mode 100644 index 00000000..d2cb816a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2 @@ -0,0 +1,3 @@ +os10_ecmp: + ecmp_group_max_paths: + trigger_threshold: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml new file mode 100644 index 00000000..1ee0bd40 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - dellemc.os10.os10_ecmp + +idempotent_roles: + - dellemc.os10.os10_ecmp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml new file mode 100644 index 00000000..8c11e106 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml @@ -0,0 +1,16 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + 
loop_var: test_case_to_run diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml new file mode 100644 index 00000000..d68efadc --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml @@ -0,0 +1,45 @@ +--- +- debug: msg="START cli/facts.yaml" + +- name: test all facts + os10_facts: + gather_subset: + - all + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts is defined" + - "result.ansible_facts.ansible_net_interfaces is defined" + - "result.ansible_facts.ansible_net_memfree_mb is defined" + - "result.ansible_facts.ansible_net_model is defined" + - "result.ansible_facts.ansible_net_servicetag is defined" + - "result.ansible_facts.ansible_net_version is defined" + +- name: test all facts except hardware + os10_facts: + gather_subset: + - "!hardware" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts.ansible_net_interfaces is defined" + - "result.ansible_facts.ansible_net_memfree_mb is not defined" + +- name: test interface facts + os10_facts: + gather_subset: + - interfaces + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts.ansible_net_interfaces is defined" + - "result.ansible_facts.ansible_net_memfree_mb is not defined" + + +- debug: msg="END cli/facts.yaml" diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + 
block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step 
variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2 new file mode 100644 index 00000000..fb184583 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2 @@ -0,0 +1,14 @@ +os10_flow_monitor: + session 1: + session_type: local + description: "Discription goes here" + port_match: + - interface_name: {{ os10_interface_1 }} + location: source + state: present + - interface_name: {{ os10_interface_2 }} + location: 
destination + state: present + flow_based: true + shutdown: up + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2 new file mode 100644 index 00000000..2f20dd25 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2 @@ -0,0 +1,16 @@ +os10_flow_monitor: + session 1: + session_type: local + description: "session1 desc" + port_match: + - interface_name: {{ os10_interface_1 }} + location: source + state: present + - interface_name: {{ os10_interface_2 }} + location: destination + state: present + flow_based: true + shutdown: down + state: present + session 2: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2 new file mode 100644 index 00000000..00225e2f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2 @@ -0,0 +1,16 @@ +os10_flow_monitor: + session 1: + session_type: local + description: + port_match: + - interface_name: {{ os10_interface_1 }} + location: + state: present + - interface_name: {{ os10_interface_2 }} + location: destination + state: present + flow_based: + shutdown: + state: present + session 2: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2 new file mode 100644 index 00000000..c7eeeae1 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2 @@ -0,0 +1,3 @@ +os10_interface: + {{ os10_interface_2 }}: + switchport: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2 new file mode 100644 index 00000000..94690f2a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2 @@ -0,0 +1,23 @@ +test_roles: + - dellemc.os10.os10_flow_monitor + - dellemc.os10.os10_interface +os10_interface: + {{ os10_interface_2 }}: + switchport: true + portmode: access +os10_flow_monitor: + session 1: + session_type: local + description: "Discription goes here" + port_match: + - interface_name: {{ os10_interface_1 }} + location: source + state: absent + - interface_name: {{ os10_interface_2 }} + location: destination + state: absent + flow_based: true + shutdown: up + state: absent + session 2: + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml new file mode 100644 index 00000000..8183f282 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_interface + - dellemc.os10.os10_flow_monitor diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + 
+- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + 
teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2 new file mode 100644 index 00000000..bb4364d6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2 @@ -0,0 +1,6 @@ +os10_interface: + {{ os10_interface_1 }}: + desc: "Connected to Core 2" + mtu: 2500 + admin: up + switchport: False diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2 new file mode 100644 index 00000000..725cbded --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2 @@ -0,0 +1,14 @@ +os10_interface: + {{ os10_interface_1 }}: + desc: "DHCP Ip" + mtu: 2500 + admin: up + switchport: False + ip_type_dynamic: True + ipv6_type_dynamic: True + {{ os10_interface_2 }}: + desc: "IPV6 Auto config" + mtu: 2000 + admin: up + switchport: False + ipv6_autoconfig: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2 new file mode 100644 index 00000000..74273ab0 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2 @@ -0,0 +1,12 @@ +os10_interface: + {{ os10_interface_1 }}: + desc: "Static Ip" + mtu: 2500 + portmode: + admin: up + switchport: False + ip_and_mask: 10.9.0.4/31 + ipv6_and_mask: 2001:4898:5809:faa2::10/126 + ipv6_autoconfig: false + ip_type_dynamic: false + ipv6_type_dynamic: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2 new file mode 100644 index 00000000..c77200ea --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2 @@ -0,0 +1,14 @@ +os10_interface: + {{ os10_interface_1 }}: + desc: "Static Ip" + mtu: 2500 + portmode: + admin: up + switchport: False + ip_and_mask: 10.9.0.4/31 + ipv6_and_mask: 2001:4898:5809:faa2::10/126 + ipv6_autoconfig: false + flowcontrol: + mode: "receive" + enable: "on" + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2 new file mode 100644 index 00000000..a38709cc --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2 @@ -0,0 +1,17 @@ +os10_interface: + {{ os10_interface_3 }}: + desc: "Switch port" + mtu: 3000 + portmode: trunk + admin: up + switchport: true + {{ os10_interface_1 }}: + ip_and_mask: + ipv6_and_mask: + ipv6_autoconfig: false + state_ipv6: absent + {{ os10_interface_2 }}: + ip_and_mask: + ipv6_and_mask: + ipv6_autoconfig: false + state_ipv6: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2 new file mode 100644 index 00000000..ea4414de --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2 @@ -0,0 +1,17 @@ +test_roles: + - dellemc.os10.os10_vlan + - dellemc.os10.os10_interface + +os10_vlan: + vlan 10: + state: present + +os10_interface: + vlan 10: + ip_and_mask: + ipv6_and_mask: 2001:4898:5808:ffaf::1/64 + state_ipv6: 
present + ip_helper: + - ip: 10.0.0.33 + state: present + admin: up diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2 new file mode 100644 index 00000000..310dcd39 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2 @@ -0,0 +1,4 @@ +os10_interface: + range ethernet {{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }},{{ os10_interface_3.split()[1] }}: + mtu: 2500 + admin: up diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2 new file mode 100644 index 00000000..e51374b7 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2 @@ -0,0 +1,45 @@ +test_roles: + - dellemc.os10.os10_vlan + - dellemc.os10.os10_interface + +os10_vlan: + vlan 10: + state: absent + +os10_interface: + {{ os10_interface_1 }}: + desc: + mtu: + portmode: access + admin: down + switchport: true + ip_and_mask: + ipv6_and_mask: + ip_type_dynamic: false + ipv6_type_dynamic: false + ipv6_autoconfig: false + flowcontrol: + mode: receive + enable: off + state: absent + {{ os10_interface_2 }}: + desc: + mtu: + portmode: access + admin: down + switchport: true + ip_and_mask: + ipv6_and_mask: + ip_type_dynamic: false + ipv6_type_dynamic: false + ipv6_autoconfig: false + flowcontrol: + mode: receive + enable: off + state: absent + {{ os10_interface_3 }}: + desc: + mtu: + portmode: access + admin: down + switchport: true diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml new file mode 100644 index 00000000..ff3aa0eb --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_interface diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list 
+ import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + 
set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml new file mode 
100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2 new file mode 100644 index 00000000..fc74977a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2 @@ -0,0 +1,12 @@ +os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: {{ os10_interface_1 }} + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2 new file mode 100644 index 00000000..e8c2338e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2 @@ -0,0 +1,12 @@ +os10_lag: + Po 12: + type: dynamic + min_links: 32 + max_bundle_size: 32 + lacp_system_priority: 5 + channel_members: + - port: {{ os10_interface_1 }} + mode: passive + port_priority: 4 + lacp_rate_fast: false + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2 new file mode 100644 index 00000000..03d18fd2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2 @@ -0,0 +1,12 @@ +os10_lag: + Po 12: + type: dynamic + min_links: + max_bundle_size: + lacp_system_priority: + channel_members: + - port: {{ os10_interface_1 }} + mode: + port_priority: + lacp_rate_fast: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2 new file mode 100644 index 
00000000..c9e7b820 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2 @@ -0,0 +1,12 @@ +os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: 2 + channel_members: + - port: {{ os10_interface_1 }} + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2 new file mode 100644 index 00000000..09b6c3ea --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2 @@ -0,0 +1,12 @@ +os10_lag: + Po 12: + type: dynamic + min_links: 2 + max_bundle_size: 2 + lacp_system_priority: + channel_members: + - port: {{ os10_interface_1 }} + mode: "active" + port_priority: 3 + lacp_rate_fast: true + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml new file mode 100644 index 00000000..4b1840fa --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_lag diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ 
os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not 
defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2 new file mode 100644 index 00000000..aa806694 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2 @@ -0,0 +1,44 @@ +os10_lldp: + enable: true + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: present + local_interface: + "{{ os10_interface_1 }}": + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: present + application: + - network_policy_id: 4 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2 new file mode 100644 index 00000000..8b0272ab --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2 @@ -0,0 +1,44 @@ +os10_lldp: + enable: true + multiplier: 10 + reinit: 10 + timer: 15 + advertise: + med: + fast_start_repeat_count: 10 + application: + - name: guest-voice + network_policy_id: 1 + vlan_id: 5 + vlan_type: untag + l2_priority: 7 + code_point_value: 20 + state: present + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: present + local_interface: + "{{ os10_interface_1 }}": + mode: transmit + mode_state: present + advertise: + med: + enable: true + tlv: network-policy + tlv_state: present + application: + - network_policy_id: 5 + state: present + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2 new file mode 100644 index 00000000..7ded6af6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2 @@ -0,0 +1,44 @@ +os10_lldp: + enable: false + multiplier: 3 + reinit: 2 + timer: 5 + advertise: + med: + fast_start_repeat_count: 4 + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: present + - name: voice + 
network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + "{{ os10_interface_1 }}": + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: present + application: + - network_policy_id: 4 + state: absent + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2 new file mode 100644 index 00000000..9f372d8a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2 @@ -0,0 +1,46 @@ +os10_lldp: + enable: true + multiplier: + reinit: + timer: + advertise: + med: + fast_start_repeat_count: + application: + - name: guest-voice + network_policy_id: 0 + vlan_id: 2 + vlan_type: tag + l2_priority: 3 + code_point_value: 4 + state: absent + - name: voice + network_policy_id: 1 + vlan_id: 3 + vlan_type: untag + l2_priority: 3 + code_point_value: 4 + state: absent + local_interface: + "{{ os10_interface_1 }}": + mode: rx + mode_state: present + advertise: + med: + enable: true + tlv: inventory + tlv_state: absent + application: + - network_policy_id: 4 + state: absent + - network_policy_id: 5 + state: absent + tlv: + - name: basic-tlv + value: management-address port-description + state: present + - name: dcbxp-appln + value: iscsi + state: present + - name: dcbxp + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml new file mode 100644 index 00000000..031311ec --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_lldp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: 
"{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source 
testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running 
step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2 new file mode 100644 index 00000000..75a47735 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2 @@ -0,0 +1,6 @@ +idempotent: true +os10_logging: + console: + 
severity: log-err + log_file: + severity: log-err diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2 new file mode 100644 index 00000000..ea10cb8e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2 @@ -0,0 +1,6 @@ +idempotent: true +os10_logging: + console: + severity: log-debug + log_file: + severity: log-debug diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2 new file mode 100644 index 00000000..f34f7ced --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2 @@ -0,0 +1,8 @@ +idempotent: true +os10_logging: + console: + enable: false + severity: log-err + log_file: + enable: false + severity: log-err diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2 new file mode 100644 index 00000000..d714efd2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2 @@ -0,0 +1,12 @@ +idempotent: false +os10_logging: + logging: + - ip: 1.1.1.1 + state: present + console: + enable: True + severity: log-err + log_file: + enable: True + severity: log-err + source_interface: "{{ os10_interface_1 }}" diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2 new file mode 100644 index 00000000..992bafac --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2 @@ -0,0 +1,12 @@ +idempotent: false +os10_logging: + logging: + - ip: 1.1.1.1 + state: absent + console: + enable: True + severity: log-err + log_file: + enable: True + severity: log-err + source_interface: "{{ os10_interface_1 }}" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2 new file mode 100644 index 00000000..d8d31880 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2 @@ -0,0 +1,11 @@ +os10_logging: + logging: + - ip: 1.1.1.1 + state: absent + console: + enable: True + severity: log-notice + log_file: + enable: True + severity: log-notice + source_interface: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml new file mode 100644 index 00000000..55afbe1d --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - 
dellemc.os10.os10_logging + +idempotent_roles: + - dellemc.os10.os10_logging diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + 
paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ 
teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2 new file mode 100644 index 00000000..35d8889a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2 @@ -0,0 +1,24 @@ +os10_ntp: + source: "{{ os10_interface_1 }}" + master: 5 + authenticate: true + authentication_key: + - key_num: 123 + key_string_type: 0 + key_string: test + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + "{{ os10_interface_1 }}": + disable: true + broadcast: true + vrf: + name: red + state: present diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2 new file mode 100644 index 00000000..f2b11184 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2 @@ -0,0 +1,20 @@ +os10_ntp: + master: 4 + authenticate: false + authentication_key: + - key_num: 456 + key_string_type: 0 + key_string: ntptest + state: present + trusted_key: + - key_num: 4626 + state: present + server: + - ip: 2.2.2.2 + key: 567 + prefer: false + state: present + intf: + "{{ os10_interface_1 }}": + disable: false + broadcast: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2 new file mode 100644 index 00000000..56aff6af --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2 @@ -0,0 +1,14 @@ +os10_ntp: + authentication_key: + - key_num: 456 + key_string_type: 0 + key_string: ntptest + state: absent + trusted_key: + - key_num: 4626 + state: absent + server: + - ip: 2.2.2.2 + key: 567 + prefer: false + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2 new file mode 100644 index 00000000..61c78472 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2 @@ -0,0 +1,11 @@ +os10_ntp: + authentication_key: + 
- key_num: 456 + key_string_type: 0 + key_string: ntptest + trusted_key: + - key_num: 4626 + server: + - ip: 2.2.2.2 + key: 567 + prefer: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2 new file mode 100644 index 00000000..a2326521 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2 @@ -0,0 +1,16 @@ +os10_ntp: + authenticate: '' + authentication_key: + - key_num: 456 + key_string_type: 0 + key_string: ntptest + trusted_key: + - key_num: 4626 + server: + - ip: 2.2.2.2 + key: 567 + prefer: '' + intf: + "{{ os10_interface_1 }}": + disable: '' + broadcast: '' diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2 new file mode 100644 index 00000000..7ea26a0a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2 @@ -0,0 +1,4 @@ +os10_vrf: + vrfdetails: + - vrf_name: red + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2 new file mode 100644 index 00000000..a916200c --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2 @@ -0,0 +1,32 @@ +test_roles: + - dellemc.os10.os10_ntp + - dellemc.os10.os10_vrf +os10_ntp: + source: + master: + authenticate: + authentication_key: + - key_num: 123 + key_string_type: 0 + key_string: test + state: absent + 
trusted_key: + - key_num: 1323 + state: absent + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: absent + intf: + "{{ os10_interface_1 }}": + disable: false + broadcast: false + vrf: + name: red + state: absent + +os10_vrf: + vrfdetails: + - vrf_name: red + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml new file mode 100644 index 00000000..b42240dd --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_vrf + - dellemc.os10.os10_ntp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: 
tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + 
loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ 
testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2 new file mode 100644 index 00000000..4e9d912b --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2 @@ -0,0 +1,17 @@ +idempotent: true +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2 new file mode 100644 index 00000000..3481d358 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2 @@ -0,0 +1,17 @@ +idempotent: true +os10_prefix_list: + - name: testpl + type: ipv4 + description: prefixlistdesc + entries: + - number: 18 + permit: false + net_num: 20.0.0.0 + mask: 24 + condition_list: + - condition: le + prelen: 14 + - condition: ge + prelen: 13 + state: present + state: present diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2 new file mode 100644 index 00000000..d14b8dda --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2 @@ -0,0 +1,17 @@ +idempotent: false +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: absent + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2 new file mode 100644 index 00000000..b349bd90 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2 @@ -0,0 +1,16 @@ +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: absent + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2 new file mode 100644 index 00000000..b349bd90 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2 @@ -0,0 +1,16 @@ +os10_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: absent + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml new file mode 100644 index 00000000..706b6a87 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - dellemc.os10.os10_prefix_list + +idempotent_roles: + - dellemc.os10.os10_prefix_list diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: 
['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + 
always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + 
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2 new file mode 100644 index 00000000..eec0f565 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2 @@ -0,0 +1,10 @@ +idempotent: true +os10_qos: + policy_map: + - name: testpolicy + type: qos + state: present + class_map: + - name: testclas + type: qos + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2 new file mode 100644 index 00000000..ba593683 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2 @@ -0,0 +1,10 @@ +idempotent: true +os10_qos: + policy_map: + - name: testpolicy + type: + state: present + class_map: + - name: testclas + type: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2 new file mode 100644 index 00000000..3310a13e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2 @@ -0,0 +1,10 @@ +idempotent: false +os10_qos: + policy_map: + - name: testpolicy + type: + state: absent + class_map: + - name: testclas + type: + 
state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2 new file mode 100644 index 00000000..2e7960c6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2 @@ -0,0 +1,9 @@ +os10_qos: + policy_map: + - name: testpolicy + type: + state: absent + class_map: + - name: testclas + type: + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml new file mode 100644 index 00000000..9a1913c9 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - dellemc.os10.os10_qos + +idempotent_roles: + - dellemc.os10.os10_qos diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined 
and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + 
block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2 new file mode 100644 index 00000000..1c0adec0 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2 @@ -0,0 +1,48 @@ +idempotent: true +os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: present + community_list: + - type: standard + name: qqq + permit: false + community: internet + state: present + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: present + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + set: + local_pref: 1200 + metric_type: type-1 + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2 new file mode 100644 index 00000000..fb464397 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2 @@ -0,0 +1,48 @@ +idempotent: true +os10_route_map: + as_path: + - access_list: aa + permit: false + regex: abc + state: present + community_list: + - type: standard + name: qqq + permit: false + regex: internet + state: present + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:35" + state: present + route_map: + - name: test + permit: false + seq_num: 5 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + set: + local_pref: 1500 + metric_type: type-1 + metric: "- 20" + origin: egp + weight: 60 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: no-export + comm_list: + add: commstd + delete: commex + extcommunity: "25:37" + extcomm_list: + add: commstd + delete: commex + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2 new file mode 100644 index 00000000..a10c5d0a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2 @@ -0,0 +1,48 @@ +idempotent: false +os10_route_map: + as_path: + - access_list: aa + permit: + regex: www + state: present + community_list: + - type: standard + name: qqq + permit: + community: internet + state: present + extcommunity_list: + - type: standard + name: qqq + permit: + community: "rt 22:33" + state: present + 
route_map: + - name: test + permit: + seq_num: + continue: + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + set: + local_pref: + metric_type: + metric: + origin: + weight: + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: present + community: + comm_list: + add: + delete: + extcommunity: + extcomm_list: + add: + delete: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2 new file mode 100644 index 00000000..181d8823 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2 @@ -0,0 +1,47 @@ +os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: absent + community_list: + - type: standard + name: qqq + permit: false + community: internet + state: absent + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: absent + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + set: + local_pref: 1200 + metric_type: type-1 + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: absent + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2 new file mode 100644 index 00000000..e7380b3d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2 @@ -0,0 +1,47 @@ +os10_route_map: + as_path: + - access_list: aa + permit: true + regex: www + state: absent + community_list: + - type: standard + name: qqq + permit: false + community: internet + state: absent + extcommunity_list: + - type: standard + name: qqq + permit: false + community: "rt 22:33" + state: absent + route_map: + - name: test + permit: true + seq_num: 1 + continue: 20 + match: + - ip_type: ipv4 + access_group: testaccess + prefix_list: testprefix + set: + local_pref: 1200 + metric_type: internal + metric: + 30 + origin: igp + weight: 50 + next_hop: + - type: ip + address: 10.1.1.1 + track_id: 3 + state: absent + community: internet + comm_list: + add: qq + delete: qqq + extcommunity: "22:33" + extcomm_list: + add: aa + delete: aa + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml new file mode 100644 index 00000000..85313472 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml @@ -0,0 +1,5 @@ +test_roles: + - dellemc.os10.os10_route_map + +idempotent_roles: + - dellemc.os10.os10_route_map diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ 
os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not 
defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2 new file mode 100644 index 00000000..bbc97642 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2 @@ -0,0 +1,135 @@ +test_roles: + - dellemc.os10.os10_snmp +os10_snmp: + snmp_source_interface: mgmt 1/1/1 + snmp_location: Chennai + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: present + - name: test + access_mode: ro + access_list: + name: test_acl + state: present + state: present + snmp_traps: + - name: all + state: present + snmp_engine_id: 123456789 + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + - host: 1.1.1.1 + engine_id: '0xcd' + udpport: 200 + - host: 2.1.1.1 + engine_id: '0xef' + udpport: 200 + snmp_vrf: red + snmp_group: + - name: group_1 + version: 1 + write_view: + name: view_2 + - name: group_2 + version: 2c + state: present + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + 
notify_view: + name: view_3 + - name: group_3 + version: 3 + security_level: auth + read_view: + name: view_1 + write_view: + name: view_2 + state: present + - name: group_4 + version: 3 + security_level: priv + notify_view: + name: view_1 + state: present + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: present + - ip: 2.1.1.1 + version: 1 + communitystring: c3 + trap_categories: + dom: true + entity: true + snmp: true + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: true + lldp: true + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: present + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: false + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: present + - name: user_4 + group_name: group_1 + version: 3 + state: present + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2 new file mode 100644 index 00000000..8c43046d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2 @@ -0,0 +1,144 @@ +os10_snmp: + snmp_source_interface: mgmt 1/1/1 + snmp_location: Chennai + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: absent + - name: test + access_mode: ro + access_list: + name: test_acl + state: present + state: present + snmp_traps: + - name: all + state: present + snmp_engine_id: 123456789 + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + - host: 1.1.1.1 + engine_id: '0xcd' + udpport: 200 + - host: 2.1.1.1 + engine_id: '0xef' + udpport: 200 + snmp_vrf: red + snmp_group: + - name: group_1 + version: 1 + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + - name: group_2 + version: 2c + state: present + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + notify_view: + name: view_3 + state: absent + - name: group_3 + version: 3 + security_level: auth + read_view: + name: view_1 + write_view: + name: view_2 + state: present + - name: group_4 + version: 3 + security_level: priv + notify_view: + name: view_1 + state: present + read_view: + name: view_1 + write_view: + name: view_2 + state: present + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: present + - ip: 2.1.1.1 + version: 1 + communitystring: c3 + trap_categories: + dom: true + entity: true + snmp: true + lldp: true + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: false + lldp: false + state: present + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: present + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: false + 
algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: present + - name: user_4 + group_name: group_1 + version: 3 + state: present + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2 new file mode 100644 index 00000000..4b4a8683 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2 @@ -0,0 +1,147 @@ +os10_snmp: + snmp_source_interface: mgmt 1/1/1 + snmp_location: Chennai + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: absent + - name: test + access_mode: ro + access_list: + name: test_acl + state: present + state: absent + snmp_traps: + - name: all + state: absent + snmp_engine_id: + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + - host: 1.1.1.1 + engine_id: '0xcd' + udpport: 200 + - host: 2.1.1.1 + engine_id: '0xef' + udpport: 200 + state: absent + snmp_vrf: red + snmp_group: + - name: group_1 + version: 1 + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + - name: group_2 + version: 2c + state: present + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + notify_view: + name: view_3 + state: absent + - 
name: group_3 + version: 3 + security_level: auth + read_view: + name: view_1 + write_view: + name: view_2 + state: present + - name: group_4 + version: 3 + security_level: priv + notify_view: + name: view_1 + state: absent + read_view: + name: view_1 + state: absent + write_view: + name: view_2 + state: absent + state: absent + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: present + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: present + - ip: 2.1.1.1 + version: 1 + communitystring: c3 + trap_categories: + dom: true + entity: true + snmp: true + lldp: true + state: present + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: false + lldp: false + state: absent + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: false + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: present + - name: user_2 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: present + - name: user_3 + group_name: group_1 + version: 2c + state: absent + - name: user_4 + group_name: group_1 + version: 3 + state: absent + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2 new file mode 100644 index 00000000..7ea26a0a --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2 @@ -0,0 +1,4 @@ +os10_vrf: + vrfdetails: + - vrf_name: red + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2 new file mode 100644 index 00000000..1a3ba013 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2 @@ -0,0 +1,147 @@ +test_roles: + - dellemc.os10.os10_snmp + - dellemc.os10.os10_vrf +os10_vrf: + vrfdetails: + - vrf_name: "red" + state: "absent" +os10_snmp: + snmp_source_interface: + snmp_location: + snmp_community: + - name: public + access_mode: ro + access_list: + name: test_acl + state: absent + - name: test + access_mode: ro + access_list: + name: test_acl + state: absent + state: absent + snmp_traps: + - name: all + state: absent + snmp_engine_id: + snmp_remote_engine_id: + - host: 1.1.1.1 + engine_id: '0xab' + state: absent + - host: 1.1.1.1 + engine_id: '0xcd' + udpport: 200 + state: absent + - host: 2.1.1.1 + engine_id: '0xef' + udpport: 200 + state: absent + snmp_group: + - name: group_1 + version: 1 + write_view: + name: view_2 + state: absent + - name: group_2 + version: 2c + state: present + access_list: + name: test_acl + read_view: + name: view_1 + write_view: + name: view_2 + notify_view: + name: view_3 + state: absent + - name: group_3 + version: 3 + security_level: auth + read_view: + name: view_1 + write_view: + name: view_2 + state: present + state: absent + - name: group_4 + version: 3 + security_level: priv + notify_view: + name: view_1 + state: present + state: absent + snmp_host: + - ip: 1.1.1.1 + communitystring: c1 + version: "2c" + udpport: 4 + state: absent + - ip: 2.2.2.2 + version: 1 + communitystring: c3 + state: absent + - ip: 2.1.1.1 
+ version: 1 + communitystring: c3 + trap_categories: + dom: true + entity: true + snmp: true + state: absent + - ip: 3.1.1.1 + version: 3 + security_level: priv + security_name: test + notification_type: informs + udpport: 200 + trap_categories: + dom: true + entity: true + envmon: true + lldp: true + state: absent + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + snmp_vrf: + snmp_user: + - name: user_1 + group_name: group_1 + version: 3 + authentication: + localized: false + algorithm: md5 + password: 9fc53d9d908118b2804fe80e3ba8763d + encryption: + algorithm: aes + password: d0452401a8c3ce42804fe80e3ba8763d + state: absent + - name: user_2 + group_name: group_1 + version: 3 + authentication: + localized: true + algorithm: md5 + password: '0x9fc53d9d908118b2804fe80e3ba8763d' + encryption: + algorithm: aes + password: '0xd0452401a8c3ce42804fe80e3ba8763d' + state: absent + - name: user_3 + group_name: group_1 + version: 2c + state: absent + - name: user_4 + group_name: group_1 + version: 3 + state: absent + - name: user_5 + group_name: group_2 + version: 2c + remote: + ip: 1.1.1.1 + udpport: 200 + access_list: test_acl + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml new file mode 100644 index 00000000..0e3995a5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_vrf + - dellemc.os10.os10_snmp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | 
map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + 
include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2 new file mode 100644 index 00000000..13549985 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2 @@ -0,0 +1,26 @@ +os10_system: + hostname: OS10 + hardware_forwarding: scaled-l3-hosts + hash_algo: + algo: + - name: lag + mode: crc + state: present + - name: ecmp + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: source-ip + state: present + ipv6_selection: + - field: source-ip + state: present + mac_selection: + - field: source-mac + state: present + tcp_udp_selection: + - field: l4-source-port + state: present + diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2 new file mode 100644 index 00000000..7263204b --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2 @@ -0,0 +1,26 @@ +os10_system: + hostname: OS10 + hardware_forwarding: scaled-l2 + hash_algo: + algo: + - name: ecmp + mode: random + state: present + - name: lag + mode: xor + state: present + load_balance: + ingress_port: true + ip_selection: + - field: destination-ip + state: present + ipv6_selection: + - field: destination-ip + state: present + mac_selection: + - field: destination-mac + state: present + tcp_udp_selection: + - field: l4-destination-port + state: present + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2 new file mode 100644 index 00000000..17c84d33 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2 @@ -0,0 +1,26 @@ +os10_system: + hostname: OS10 + hardware_forwarding: + hash_algo: + algo: + - name: ecmp + mode: random + state: absent + - name: lag + mode: xor + state: present + load_balance: + ingress_port: + ip_selection: + - field: destination-ip + state: absent + ipv6_selection: + - field: destination-ip + state: absent + mac_selection: + - field: destination-mac + state: absent + tcp_udp_selection: + - field: l4-destination-port + state: absent + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2 new file mode 100644 index 00000000..480ce114 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2 @@ -0,0 +1,25 @@ +os10_system: + hostname: + hardware_forwarding: + hash_algo: + algo: + - name: ecmp + mode: random + state: absent + - name: lag + mode: xor + state: absent + load_balance: + ingress_port: + ip_selection: + - field: destination-ip + state: absent + ipv6_selection: + - field: destination-ip + state: absent + mac_selection: + - field: destination-mac + state: absent + tcp_udp_selection: + - field: l4-destination-port + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml new file mode 100644 index 00000000..d847941e --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_system diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name 
}}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: 
testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml new file mode 100644 
index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2 new file mode 100644 index 00000000..fd7a9165 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2 @@ -0,0 +1,32 @@ +test_roles: + - dellemc.os10.os10_uplink +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "present" + - type: "downstream" + intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}" + state: "present" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + state: "present" + - id: 2 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel2" + state: "present" + - type: "downstream" + intf: "{{ os10_interface_3 }}" + state: "present" + downstream: + disable_links: 10 + auto_recover: true + defer_time: 50 + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2 new file mode 100644 index 00000000..d412e3e0 
--- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2 @@ -0,0 +1,30 @@ +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: absent + - type: "downstream" + intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}" + state: "present" + downstream: + disable_links: 10 + auto_recover: false + defer_time: 50 + state: "present" + - id: 2 + enable: false + uplink_type: + - type: "upstream" + intf: "port-channel2" + state: "present" + - type: "downstream" + intf: "{{ os10_interface_3 }}" + state: absent + downstream: + disable_links: 15 + auto_recover: false + defer_time: 40 + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2 new file mode 100644 index 00000000..b2d3da07 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2 @@ -0,0 +1,30 @@ +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "absent" + - type: "downstream" + intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}" + state: "absent" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + state: "absent" + - id: 2 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel2" + state: "absent" + - type: "downstream" + intf: "{{ os10_interface_3 }}" + state: "absent" + downstream: + disable_links: 10 + auto_recover: true + defer_time: 50 + state: "absent" diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2 new file mode 100644 index 00000000..2f579616 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2 @@ -0,0 +1,5 @@ +os10_lag: + Po 1: + state: present + Po 2: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2 new file mode 100644 index 00000000..dcd90471 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2 @@ -0,0 +1,38 @@ +test_roles: + - dellemc.os10.os10_uplink + - dellemc.os10.os10_lag +os10_lag: + Po 1: + state: absent + Po 2: + state: absent +os10_uplink: + uplink_state_group: + - id: 1 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel1" + state: "absent" + - type: "downstream" + intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}" + state: "absent" + downstream: + disable_links: all + auto_recover: false + defer_time: 50 + state: "absent" + - id: 2 + enable: True + uplink_type: + - type: "upstream" + intf: "port-channel2" + state: "absent" + - type: "downstream" + intf: "{{ os10_interface_3 }}" + state: "absent" + downstream: + disable_links: 10 + auto_recover: true + defer_time: 50 + state: "absent" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml new file mode 100644 index 00000000..c3d3900d --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_lag + - dellemc.os10.os10_uplink diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ 
ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables 
+ include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + 
debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml new file mode 100644 index 00000000..4de62fb9 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml @@ -0,0 +1,15 @@ +--- +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: 
test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2 new file mode 100644 index 00000000..b53a5bb0 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2 @@ -0,0 +1,9 @@ +os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: present + - username: u1 + password: a1a2a3a4!@#$ + role: netadmin + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2 new file mode 100644 index 00000000..abb30cfa --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2 @@ -0,0 +1,5 @@ +os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2 new file mode 100644 index 00000000..f817f7b2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2 @@ -0,0 +1,9 @@ +os10_users: + - username: test + password: a1a2a3a4!@#$ + role: sysadmin + state: absent + - username: u1 + password: a1a2a3a4!@#$ + role: netadmin + state: absent diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml new file mode 100644 index 00000000..0a54dcad --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml @@ -0,0 +1,2 @@ +test_roles: + - dellemc.os10.os10_users diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface 
list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case 
output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml new 
file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2 new file mode 100644 index 00000000..593fb33d --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2 @@ -0,0 +1,17 @@ +idempotent: true +os10_vlan: + vlan 100: + description: "Blue" + tagged_members: + - port: {{ os10_interface_1 }} + state: present + untagged_members: + - port: {{ os10_interface_2 }} + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2 new file mode 100644 index 00000000..a2fd82f4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2 @@ -0,0 +1,19 @@ +idempotent: true +os10_vlan: + vlan 100: + description: "Blue VLAN" + tagged_members: + - port: {{ os10_interface_1 }} + state: present + - port: {{ os10_interface_3 }} + state: present + untagged_members: + - port: {{ os10_interface_2 }} + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2 new file mode 100644 index 00000000..bbff49b1 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2 @@ -0,0 +1,17 @@ +idempotent: false +os10_vlan: + vlan 100: + description: "Blue VLAN" + tagged_members: + - port: range ethernet {{ os10_interface_1.split()[1] }},{{ 
os10_interface_3.split()[1] }} + state: present + untagged_members: + - port: range {{ os10_interface_2 }} + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: "present" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2 new file mode 100644 index 00000000..2cc502d2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2 @@ -0,0 +1,20 @@ +idempotent: false +os10_vlan: + default_vlan_id: 1 + vlan 100: + description: + tagged_members: + - port: {{ os10_interface_1 }} + state: present + - port: {{ os10_interface_3 }} + state: absent + untagged_members: + - port: {{ os10_interface_2 }} + state: present + state: present + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: "present" + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2 new file mode 100644 index 00000000..81cff710 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2 @@ -0,0 +1,3 @@ +os10_lag: + Po 1: + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2 new file mode 100644 index 00000000..e640d0c1 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2 @@ -0,0 +1,25 @@ +test_roles: + - dellemc.os10.os10_vlan + - dellemc.os10.os10_lag +os10_lag: + Po 1: + state: absent +os10_vlan: + default_vlan_id: + vlan 100: + description: + tagged_members: + - port: {{ os10_interface_1 }} + state: absent + - port: {{ os10_interface_3 }} + state: absent + untagged_members: + - port: {{ os10_interface_2 }} + state: absent + state: absent + vlan 888: + description: "NSX_Cluster" + untagged_members: + - port: port-channel 1 + state: absent + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml new file mode 100644 index 00000000..7acc287d --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml @@ -0,0 +1,6 @@ +test_roles: + - dellemc.os10.os10_lag + - dellemc.os10.os10_vlan + +idempotent_roles: + - dellemc.os10.os10_vlan diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml new file mode 100644 index 00000000..967b3f14 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +testcase: "*" +test_idempotency: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 
--- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + 
idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: 
testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + 
paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2 new file mode 100644 index 00000000..90ca5988 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2 @@ -0,0 +1,15 @@ +idempotent: true +test_roles: + - dellemc.os10.os10_vlt +os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + discovery_intf: {{ os10_interface_1.split()[1] }} + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2 new file mode 100644 index 00000000..e1406d21 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2 @@ -0,0 +1,14 @@ +idempotent: false +os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + backup_destination_vrf: + discovery_intf: {{ os10_interface_1.split()[1] }} + discovery_intf_state: absent + peer_routing: false + vlt_mac: aa:aa:aa:aa:aa:ab + vlt_peers: + Po 12: + peer_lag: 14 + state: present diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2 new file mode 100644 index 00000000..1d667cca --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2 @@ -0,0 +1,12 @@ +os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + discovery_intf: {{ os10_interface_1.split()[1] }} + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2 new file mode 100644 index 00000000..17245d04 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2 @@ -0,0 +1,7 @@ +os10_lag: + Po 12: + state: present + +os10_interface: + {{ os10_interface_1 }}: + switchport: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2 new file mode 100644 index 00000000..4460af86 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2 @@ -0,0 +1,23 @@ +test_roles: + - dellemc.os10.os10_vlt + - dellemc.os10.os10_lag + - dellemc.os10.os10_interface +os10_interface: + {{ os10_interface_1 }}: + portmode: access + switchport: true +os10_lag: + Po 12: + state: absent +os10_vlt: + domain: 1 + backup_destination: "192.168.211.175" + destination_type: "ipv4" + discovery_intf: {{ 
os10_interface_1.split()[1] }} + discovery_intf_state: present + peer_routing: True + vlt_mac: aa:aa:aa:aa:aa:aa + vlt_peers: + Po 12: + peer_lag: 13 + state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml new file mode 100644 index 00000000..e18e8364 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml @@ -0,0 +1,7 @@ +test_roles: + - dellemc.os10.os10_interface + - dellemc.os10.os10_lag + - dellemc.os10.os10_vlt + +idempotent_roles: + - dellemc.os10.os10_vlt diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" 
+ ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: 
testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2 new file mode 100644 index 00000000..0f95b515 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2 @@ -0,0 +1,43 @@ +test_roles: + - dellemc.os10.os10_vrrp +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + {{ os10_interface_1 }}: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2 new file mode 100644 index 00000000..b3fc61ce --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2 @@ -0,0 +1,41 @@ +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + {{ os10_interface_1 }}: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 200 + preempt: true + 
track_interface: + - resource_id: 3 + priority_cost: 30 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: absent + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 300 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: absent + - ip: 4.1.1.3 + state: absent + priority: 140 + preempt: true + track_interface: + - resource_id: 3 + priority_cost: 20 + state: present + adv_interval_centisecs: 300 + state: present + vlan100: + vrrp_active_active_mode: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2 new file mode 100644 index 00000000..f013293a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2 @@ -0,0 +1,41 @@ +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + {{ os10_interface_1 }}: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: absent + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: present + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: absent + adv_interval_centisecs: 200 + state: present + vlan100: + vrrp_active_active_mode: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2 new file mode 100644 index 00000000..8e79b319 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2 @@ -0,0 +1,41 @@ +os10_vrrp: + vrrp: + delay_reload: 2 + version: 3 + {{ os10_interface_1 }}: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: present + - ip: 3001:4828:5808:ffa3::9 + state: present + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: absent + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: present + - ip: 4.1.1.2 + state: present + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + adv_interval_centisecs: 200 + state: absent + vlan100: + vrrp_active_active_mode: true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2 new file mode 100644 index 00000000..dd6b16b4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2 @@ -0,0 +1,3 @@ +os10_interface: + {{ os10_interface_1 }}: + switchport: false diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2 new file mode 100644 index 00000000..2d876f55 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2 
@@ -0,0 +1,47 @@ +test_roles: + - dellemc.os10.os10_vrrp + - dellemc.os10.os10_vlan +os10_vlan: + vlan 100: + state: absent +os10_vrrp: + vrrp: + delay_reload: 0 + version: + {{ os10_interface_1 }}: + vrrp_group: + - group_id: 2 + type: ipv6 + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: absent + virtual_address: + - ip: 2001:4828:5808:ffa3::9 + state: absent + - ip: 3001:4828:5808:ffa3::9 + state: absent + - ip: 4001:4828:5808:ffa3::9 + state: absent + adv_interval_centisecs: 200 + state: absent + - group_id: 4 + virtual_address: + - ip: 4.1.1.1 + state: absent + - ip: 4.1.1.2 + state: absent + - ip: 4.1.1.3 + state: absent + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: absent + adv_interval_centisecs: 200 + state: absent + vlan100: + vrrp_active_active_mode: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml new file mode 100644 index 00000000..1eb402bf --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_interface + - dellemc.os10.os10_vrrp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git 
a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: 
"{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml new file mode 100644 index 00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + 
register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2 new file mode 100644 index 00000000..f1a85dbd --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2 @@ -0,0 +1,14 @@ +test_roles: + - dellemc.os10.os10_xstp +os10_xstp: + type: rstp + enable: true + path_cost: false + mac_flush_timer: 5 + rstp: + max_age: 7 + hello_time: 8 + forward_time: 9 + force_version: stp + bridge_priority: 8192 + mac_flush_threshold: 6 diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2 new file mode 100644 index 00000000..f6c324bd --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2 @@ -0,0 +1,12 @@ +os10_xstp: + type: rstp + enable: true + path_cost: false + mac_flush_timer: + rstp: + max_age: + hello_time: + forward_time: + force_version: stp + 
bridge_priority: + mac_flush_threshold: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2 new file mode 100644 index 00000000..07ed32db --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2 @@ -0,0 +1,16 @@ +os10_xstp: + type: rapid-pvst + enable: true + path_cost: true + mac_flush_timer: 5 + pvst: + vlan: + - range_or_id: 10 + max_age: 10 + enable: true + hello_time: 8 + forward_time: 8 + bridge_priority: 8192 + mac_flush_threshold: 6 + root: secondary + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2 new file mode 100644 index 00000000..82469210 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2 @@ -0,0 +1,16 @@ +os10_xstp: + type: rapid-pvst + enable: true + path_cost: false + mac_flush_timer: + pvst: + vlan: + - range_or_id: 10 + max_age: + enable: + hello_time: + forward_time: + bridge_priority: + mac_flush_threshold: + root: + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2 new file mode 100644 index 00000000..ded2976b --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2 @@ -0,0 +1,24 @@ +os10_xstp: + type: mst + enable: true + 
path_cost: true + mac_flush_timer: 5 + mstp: + max_age: 8 + max_hops: 7 + hello_time: 8 + forward_time: 8 + force_version: stp + mstp_instances: + - number_or_range: 1 + enable: true + root: secondary + mac_flush_threshold: 8 + bridge_priority: 8192 + mst_config: + name: cfg1 + revision: 3 + cfg_list: + - number: 1 + vlans: 4,5 + vlans_state: present diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2 new file mode 100644 index 00000000..c39becdc --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2 @@ -0,0 +1,24 @@ +os10_xstp: + type: mst + enable: true + path_cost: false + mac_flush_timer: + mstp: + max_age: + max_hops: + hello_time: + forward_time: + force_version: + mstp_instances: + - number_or_range: 1 + enable: + root: + mac_flush_threshold: + bridge_priority: + mst_config: + name: cfg1 + revision: + cfg_list: + - number: 1 + vlans: 4,5 + vlans_state: absent diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2 new file mode 100644 index 00000000..2ec48bc2 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2 @@ -0,0 +1,25 @@ +os10_xstp: + type: rstp + enable: true + path_cost: true + mac_flush_timer: + intf: + {{ os10_interface_1 }}: + edge_port: true + bpdu_filter: true + bpdu_guard: true + guard: loop + enable: true + link_type: point-to-point + msti: + - instance_number: 1 + priority: 32 + cost: 1 + rstp: + priority: 32 + cost: 7 + 
vlan: + - range_or_id: 6 + priority: 16 + cost: 8 + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2 new file mode 100644 index 00000000..b28ea451 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2 @@ -0,0 +1,25 @@ +os10_xstp: + type: rstp + enable: true + path_cost: true + mac_flush_timer: + intf: + {{ os10_interface_1 }}: + edge_port: + bpdu_filter: + bpdu_guard: + guard: + enable: false + link_type: + msti: + - instance_number: 1 + priority: + cost: + rstp: + priority: + cost: + vlan: + - range_or_id: 6 + priority: + cost: + diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2 new file mode 100644 index 00000000..e01f16e4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2 @@ -0,0 +1,4 @@ +os10_interface: + {{ os10_interface_1 }}: + switchport: true + portmode: access diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2 new file mode 100644 index 00000000..e1e5ffa8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2 @@ -0,0 +1,60 @@ +os10_xstp: + type: + enable: true + path_cost: false + mac_flush_timer: + rstp: + max_age: + hello_time: + forward_time: + force_version: + bridge_priority: + mac_flush_threshold: + pvst: + 
vlan: + - range_or_id: 10 + max_age: + enable: true + hello_time: + forward_time: + bridge_priority: + mac_flush_threshold: + root: + mstp: + max_age: + max_hops: + hello_time: + forward_time: + force_version: + mstp_instances: + - number_or_range: 1 + enable: true + root: + mac_flush_threshold: + bridge_priority: + mst_config: + name: + revision: + cfg_list: + - number: 1 + vlans: 4,5 + vlans_state: absent + intf: + {{ os10_interface_1 }}: + edge_port: + bpdu_filter: + bpdu_guard: + guard: + enable: true + link_type: + msti: + - instance_number: 1 + priority: + cost: + rstp: + priority: + cost: + vlan: + - range_or_id: 6 + priority: + cost: diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml new file mode 100644 index 00000000..001043cd --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml @@ -0,0 +1,3 @@ +test_roles: + - dellemc.os10.os10_interface + - dellemc.os10.os10_xstp diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml new file mode 100644 index 00000000..88be0f20 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- { include: prepare_test_facts.yaml, tags: ['role']} +- { include: tests.yaml, tags: ['role']} diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml 
b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml new file mode 100644 index 00000000..a86b71f6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml @@ -0,0 +1,14 @@ +--- +- block: + - name: Collect interface list + import_role: + name: os10_template + tasks_from: show_ip_interface_brief.yaml + register: result + - name: Set interface list + set_fact: + os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}" + os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}" + os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}" + + when: prepare_os10_role_tests_task | default(True) | bool diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml new file mode 100644 index 00000000..395d2fe8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml @@ -0,0 +1,20 @@ +- include_role: + name: "{{ os10_role_name }}" + +- debug: msg="Role completed {{ os10_role_name }}" + notify: "os10_role completed" + +- block: + - name: "Testing idempotency for {{ os10_role_name }}" + include_role: + name: "{{ os10_role_name }}" + - name: "idempotency result for {{ os10_role_name }}" + fail: + msg: "idempotency test failed for {{ os10_role_name }}" + when: output.changed + + when: > + ( test_idempotency is defined and test_idempotency and + idempotent_roles is defined and os10_role_name in idempotent_roles and + idempotent is defined and idempotent + ) diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml new file mode 100644 index 
00000000..c84b1033 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml @@ -0,0 +1,62 @@ +- name: set test case + set_fact: + role_testcase: "{{ role_testcase_path | basename | splitext | first }}" + +- name: set test case output dir + set_fact: + testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}" + + +- name: Prepare testcase output dir + file: + path: "{{ testcase_outdir }}" + state: directory + +- name: Source testcase variables + include_vars: "{{ item }}" + vars: + params: + files: + - "{{ role_testcase }}.yaml" + paths: + - "vars" + loop: "{{ query('first_found', params, errors='ignore') }}" + +- name: Include Testcase tasks + include: "{{ role_testcase_path }}" + +- name: Identifying steps + block: + - name: Identifying steps + find: + paths: "{{ role_path }}/templates/{{ role_testcase }}/steps" + patterns: "*.j2" + register: step_files + - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}" + - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}" + when: teststeps is not defined + +- name: Check if setup step exists + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2" + ignore_errors: true + register: setup_template + +- name: Setup Testcase + include: testcase/run_test_step.yaml role_teststep=setup idempotent=false + when: setup_template.stat.exists == true + +- name: Run Test Steps + block: + - name: Run Test Steps + include: testcase/run_test_step.yaml idempotent=false + with_items: "{{ teststeps }}" + loop_control: + loop_var: role_teststep + always: + - name: Check teardown + stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2" + ignore_errors: true + register: teardown_template + - name: Run teardown + include: testcase/run_test_step.yaml role_teststep=teardown 
idempotent=false + when: teardown_template.stat.exists == true diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml new file mode 100644 index 00000000..624325e5 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml @@ -0,0 +1,26 @@ +--- +- name: Running step {{ role_teststep }} + debug: msg="{{ role_teststep }}" + +- name: Set step vars file + set_fact: + teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2" + teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml" + +- name: Preparing step variables + template: + src: "{{ teststep_var_template }}" + dest: "{{ teststep_vars_file }}" + +- name: Load step variables + include_vars: + file: "{{ teststep_vars_file }}" + +- name: Including roles for the step + include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}" + # include_role: + # name: "{{ step_role }}" + with_items: "{{ test_roles }}" + loop_control: + loop_var: step_role + when: test_roles is defined diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml new file mode 100644 index 00000000..572c2538 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml @@ -0,0 +1,14 @@ +- name: collect all test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: testcase/run_test_case.yaml + with_items: "{{ test_items }}" + loop_control: + loop_var: role_testcase_path diff --git 
a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..bfed9cf6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt @@ -0,0 +1,20 @@ +plugins/action/os10.py action-plugin-docs +plugins/action/textfsm_parser.py action-plugin-docs +plugins/modules/bgp_validate.py validate-modules:parameter-list-no-elements +plugins/modules/mtu_validate.py validate-modules:parameter-list-no-elements +plugins/modules/vlt_validate.py validate-modules:parameter-list-no-elements +plugins/modules/wiring_validate.py validate-modules:parameter-list-no-elements +plugins/modules/show_system_network_summary.py validate-modules:parameter-list-no-elements +plugins/modules/os10_config.py validate-modules:parameter-list-no-elements +plugins/modules/os10_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os10_command.py validate-modules:parameter-list-no-elements +plugins/modules/os10_config.py validate-modules:doc-default-does-not-match-spec +plugins/modules/os10_facts.py validate-modules:doc-default-does-not-match-spec +plugins/modules/os10_command.py validate-modules:doc-default-does-not-match-spec +plugins/module_utils/network/base_network_show.py import-2.6 +plugins/modules/base_xml_to_dict.py import-2.6 +plugins/modules/bgp_validate.py import-2.6 +plugins/modules/mtu_validate.py import-2.6 +plugins/modules/show_system_network_summary.py import-2.6 +plugins/modules/vlt_validate.py import-2.6 +plugins/modules/wiring_validate.py import-2.6 diff --git a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..16dc721f --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt @@ -0,0 +1,13 @@ +plugins/action/os10.py action-plugin-docs +plugins/action/textfsm_parser.py action-plugin-docs 
+plugins/modules/bgp_validate.py validate-modules:parameter-list-no-elements +plugins/modules/mtu_validate.py validate-modules:parameter-list-no-elements +plugins/modules/vlt_validate.py validate-modules:parameter-list-no-elements +plugins/modules/wiring_validate.py validate-modules:parameter-list-no-elements +plugins/modules/show_system_network_summary.py validate-modules:parameter-list-no-elements +plugins/modules/os10_config.py validate-modules:parameter-list-no-elements +plugins/modules/os10_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os10_command.py validate-modules:parameter-list-no-elements +plugins/modules/os10_config.py validate-modules:doc-default-does-not-match-spec +plugins/modules/os10_facts.py validate-modules:doc-default-does-not-match-spec +plugins/modules/os10_command.py validate-modules:doc-default-does-not-match-spec diff --git a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..ee4573d7 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt @@ -0,0 +1,3 @@ +plugins/action/os10.py action-plugin-docs +plugins/action/textfsm_parser.py action-plugin-docs +plugins/modules/show_system_network_summary.py validate-modules:missing-module-utils-import diff --git a/ansible_collections/dellemc/os10/tests/sanity/requirements.txt b/ansible_collections/dellemc/os10/tests/sanity/requirements.txt new file mode 100644 index 00000000..3e3a9669 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/sanity/requirements.txt @@ -0,0 +1,4 @@ +packaging # needed for update-bundled and changelog +sphinx ; python_version >= '3.5' # docs build requires python 3+ +sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+ +straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+ diff --git 
a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg new file mode 100644 index 00000000..83e3e891 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg @@ -0,0 +1,13 @@ +! +hostname router +! +interface ethernet1/1/2 + ip address 1.2.3.4/24 + description test string +! +interface ethernet1/1/3 + ip address 6.7.8.9/24 + description test string + shutdown +! + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg new file mode 100644 index 00000000..7303a0c4 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg @@ -0,0 +1,12 @@ +! +hostname foo +! +interface ethernet1/1/2 + no ip address +! +interface ethernet1/1/3 + ip address 6.7.8.9/24 + description test string + shutdown +! 
+ diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml new file mode 100644 index 00000000..bd13f0e6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml @@ -0,0 +1,19467 @@ + + + + + + ethernet1/1/1 + vlan1 + + + + + ethernet1/1/1 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 
0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/1 + ianaift:ethernetCsmacd + up + up + 17305068 + 14:18:77:09:ae:01 + 40000000000 + + 884475 + 0 + 0 + 5429 + 0 + 0 + 0 + 6212880 + 0 + 0 + 88684 + 0 + 0 + 0 + 0 + 94113 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 7097355 + 94113 + 0 + 0 + 88684 + 5429 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 4 + 0 + 5425 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 83258 + 0 + 5426 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 5429 + 88684 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:01 + 44 + 16272700 + 30 + 1532 + n/a + + false + false + 
false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/1 + + + + + + + + + ethernet1/1/2 + + + + + ethernet1/1/2 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 
+ + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/2 + ianaift:ethernetCsmacd + up + up + 17305094 + 14:18:77:09:ae:05 + 40000000000 + + 6220575 + 0 + 0 + 88787 + 0 + 0 + 0 + 892090 + 0 + 0 + 5523 + 0 + 0 + 0 + 0 + 94310 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 7112665 + 94310 + 0 + 0 + 5523 + 88787 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 83350 + 0 + 5437 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 77 + 6 + 5440 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 88787 + 5523 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:05 + 44 + 16306900 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + fe80::1618:77ff:fe09:ae05/64 + + + Disabled + 
00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/2 + + + + + + + + + ethernet1/1/3 + vlan1 + + + + + ethernet1/1/3 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 
3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/3 + ianaift:ethernetCsmacd + up + up + 17305120 + 14:18:77:09:ae:09 + 40000000000 + + 6396220 + 0 + 0 + 91295 + 0 + 0 + 0 + 911207 + 0 + 0 + 5593 + 0 + 0 + 0 + 0 + 96888 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 7307427 + 96888 + 0 + 0 + 5593 + 91295 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 85705 + 0 + 5590 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 4 + 0 + 5589 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 91295 + 5593 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:09 + 44 + 16764600 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/3 + + + + + + + + + ethernet1/1/4 + vlan1 + + + + + ethernet1/1/4 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto 
+ AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/4 + ianaift:ethernetCsmacd + up + up + 17305146 + 14:18:77:09:ae:0d + 40000000000 + + 919800 + 0 + 0 + 5693 + 0 + 0 + 0 + 6410845 + 0 + 0 + 91497 + 0 + 0 + 0 + 0 + 97190 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 7330645 + 97190 + 0 + 0 + 91497 + 5693 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 77 + 6 + 5610 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 85890 + 0 + 5607 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 5693 + 91497 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:0d + 44 + 16818100 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/4 + + + + + + + + + ethernet1/1/5 + vlan1 + + + + + ethernet1/1/5 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + 
phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/5 + ianaift:ethernetCsmacd + up + down + 17305172 + 14:18:77:09:ae:11 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:11 + 36 + 16913000 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/5 + + + + + + + + + ethernet1/1/6 + vlan1 + + + + + ethernet1/1/6 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + 
QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/6 + ianaift:ethernetCsmacd + up + down + 17305198 + 14:18:77:09:ae:15 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:15 + 36 + 16950900 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/6 + + + + + + + + + ethernet1/1/7 + vlan1 + + + + + ethernet1/1/7 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + 
+ AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/7 + ianaift:ethernetCsmacd + up + down + 17305224 + 14:18:77:09:ae:19 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:19 + 36 + 16995200 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/7 + + + + + + + + + ethernet1/1/8 + + + + + ethernet1/1/8 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/8 + ianaift:ethernetCsmacd + up + down + 17305250 + 14:18:77:09:ae:1d + 0 + + 0 + 0 + 0 + 0 + 0 + 
0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:1d + 36 + 17021600 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/8 + + + + + + + + + ethernet1/1/9 + vlan1 + + + + + ethernet1/1/9 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/9 + ianaift:ethernetCsmacd + up + down + 17305276 + 14:18:77:09:ae:21 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 
+ + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:21 + 36 + 17027900 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/9 + + + + + + + + + ethernet1/1/10 + vlan1 + + + + + ethernet1/1/10 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/10 + ianaift:ethernetCsmacd + up + down + 17305302 + 14:18:77:09:ae:25 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:25 + 36 + 17039500 + 30 + 1532 + n/a + + false + false + false + 
false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/10 + + + + + + + + + ethernet1/1/11 + vlan1 + + + + + ethernet1/1/11 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/11 + ianaift:ethernetCsmacd + up + down + 17305328 + 14:18:77:09:ae:29 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:29 + 37 + 17048300 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/11 + + + + + + + + + ethernet1/1/13 + + + + + 
ethernet1/1/13 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + + 4 + false + + false + 0 + + 120 + + 1 + 200 + + 4.1.1.1 + 4.1.1.2 + false + no-authentication + ip + + 3 + 25 + + + + + + 2 + false + + false + 0 + + 120 + + 1 + 200 + + fe80::10 + 3001:4828:5808:ffa3::9 + false + no-authentication + ip + + 3 + 25 + + + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 
3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/13 + ianaift:ethernetCsmacd + up + down + 17305380 + 14:18:77:09:ae:31 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:31 + 37 + 17074100 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/13 + + + + + + + + + 
ethernet1/1/16 + + + + + ethernet1/1/16 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + 
+ + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/16 + ianaift:ethernetCsmacd + up + down + 17305458 + 14:18:77:09:ae:34 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:34 + 37 + 17087400 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/16 + + + + + + + + + ethernet1/1/17 + + + + + ethernet1/1/17 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + 
AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 
3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/17 + ianaift:ethernetCsmacd + up + down + 17305484 + 14:18:77:09:ae:35 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:35 + 37 + 17090400 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/17 + + + + + + + + + ethernet1/1/18 + + + + + ethernet1/1/18 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 
3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 
3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/18 + ianaift:ethernetCsmacd + up + down + 17305510 + 14:18:77:09:ae:39 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:39 + 37 + 17116100 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/18 + + + + + + + + + ethernet1/1/19 + + + + + ethernet1/1/19 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2DISABLED + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 
+ + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 
3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/19 + ianaift:ethernetCsmacd + up + down + 17305536 + 14:18:77:09:ae:3d + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:3d + 37 + 17128600 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/19 + + + + + + + + + ethernet1/1/20 + vlan1 + + + + + ethernet1/1/20 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + 
+ + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/20 + ianaift:ethernetCsmacd + up + down + 17305562 + 14:18:77:09:ae:41 + 
0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:41 + 37 + 17135100 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/20 + + + + + + + + + ethernet1/1/21 + vlan1 + + + + + ethernet1/1/21 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + 
+ 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/21 + ianaift:ethernetCsmacd + up + down + 17305588 + 14:18:77:09:ae:45 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 
+ 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:45 + 37 + 17145300 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/21 + + + + + + + + + ethernet1/1/23 + vlan1 + + + + + ethernet1/1/23 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 
3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/23 + ianaift:ethernetCsmacd + up + down + 17305640 + 14:18:77:09:ae:4d + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:4d + 37 + 17164100 + 30 + 1532 + n/a + + 
false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/23 + + + + + + + + + ethernet1/1/24 + vlan1 + + + + + ethernet1/1/24 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + 
+ 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/24 + ianaift:ethernetCsmacd + up + down + 17305666 + 14:18:77:09:ae:51 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:51 + 38 + 17164900 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/24 + + + + + + + + + 
ethernet1/1/25 + vlan1 + + + + + ethernet1/1/25 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + 
+ + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/25 + ianaift:ethernetCsmacd + up + down + 17305692 + 14:18:77:09:ae:55 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:55 + 38 + 17424700 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/25 + + + + + + + + + ethernet1/1/26 + vlan1 + + + + + ethernet1/1/26 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + 
AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 
3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/26 + ianaift:ethernetCsmacd + up + down + 17305718 + 14:18:77:09:ae:59 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:59 + 38 + 17431600 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/26 + + + + + + + + + ethernet1/1/27 + vlan1 + + + + + ethernet1/1/27 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 
3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 
3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/27 + ianaift:ethernetCsmacd + up + down + 17305744 + 14:18:77:09:ae:5d + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:5d + 38 + 17470200 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/27 + + + + + + + + + ethernet1/1/28 + vlan1 + + + + + ethernet1/1/28 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 
+ + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 
3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/28 + ianaift:ethernetCsmacd + up + down + 17305770 + 14:18:77:09:ae:61 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:61 + 38 + 17477600 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/28 + + + + + + + + + ethernet1/1/30 + vlan1 + + + + + ethernet1/1/30 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + 
+ + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/30 + ianaift:ethernetCsmacd + up + down + 17305822 + 14:18:77:09:ae:66 + 
0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:66 + 38 + 17491400 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/30 + + + + + + + + + ethernet1/1/31 + vlan1 + + + + + ethernet1/1/31 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + 
+ 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/31 + ianaift:ethernetCsmacd + up + down + 17305848 + 14:18:77:09:ae:67 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 
+ 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:67 + 38 + 17493000 + 30 + 1532 + n/a + + false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/31 + + + + + + + + + ethernet1/1/32 + vlan1 + + + + + ethernet1/1/32 + ianaift:ethernetCsmacd + true + 1532 + HW + MODE_L2 + 299 + auto + AUTO + true + + true + false + + + true + true + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 
3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + ethernet1/1/32 + ianaift:ethernetCsmacd + up + down + 17305874 + 14:18:77:09:ae:68 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:68 + 38 + 17498900 + 30 + 1532 + n/a + + 
false + false + false + false + 0 + + 0 + 1 + + + 1 + 2 + + + 2 + 3 + + + 3 + 4 + + + 4 + 5 + + + 5 + 10 + + + 6 + 25 + + + 7 + 50 + + + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + ethernet1/1/32 + + + + + + + + + + mgmt1/1/1 + base-if:management + true + 1500 + HW + auto + AUTO + true + +
+ 10.16.148.144/16 +
+
+ + true + true + + + true + true + +
+
+ + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 
3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + +
+ + + + mgmt1/1/1 + base-if:management + up + up + 35454736 + 00:a0:c9:00:00:00 + 1000000000 + + 74473686 + 0 + 0 + 0 + 66 + 0 + 0 + 17129927 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 889900 + 52776 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + 10MBPS + 100MBPS + 1GIGE + NOT_SUPPORTED + not-supported + default + 00:a0:c9:00:00:00 + 38 + 17509300 + 30 + 1532 + + manual-cfg + 10.16.148.144/16 + + + true + fe80::2a0:c9ff:fe00:0/64 + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + mgmt1/1/1 + + +
+ + + + + + + vlan1 + ianaift:l2vlan + true + 1532 + HW + DATA + ethernet1/1/1 + ethernet1/1/3 + ethernet1/1/4 + ethernet1/1/5 + ethernet1/1/6 + ethernet1/1/7 + ethernet1/1/9 + ethernet1/1/10 + ethernet1/1/11 + ethernet1/1/20 + ethernet1/1/21 + ethernet1/1/22 + ethernet1/1/23 + ethernet1/1/24 + ethernet1/1/25 + ethernet1/1/26 + ethernet1/1/27 + ethernet1/1/28 + ethernet1/1/29 + ethernet1/1/30 + ethernet1/1/31 + ethernet1/1/32 + port-channel12 + false + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + vlan1 + ianaift:l2vlan + up + up + 69208865 + 14:18:77:09:af:01 + 10000000000 + + 8587506 + 0 + 0 + 0 + 9015054 + 0 + 0 + 0 + 0 + 0 + 107065 + 107059 + + 14:18:77:09:af:01 + 44 + 17517200 + 1532 + DATA + ethernet1/1/1 + ethernet1/1/3 + ethernet1/1/4 + ethernet1/1/5 + ethernet1/1/6 + ethernet1/1/7 + ethernet1/1/9 + ethernet1/1/10 + ethernet1/1/11 + ethernet1/1/20 + ethernet1/1/21 + ethernet1/1/22 + ethernet1/1/23 + ethernet1/1/24 + ethernet1/1/25 + ethernet1/1/26 + ethernet1/1/27 + ethernet1/1/28 + ethernet1/1/29 + ethernet1/1/30 + ethernet1/1/31 + ethernet1/1/32 + port-channel12 + false + + + true + fe80::1618:77ff:fe09:af01/64 + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + vlan1 + + + + + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 
131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false 
+ + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + vlan4094 + ianaift:l2vlan + up + down + 69212958 + 14:18:77:09:af:01 + 0 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 14:18:77:09:af:01 + 3992 + 17135400 + 1532 + INTERNAL + true + + + true + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + vlan4094 + + + + + + + + + port-channel12 + vlan1 + + + + + port-channel12 + ianaift:ieee8023adLag + true + 1532 + HW + MODE_L2 + 299 + 1 + STATIC + false + + 13 + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + port-channel12 + ianaift:ieee8023adLag + up + down + 85886092 + 14:18:77:09:ae:8d + 0 + + 0 + 0 + 0 + 0 + 0 + 
0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + false + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + false + NOT_SUPPORTED + not-supported + 14:18:77:09:ae:8d + 3750 + 17160800 + 30 + 1532 + 1 + STATIC + 0 + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + port-channel12 + + + + + + + + + + null0 + base-if:null + true + 1532 + HW + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + null0 + base-if:null + up + up + 119690512 + 39 + 17549600 + + + false + + + Disabled + 00:00:00:00:00:00:00:00 + 00:00:00 + 0 + + + + + null0 + + + + + + + + + + 60 + + + + + + + phy-eth1/1/1 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/2 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/3 + + 38 + + + 131137546 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/4 + + 38 + + + 3857532632 + 0.0 + + QSFP-PLUS + AR_QSFP_40GBASE_CR4_1M + true + + + phy-eth1/1/5 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/6 + + 0 + + + 3758089944 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/7 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/8 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/9 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/10 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/11 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/12 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/13 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/14 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/15 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/16 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/17 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/18 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/19 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/20 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/21 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/22 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/23 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/24 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/25 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/26 + + 0 + + + 3758089944 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/27 + + 0 + + + 3840747224 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/28 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/29 + + 0 + + + 3857532632 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/30 + + 0 + + + 3857532632 + 0.0 + + 
AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/31 + + 0 + + + 3865925336 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + phy-eth1/1/32 + + 0 + + + 3849139928 + 0.0 + + AR_POPTICS_NOTPRESENT + false + + + + + + 162762 + + + + + + + + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml new file mode 100644 index 00000000..de3ad4ed --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml @@ -0,0 +1,855 @@ + + + + + + ethernet1/1/1 + ianaift:ethernetCsmacd + up + up + 17305068 + 14:18:77:09:ae:01 + 40000000000 + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:01 + 44 + 17570800 + 30 + 1532 + n/a + + + 330300 + 4 + 1 + 17305068 + 260 + FBh3Ca4A + + ZXRoZXJuZXQxLzEvMw== + + mac-address + interface-alias + 120 + 172395 + 107 + OS10 + ethernet1/1/3 + os10 + 1 + 1532 + false + router bridge repeater + router bridge repeater + true + false + true + true + b-1000base-t + + + + + ethernet1/1/2 + ianaift:ethernetCsmacd + up + up + 17305094 + 14:18:77:09:ae:05 + 40000000000 + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:05 + 44 + 17570700 + 30 + 1532 + n/a + + + 330300 + 5 + 1 + 17305094 + 264 + FBh3Ca4A + + ZXRoZXJuZXQxLzEvNA== + + mac-address + interface-alias + 120 + 172395 + 107 + OS10 + ethernet1/1/4 + os10 + 1 + 1532 + false + router bridge repeater + router bridge repeater + true + false + true + true + b-1000base-t + + + + + ethernet1/1/3 + ianaift:ethernetCsmacd + up + up + 17305120 + 14:18:77:09:ae:09 + 40000000000 + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:09 + 44 + 17570700 + 30 + 1532 + n/a + + + 330300 + 2 + 1 + 17305120 + 268 + FBh3Ca4A + + ZXRoZXJuZXQxLzEvMQ== + + mac-address + interface-alias + 120 + 172395 + 107 + OS10 + ethernet1/1/1 + 
os10 + 1 + 1532 + false + router bridge repeater + router bridge repeater + true + false + true + true + b-1000base-t + + + + + ethernet1/1/4 + ianaift:ethernetCsmacd + up + up + 17305146 + 14:18:77:09:ae:0d + 40000000000 + true + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:0d + 44 + 17570700 + 30 + 1532 + n/a + + + 330300 + 3 + 1 + 17305146 + 272 + FBh3Ca4A + + ZXRoZXJuZXQxLzEvMg== + + mac-address + interface-alias + 120 + 172395 + 107 + OS10 + ethernet1/1/2 + os10 + 1 + 1532 + false + router bridge repeater + router bridge repeater + true + false + true + true + b-1000base-t + + + + + ethernet1/1/5 + ianaift:ethernetCsmacd + up + down + 17305172 + 14:18:77:09:ae:11 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:11 + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/6 + ianaift:ethernetCsmacd + up + down + 17305198 + 14:18:77:09:ae:15 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:15 + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/7 + ianaift:ethernetCsmacd + up + down + 17305224 + 14:18:77:09:ae:19 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:19 + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/8 + ianaift:ethernetCsmacd + up + down + 17305250 + 14:18:77:09:ae:1d + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:1d + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/9 + ianaift:ethernetCsmacd + up + down + 17305276 + 14:18:77:09:ae:21 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:21 + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/10 + ianaift:ethernetCsmacd + up + down + 17305302 + 14:18:77:09:ae:25 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:25 + 36 + 17570700 + 30 + 1532 + n/a + + + + ethernet1/1/11 + ianaift:ethernetCsmacd + up + down + 17305328 + 14:18:77:09:ae:29 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 
14:18:77:09:ae:29 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/12 + ianaift:ethernetCsmacd + up + down + 17305354 + 14:18:77:09:ae:2d + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:2d + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/13 + ianaift:ethernetCsmacd + up + down + 17305380 + 14:18:77:09:ae:31 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:31 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/14 + ianaift:ethernetCsmacd + up + down + 17305406 + 14:18:77:09:ae:32 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:32 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/15 + ianaift:ethernetCsmacd + up + down + 17305432 + 14:18:77:09:ae:33 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:33 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/16 + ianaift:ethernetCsmacd + up + down + 17305458 + 14:18:77:09:ae:34 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:34 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/17 + ianaift:ethernetCsmacd + up + down + 17305484 + 14:18:77:09:ae:35 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:35 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/18 + ianaift:ethernetCsmacd + up + down + 17305510 + 14:18:77:09:ae:39 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:39 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/19 + ianaift:ethernetCsmacd + up + down + 17305536 + 14:18:77:09:ae:3d + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:3d + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/20 + ianaift:ethernetCsmacd + up + down + 17305562 + 14:18:77:09:ae:41 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:41 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/21 + ianaift:ethernetCsmacd + up + down + 17305588 + 
14:18:77:09:ae:45 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:45 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/22 + ianaift:ethernetCsmacd + up + down + 17305614 + 14:18:77:09:ae:49 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:49 + 37 + 17570600 + 30 + 1532 + n/a + + + + ethernet1/1/23 + ianaift:ethernetCsmacd + up + down + 17305640 + 14:18:77:09:ae:4d + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:4d + 37 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/24 + ianaift:ethernetCsmacd + up + down + 17305666 + 14:18:77:09:ae:51 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:51 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/25 + ianaift:ethernetCsmacd + up + down + 17305692 + 14:18:77:09:ae:55 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:55 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/26 + ianaift:ethernetCsmacd + up + down + 17305718 + 14:18:77:09:ae:59 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:59 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/27 + ianaift:ethernetCsmacd + up + down + 17305744 + 14:18:77:09:ae:5d + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:5d + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/28 + ianaift:ethernetCsmacd + up + down + 17305770 + 14:18:77:09:ae:61 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:61 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/29 + ianaift:ethernetCsmacd + up + down + 17305796 + 14:18:77:09:ae:65 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:65 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/30 + ianaift:ethernetCsmacd + up + down + 17305822 + 14:18:77:09:ae:66 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:66 + 38 + 17570500 + 30 + 1532 + n/a + 
+ + + ethernet1/1/31 + ianaift:ethernetCsmacd + up + down + 17305848 + 14:18:77:09:ae:67 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:67 + 38 + 17570500 + 30 + 1532 + n/a + + + + ethernet1/1/32 + ianaift:ethernetCsmacd + up + down + 17305874 + 14:18:77:09:ae:68 + 0 + false + 40GIGE + BOTH_SUPPORTED + not-supported + 0MBPS + 14:18:77:09:ae:68 + 38 + 17570500 + 30 + 1532 + n/a + + + + mgmt1/1/1 + base-if:management + up + up + 35454736 + 00:a0:c9:00:00:00 + 1000000000 + false + 10MBPS + 100MBPS + 1GIGE + NOT_SUPPORTED + not-supported + default + 00:a0:c9:00:00:00 + 38 + 17570400 + 30 + 1532 + + + 3100 + 1 + 1 + 35454736 + 4 + kLEc9C9t + + VGVuR2lnYWJpdEV0aGVybmV0IDAvMA== + + mac-address + interface-name + 20 + 175667 + 15 + Dell Real Time Operating System Software. Dell Operating System Version: 2.0. Dell Application Software Version: 9.11(2.0) Copyright (c) 1999-2017Dell Inc. All Rights Reserved.Build Time: Tue Apr 25 21:22:59 2017 + TenGigabitEthernet 0/0 + swlab1-maa-tor-A2 + 148 + 0 + false + router bridge repeater + router bridge repeater + false + false + false + false + + + + + + + mgmt1/1/1 + + + + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_ new file mode 100644 index 00000000..78903b69 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_ @@ -0,0 +1 @@ +KiB Mem: 8127144 total, 2297272 used, 5829872 free, 137360 buffers diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config new file mode 100644 index 00000000..ff7ff279 --- /dev/null +++ 
b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config @@ -0,0 +1,252 @@ +! Version 10.4.0E(R1) +! Last configuration change at Jan 11 12:26:08 2018 +! +snmp-server contact http://www.dell.com/support +snmp-server host 192.0.2.1 traps version 1 c4 udp-port 5 +snmp-server host 192.0.2.1 traps version 2c c1 udp-port 4 +snmp-server host 192.0.2.2 traps version 1 c3 udp-port 162 +ip community-list expanded commex deny aaa +ip community-list standard commstd deny internet +ip community-list standard commstd permit no-advertise +ip as-path access-list accesslist deny abc +ip as-path access-list accesslist deny www +ip extcommunity-list expanded extcommex deny aaa +ip extcommunity-list standard extcommstd deny rt 22:33 +ip extcommunity-list standard extcommstd permit soo 22:33 +hostname os10 +interface breakout 1/1/1 map 40g-1x +interface breakout 1/1/2 map 40g-1x +interface breakout 1/1/3 map 40g-1x +interface breakout 1/1/4 map 40g-1x +interface breakout 1/1/5 map 40g-1x +interface breakout 1/1/6 map 40g-1x +interface breakout 1/1/7 map 40g-1x +interface breakout 1/1/8 map 40g-1x +interface breakout 1/1/9 map 40g-1x +interface breakout 1/1/10 map 40g-1x +interface breakout 1/1/11 map 40g-1x +interface breakout 1/1/12 map 40g-1x +interface breakout 1/1/13 map 40g-1x +interface breakout 1/1/14 map 40g-1x +interface breakout 1/1/15 map 40g-1x +interface breakout 1/1/16 map 40g-1x +interface breakout 1/1/17 map 40g-1x +interface breakout 1/1/18 map 40g-1x +interface breakout 1/1/19 map 40g-1x +interface breakout 1/1/20 map 40g-1x +interface breakout 1/1/21 map 40g-1x +interface breakout 1/1/22 map 40g-1x +interface breakout 1/1/23 map 40g-1x +interface breakout 1/1/24 map 40g-1x +interface breakout 1/1/25 map 40g-1x +interface breakout 1/1/26 map 40g-1x +interface breakout 1/1/27 map 40g-1x +interface breakout 1/1/28 map 40g-1x +interface breakout 1/1/29 map 40g-1x +interface breakout 1/1/30 map 40g-1x +interface breakout 1/1/31 map 
40g-1x +interface breakout 1/1/32 map 40g-1x +username admin password $6$q9QBeYjZ$jfxzVqGhkxX3smxJSH9DDz7/3OJc6m5wjF8nnLD7/VKx8SloIhp4NoGZs0I/UNwh8WVuxwfd9q4pWIgNs5BKH. role sysadmin +aaa authentication local +iscsi target port 860 +iscsi target port 3260 +hash-algorithm ecmp xor +logging console disable +vrrp delay reload 5 +vrrp version 3 +spanning-tree mode rstp +! +interface vlan1 + no shutdown +! +interface vlan100 + no shutdown +! +interface port-channel12 + no shutdown + switchport access vlan 1 + vlt-port-channel 13 +! +interface ethernet1/1/1 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/2 + no shutdown + no switchport +! +interface ethernet1/1/3 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/4 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/5 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/6 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/7 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/8 + no shutdown + no switchport +! +interface ethernet1/1/9 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/10 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/11 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/12 + no shutdown + no switchport +! +interface ethernet1/1/13 + no shutdown + no switchport + ! + vrrp-group 4 + priority 120 + track 3 priority-cost 25 + virtual-address 4.1.1.1 + virtual-address 4.1.1.2 + advertise-interval centisecs 200 + no preempt + ! + vrrp-ipv6-group 2 + priority 120 + track 3 priority-cost 25 + virtual-address 3001:4828:5808:ffa3::9 + virtual-address fe80::10 + advertise-interval centisecs 200 + no preempt +! +interface ethernet1/1/14 + no shutdown + no switchport +! +interface ethernet1/1/15 + no shutdown + no switchport +! +interface ethernet1/1/16 + no shutdown + no switchport +! +interface ethernet1/1/17 + no shutdown + no switchport +! 
+interface ethernet1/1/18 + no shutdown + no switchport +! +interface ethernet1/1/19 + no shutdown + no switchport +! +interface ethernet1/1/20 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/21 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/22 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/23 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/24 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/25 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/26 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/27 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/28 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/29 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/30 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/31 + no shutdown + switchport access vlan 1 +! +interface ethernet1/1/32 + no shutdown + switchport access vlan 1 +! +interface mgmt1/1/1 + no shutdown + ip address 10.16.148.144/16 + ipv6 address autoconfig +! +route-map test permit 1 + match ip address prefix-list testprefix + match ip address testaccess + continue 20 + set comm-list commstd delete + set comm-list commex add + set community internet + set extcommunity rt 22:33 + set extcomm-list extcommstd delete + set extcomm-list extcommex add + set ip next-hop 10.1.1.1 track-id 3 + set local-preference 1200 + set metric + 30 + set metric-type internal + set origin igp + set weight 50 +! +route-map test deny 10 + match ip address prefix-list testprefix + match ip address testaccess + set ip next-hop 10.1.1.1 track-id 3 +! +support-assist +! +policy-map type application policy-iscsi +! +class-map type application class-iscsi +! +class-map type qos class-trust +! 
+vlt-domain 1 + discovery-interface ethernet1/1/12 + vlt-mac aa:aa:aa:aa:aa:aa + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml new file mode 100644 index 00000000..bb496cc9 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml @@ -0,0 +1,194 @@ + + + + + + 1 + 14:18:77:09:ae:00 + 384 + + DELL + S6010-ON + X01 + x86_64-dell_s6010_c2538-r0 + 0088 + + + + + 1 + S6010 + S6010 + up + 10.4.0E(R1) + 32x40GbE + + DELL + S6010-ON + X01 + x86_64-dell_s6010_c2538-r0 + 0088 + 083R0P + + + + user-triggered + S6010-ON 32x40GbE QSFP+ Interface Module + false + 1 + + BIOS + 3.26.0.1 + + + System CPLD + 10 + + + Master CPLD + 9 + + + Slave CPLD + 4 + + + + 1 + fail + + + + 2 + up + UNKNOWN + + 1 + fail + 1920 + + NORMAL + + + + + + + + + + + + + 1 + up + + 1 + up + 22090 + + NORMAL + + + + F01 + + CN123456FAN100589021 + P1FAN1 + + + + + + 2 + up + + 1 + up + 22215 + + NORMAL + + + + F02 + + CN123456FAN200589031 + P2FAN2 + + + + + + 3 + up + + 1 + up + 22215 + + NORMAL + + + + F03 + + CN123456FAN300589041 + P3FAN3 + + + + + + 4 + up + + 1 + up + 22215 + + NORMAL + + + + F04 + + CN123456FAN400589051 + P4FAN4 + + + + + + 5 + up + + 1 + up + 21724 + + NORMAL + + + + F05 + + CN123456FAN500589061 + P5FAN5 + + + + + + + + + os10 + 161826 + 2018-01-12T13:42:36.20+00:00 + 2018-01-10T16:45:30+00:00 + + + + + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version new file mode 100644 index 00000000..b9aa8fea --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version @@ -0,0 +1,9 @@ +Dell EMC Networking OS10 Enterprise +Copyright (c) 1999-2017 by Dell Inc. All Rights Reserved. 
+OS Version: 10.4.0E(R1) +Build Version: 10.4.0E(R1.56) +Build Time: 2017-12-19T22:11:00-0800 +System Type: S6000-VM +Architecture: x86_64 +Up Time: 6 days 00:33:35 + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml new file mode 100644 index 00000000..50f84ab6 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml @@ -0,0 +1,27 @@ + + + + + + os10 + 162698 + 2018-01-12T13:57:08.58+00:00 + 2018-01-10T16:45:30+00:00 + + + + + 10.4.0E(R1) + Enterprise + Dell EMC Networking OS10 Enterprise + S6010-ON + Dell EMC OS10 Enterprise Edition Blueprint 1.0.0 + x86_64 + 2017-12-14T23:39:27-0800 + 10.4.0E(R1.55) + Copyright (c) 1999-2017 by Dell Inc. All Rights Reserved. + + + + + diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py new file mode 100644 index 00000000..8f990d73 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py @@ -0,0 +1,90 @@ +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import json + +from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase + + +fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') +fixture_data = {} + + +def load_fixture(name): + path = os.path.join(fixture_path, name) + + if path in fixture_data: + return fixture_data[path] + + with open(path) as f: + data = f.read() + + try: + data = json.loads(data) + except Exception: + pass + + fixture_data[path] = data + return data + + +class TestDellos10Module(ModuleTestCase): + + def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False): + + self.load_fixtures(commands) + + if failed: + result = self.failed() + self.assertTrue(result['failed'], result) + else: + result = self.changed(changed) + self.assertEqual(result['changed'], changed, result) + + if commands is not None: + if sort: + self.assertEqual(sorted(commands), sorted(result['updates']), result['updates']) + else: + self.assertEqual(commands, result['updates'], result['updates']) + + return result + + def failed(self): + with self.assertRaises(AnsibleFailJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertTrue(result['failed'], result) + return result + + def changed(self, changed=False): + with self.assertRaises(AnsibleExitJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertEqual(result['changed'], changed, result) + return result + + def load_fixtures(self, commands=None): + pass diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py new file mode 100644 index 00000000..77f6f1a7 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py @@ -0,0 +1,110 @@ +# (c) 
2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from units.compat.mock import patch +from ansible_collections.dellemc.os10.plugins.modules import os10_command +from units.modules.utils import set_module_args +from .os10_module import TestDellos10Module, load_fixture + + +class TestDellos10CommandModule(TestDellos10Module): + + module = os10_command + + def setUp(self): + super(TestDellos10CommandModule, self).setUp() + + self.mock_run_commands = patch('ansible.modules.network.os10.os10_command.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos10CommandModule, self).tearDown() + self.mock_run_commands.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item['command']) + command = obj['command'] + except ValueError: + command = item['command'] + filename = str(command).replace(' ', '_') + output.append(load_fixture(filename)) + return output + + self.run_commands.side_effect = load_from_file + + def test_os10_command_simple(self): + set_module_args(dict(commands=['show version'])) + result = self.execute_module() + 
self.assertEqual(len(result['stdout']), 1) + self.assertTrue(result['stdout'][0].startswith('Dell EMC Networking')) + + def test_os10_command_multiple(self): + set_module_args(dict(commands=['show version', 'show version'])) + result = self.execute_module() + self.assertEqual(len(result['stdout']), 2) + self.assertTrue(result['stdout'][0].startswith('Dell EMC Networking')) + + def test_os10_command_wait_for(self): + wait_for = 'result[0] contains "Dell EMC"' + set_module_args(dict(commands=['show version'], wait_for=wait_for)) + self.execute_module() + + def test_os10_command_wait_for_fails(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show version'], wait_for=wait_for)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 10) + + def test_os10_command_retries(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 2) + + def test_os10_command_match_any(self): + wait_for = ['result[0] contains "Dell EMC"', + 'result[0] contains "test string"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any')) + self.execute_module() + + def test_os10_command_match_all(self): + wait_for = ['result[0] contains "Dell EMC"', + 'result[0] contains "OS10 Enterprise"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all')) + self.execute_module() + + def test_os10_command_match_all_failure(self): + wait_for = ['result[0] contains "Dell EMC"', + 'result[0] contains "test string"'] + commands = ['show version', 'show version'] + set_module_args(dict(commands=commands, wait_for=wait_for, match='all')) + self.execute_module(failed=True) diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py 
b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py new file mode 100644 index 00000000..e38d124a --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py @@ -0,0 +1,150 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from units.compat.mock import patch +from ansible_collections.dellemc.os10.plugins.modules import os10_config +from units.modules.utils import set_module_args +from .os10_module import TestDellos10Module, load_fixture + + +class TestDellos10ConfigModule(TestDellos10Module): + + module = os10_config + + def setUp(self): + super(TestDellos10ConfigModule, self).setUp() + + self.mock_get_config = patch('ansible.modules.network.os10.os10_config.get_config') + self.get_config = self.mock_get_config.start() + + self.mock_load_config = patch('ansible.modules.network.os10.os10_config.load_config') + self.load_config = self.mock_load_config.start() + + self.mock_run_commands = patch('ansible.modules.network.os10.os10_config.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos10ConfigModule, self).tearDown() + self.mock_get_config.stop() + 
self.mock_load_config.stop() + self.mock_run_commands.stop() + + def load_fixtures(self, commands=None): + config_file = 'os10_config_config.cfg' + self.get_config.return_value = load_fixture(config_file) + self.load_config.return_value = None + + def test_os10_config_unchanged(self): + src = load_fixture('os10_config_config.cfg') + set_module_args(dict(src=src)) + self.execute_module() + + def test_os10_config_src(self): + src = load_fixture('os10_config_src.cfg') + set_module_args(dict(src=src)) + commands = ['hostname foo', 'interface ethernet1/1/2', + 'no ip address'] + self.execute_module(changed=True, commands=commands) + + def test_os10_config_backup(self): + set_module_args(dict(backup=True)) + result = self.execute_module() + self.assertIn('__backup__', result) + + def test_os10_config_save(self): + set_module_args(dict(save=True)) + self.execute_module(changed=True) + self.assertEqual(self.run_commands.call_count, 1) + self.assertEqual(self.get_config.call_count, 0) + self.assertEqual(self.load_config.call_count, 0) + args = self.run_commands.call_args[0][1] + self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0]) +# self.assertIn('copy running-config startup-config\r', args) + + def test_os10_config_lines_wo_parents(self): + set_module_args(dict(lines=['hostname foo'])) + commands = ['hostname foo'] + self.execute_module(changed=True, commands=commands) + + def test_os10_config_lines_w_parents(self): + set_module_args(dict(lines=['shutdown'], parents=['interface ethernet1/1/2'])) + commands = ['interface ethernet1/1/2', 'shutdown'] + self.execute_module(changed=True, commands=commands) + + def test_os10_config_before(self): + set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar'])) + commands = ['snmp-server contact bar', 'hostname foo'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os10_config_after(self): + set_module_args(dict(lines=['hostname foo'], 
after=['snmp-server contact bar'])) + commands = ['hostname foo', 'snmp-server contact bar'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os10_config_before_after_no_change(self): + set_module_args(dict(lines=['hostname router'], + before=['snmp-server contact bar'], + after=['snmp-server location chennai'])) + self.execute_module() + + def test_os10_config_config(self): + config = 'hostname localhost' + set_module_args(dict(lines=['hostname router'], config=config)) + commands = ['hostname router'] + self.execute_module(changed=True, commands=commands) + + def test_os10_config_replace_block(self): + lines = ['description test string', 'test string'] + parents = ['interface ethernet1/1/2'] + set_module_args(dict(lines=lines, replace='block', parents=parents)) + commands = parents + lines + self.execute_module(changed=True, commands=commands) + + def test_os10_config_match_none(self): + lines = ['hostname router'] + set_module_args(dict(lines=lines, match='none')) + self.execute_module(changed=True, commands=lines) + + def test_os10_config_match_none(self): + lines = ['ip address 1.2.3.4/24', 'description test string'] + parents = ['interface ethernet1/1/2'] + set_module_args(dict(lines=lines, parents=parents, match='none')) + commands = parents + lines + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os10_config_match_strict(self): + lines = ['ip address 1.2.3.4/24', 'description test string', + 'shutdown'] + parents = ['interface ethernet1/1/2'] + set_module_args(dict(lines=lines, parents=parents, match='strict')) + commands = parents + ['shutdown'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os10_config_match_exact(self): + lines = ['ip address 1.2.3.4/24', 'description test string', + 'shutdown'] + parents = ['interface ethernet1/1/2'] + set_module_args(dict(lines=lines, parents=parents, match='exact')) + commands = parents + lines + 
self.execute_module(changed=True, commands=commands, sort=False) diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py new file mode 100644 index 00000000..7f03eab8 --- /dev/null +++ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py @@ -0,0 +1,110 @@ +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from units.compat.mock import patch +from units.modules.utils import set_module_args +from .os10_module import TestDellos10Module, load_fixture +from ansible_collections.dellemc.os10.plugins.modules import os10_facts + + +class TestDellos10Facts(TestDellos10Module): + + module = os10_facts + + def setUp(self): + super(TestDellos10Facts, self).setUp() + + self.mock_run_command = patch( + 'ansible.modules.network.os10.os10_facts.run_commands') + self.run_command = self.mock_run_command.start() + + def tearDown(self): + super(TestDellos10Facts, self).tearDown() + + self.mock_run_command.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item) + command = obj['command'] + except ValueError: + command = item + if '|' in command: + command = str(command).replace('|', '') + filename = str(command).replace(' ', '_') + filename = filename.replace('/', '7') + filename = filename.replace(':', '_colon_') + output.append(load_fixture(filename)) + return output + + self.run_command.side_effect = load_from_file + + def test_os10_facts_gather_subset_default(self): + set_module_args(dict()) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('os10', ansible_facts['ansible_net_hostname']) + self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb']) + + def test_os10_facts_gather_subset_config(self): + 
set_module_args({'gather_subset': 'config'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('config', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('os10', ansible_facts['ansible_net_hostname']) + self.assertIn('ansible_net_config', ansible_facts) + + def test_os10_facts_gather_subset_hardware(self): + set_module_args({'gather_subset': 'hardware'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('x86_64', ansible_facts['ansible_net_cpu_arch']) + self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb']) + + def test_os10_facts_gather_subset_interfaces(self): + set_module_args({'gather_subset': 'interfaces'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset']) + self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(sorted(['mgmt1/1/1', 'ethernet1/1/4', 'ethernet1/1/2', 'ethernet1/1/3', 'ethernet1/1/1']), + sorted(list(ansible_facts['ansible_net_neighbors'].keys()))) + self.assertIn('ansible_net_interfaces', ansible_facts) diff --git a/ansible_collections/dellemc/os6/.ansible-lint b/ansible_collections/dellemc/os6/.ansible-lint new file mode 100644 index 00000000..d8c4900d --- /dev/null +++ b/ansible_collections/dellemc/os6/.ansible-lint @@ -0,0 +1,2 @@ +skip_list: + - '208' diff --git a/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml new file mode 100644 index 00000000..e5692b0a 
--- /dev/null +++ b/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml @@ -0,0 +1,33 @@ +name: CI +on: +- pull_request + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.10 + - devel + runs-on: ubuntu-latest + steps: + + - name: Check out code + uses: actions/checkout@v1 + with: + path: ansible_collections/dellemc/os6 + + - name: Set up Python 3.6 + uses: actions/setup-python@v1 + with: + python-version: 3.6 + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install ansible_collections.ansible.netcommon + run: ansible-galaxy collection install ansible.netcommon -p ../../ + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color --python 3.6 diff --git a/ansible_collections/dellemc/os6/.gitignore b/ansible_collections/dellemc/os6/.gitignore new file mode 100644 index 00000000..c6fc14ad --- /dev/null +++ b/ansible_collections/dellemc/os6/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for 
backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/dellemc/os6/COPYING b/ansible_collections/dellemc/os6/COPYING new file mode 100644 index 00000000..10926e87 --- /dev/null +++ b/ansible_collections/dellemc/os6/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/ansible_collections/dellemc/os6/FILES.json b/ansible_collections/dellemc/os6/FILES.json new file mode 100644 index 00000000..cf23654d --- /dev/null +++ b/ansible_collections/dellemc/os6/FILES.json @@ -0,0 +1,2987 @@ +{ + "files": [ + { + "format": 1, + "ftype": "dir", + "chksum_sha256": null, + "name": ".", + "chksum_type": null + }, + { + "ftype": "file", + "chksum_sha256": "0c29a1ae51505d7a5d1e7f80c5abac708f68c44c5bd96fc94f0afff2408daeca", + "name": ".ansible-lint", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/sanity", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c8c055821f8876eba6a702956071e467c89976dbf574600421b0cde8491d9744", + "name": "tests/sanity/ignore-2.9.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014", + "name": "tests/sanity/requirements.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2049a8032bd8451483531ee5f5376c9ab37a3f4f3f4194b8e04df6643cb933b1", + "name": "tests/sanity/ignore-2.10.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6cac17a5998aa8480af2ea636ab0534293389d9e8303941d33bb591009d4f2a7", + "name": "tests/sanity/ignore-2.11.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "tests/integration/targets/os6_facts", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_facts/os6_facts", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_facts/os6_facts/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_facts/os6_facts/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8317cea049586b1ac611ed0414663e3a0e6a07804f4a056ec463f3dfd8cf433d", + "name": "tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_facts/os6_facts/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "name": "tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_facts/os6_facts/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1", + "name": "tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "81ae4136ca3d879f645bc323268dd5af5a89467b0d776010965374f56ef07eb0", + "name": "tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_command", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": 
null, + "name": "tests/integration/targets/os6_command/os6_command", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_command/os6_command/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_command/os6_command/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "824ab4e366ae3b1f839019886fae66bd8be5852ec91ecc40490437562df4aa70", + "name": "tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d3c99fb4690aa3f479f810768bcb8a147b00ec579f8581fdfde66fedc3a00e4c", + "name": "tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ceca17eebf5d04dcc3ec39adf08a8291d71b30e17a65b16f02c1a278b165c254", + "name": "tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "dd6945de4ad85b2fa1373aa9c167423b41ba6ab8cd7cd766b41dea238f5518cb", + "name": "tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "aed021038fc74a5d58e1935744f8eec94725f56464f7a70aa52f43d17ed6019a", + "name": "tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_command/os6_command/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "name": "tests/integration/targets/os6_command/os6_command/defaults/main.yaml", 
+ "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_command/os6_command/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1", + "name": "tests/integration/targets/os6_command/os6_command/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0c3d448b4f0a8de268734dd08d79db1ab073d9402de62d777d2d9f79340c05db", + "name": "tests/integration/targets/os6_command/os6_command/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config/os6_config", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config/os6_config/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config/os6_config/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a8acbe90eb42e7161e40e560bb8ae6ef38b59992505d87390d0871fa6e8f557c", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e6eb3df6c455d89e9e143cb424b15ccfbdf7beef726fb5ccee09f1485b146601", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a5af25d5e357ad36a366a00a494d6b45b6a6a484f0d278013c0b8923d2d83c58", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml", + "chksum_type": "sha256", + "format": 1 
+ }, + { + "ftype": "file", + "chksum_sha256": "b602bf2052373f5e7e9df68adabec84d52c060842d65295907c687ca278b55a8", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1fe97966921c1e53b7ec280a1d7d7d232d0393b8a37cc089bb5c52cfd1cab71c", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c79daefb569c4128eb236e734e97de568e030ee98ed1f45d0cdd0d62570c1131", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "24225e8b46caaa034d2b40eb50591c6022ad3fd825467a1dcef84ad14eae3777", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8f3367a21641a0f445cff8434becc77cbc852670b29576ecde56371fc574ff5a", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6f509679767b1c8467bcbc72f419ac24fad67d697181f4d7c1c1515373df5ad1", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2f0876fb112582e491b8c771901f0c4abd15c2481ee1e19aca53509596032335", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "73fb7bbde923be1d01019de5a546137572ddbdd36acc4301dd4452d1faa77171", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"f75521089a6b55df79214fc31e7b4b676999d191ebabb210503256901ddfe73a", + "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config/os6_config/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "name": "tests/integration/targets/os6_config/os6_config/defaults/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os6_config/os6_config/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1", + "name": "tests/integration/targets/os6_config/os6_config/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0c3d448b4f0a8de268734dd08d79db1ab073d9402de62d777d2d9f79340c05db", + "name": "tests/integration/targets/os6_config/os6_config/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600", + "name": "tests/.gitignore", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network/os6", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"140157fdc99fb9e439a4d3be0aad737aaceafcb6da8799c90243d548315158b8", + "name": "tests/unit/modules/network/os6/test_os6_command.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "tests/unit/modules/network/os6/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6c2284a2c56cb6fa05ecb3e9fb49d957309c0bd2119c6ae351d9c71eb0a3527d", + "name": "tests/unit/modules/network/os6/test_os6_facts.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8c42ab3958dfa26f512a20fcce57a261601797f75f8563df7ba8acb030e1af1c", + "name": "tests/unit/modules/network/os6/os6_module.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network/os6/fixtures", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9b4d1295a7098ece89dcfbdd241327022d0994c1ab53adbda8a71d43ce80c719", + "name": "tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "09a6e09bcde3874bbd0755a1a2842b6356c8b9a7f1c053c1fedcdd3423bcb4e1", + "name": "tests/unit/modules/network/os6/fixtures/os6_config_src.cfg", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "dbead93951c6ae31171cbe355dd89d88a862a0e27ba9911facc961056ddaf2d6", + "name": "tests/unit/modules/network/os6/fixtures/show_interfaces", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c4c6643ce44ee771014c871eccf320921efe5e6d9bd2d03a0940739102228f78", + "name": "tests/unit/modules/network/os6/fixtures/show_memory_cpu", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ece82c342beda30022b119fd410c364edeb54d55147d9f0c2a6b1afbe88f88cf", 
+ "name": "tests/unit/modules/network/os6/fixtures/show_interfaces_status", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "dd90ed90e19d6d7f48492f6de1f0cea6f5775c079abf4da219aa851cebcb0cfa", + "name": "tests/unit/modules/network/os6/fixtures/show_version", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "175c44bf6d745d134eaec45f1422e983b498ad61785ab963d7722b1cfa94285b", + "name": "tests/unit/modules/network/os6/fixtures/show_ip_int", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6893a3c5f2ef207b48eb4374df58e66c8cc390da3413a7168eb3d0207dc4cad4", + "name": "tests/unit/modules/network/os6/fixtures/os6_config_config.cfg", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "db4102606a12c3c3c3b72d603a7289caff2c8072210368cf5f6bba0d49f3e12f", + "name": "tests/unit/modules/network/os6/fixtures/show_running-config", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "414b84267a6c20d6b8d7f67eb9fac07c357320d899f26dd0a784eb4580d0ddae", + "name": "tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ac27540a277fa3b1094c0d0caf9581955b411f1d09f17774607e245a58f498d3", + "name": "tests/unit/modules/network/os6/fixtures/show_lldp", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "924e55fa0c98396d7fc365ddbd75df2037f73bbb21729433408ed61bda71ce5c", + "name": "tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "952a955b0a6cb013c968737f63ecda29abf0449f34f3c39393b0b242781a8c2f", + "name": "tests/unit/modules/network/os6/test_os6_config.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + 
"name": "meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6ba2d44d2272d8dd4c24b362a95ed270bee2105f7ade0150045f183270d1fc7c", + "name": "meta/runtime.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": ".github", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": ".github/workflows", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d2b86353e3881830dab4a73db505985b635a647a3288e1987d9069e3905ae867", + "name": ".github/workflows/ansible-test.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/module_utils", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/module_utils/network", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9b4ad754c7fd54be06cef308e2a6296194c708248905fc3958e316ecb6f9fdff", + "name": "plugins/module_utils/network/os6.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/module_utils/network/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/action", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "417ab2baa65d27eefc01cd6c130f2930f09dadfc140eeceeddca3cedb7bb11b1", + "name": "plugins/action/os6.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/action/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "plugins/terminal", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8ac39b1c679da46a21dc582100e3cb12fb56b6fe599464e615a08840c0b48684", + "name": "plugins/terminal/os6.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/terminal/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/cliconf", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4e9d0c393cbd23b0b305b85cbf2b15b73cd996d4f8ab75e0f6a175ec6f400ab1", + "name": "plugins/cliconf/os6.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/cliconf/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/doc_fragments", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "051fd978ce9149eed8d1f82210faa9d09cbbaadd7440c76e01f7a2a8bfcb47bb", + "name": "plugins/doc_fragments/os6.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/doc_fragments/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/modules", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/modules/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "83a17696f13b1a1b4a8f4c6c64d97a6febfaed6a8a2aa8480a248ee606c7e7b2", + "name": "plugins/modules/os6_command.py", + 
"chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "455b54c0a3026a62aa614672d1d9b1cfc46f3bb4e55d8afe0f5210791aa8b36a", + "name": "plugins/modules/os6_facts.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f21de03ce85e6feb7a2057173cd9ed98babd669a6c383232fdd33952e5edae20", + "name": "plugins/modules/os6_config.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", + "name": "COPYING", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720", + "name": ".gitignore", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "changelogs", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2b6f825d35f7a2b89f5ff73b498eeccb10165f423b5d4babbb7c1f5aaabfbb49", + "name": "changelogs/config.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "358846a679247201321be96384821e4d2f6ecf4d6f2f8bf093efc97c21dd399e", + "name": "changelogs/changelog.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1b8442430a6cb14a263f40104bc78d65df935398f7175b91f5fee40fead79cf8", + "name": "changelogs/CHANGELOG.rst", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "docs", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6a6c2890becf81f87d57ed5da9c8c3bc6872c8a7eeef4e24fcb9bd7fba1c0471", + "name": "docs/os6_bgp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5a6ebdb3535baa818835da111f9c89c9eadfc0ffac539f42ad3f7b536209d490", + "name": "docs/os6_qos.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + 
"chksum_sha256": "944f16070757f6e903f717dfdfdfb6256faafb1c49e9ca88c6c4565b119eae87", + "name": "docs/os6_snmp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "05ef9ab09f593d61256adc8a96fd2d4e3f8a2146f593969e7563907d09eb20b4", + "name": "docs/os6_xstp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ac44a6208cf2c79612d543f45b0537752487b31272c26821f9633546f2c1b6c6", + "name": "docs/roles.rst", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9a95d328cf1bf27671bba9df1534e171a3284f2ee2423b4cccd0f7b769bb4dd8", + "name": "docs/os6_interface.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ee8ceb504133f6d404a928073786b95f733a86e697aae7a0bf367ece75c3fe71", + "name": "docs/os6_acl.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6186b5006e3bb89a11e8c7a4fc7fbb69d0f559db5b8e875497e8400dd28ae023", + "name": "docs/os6_lldp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2bd34914da5ee317c0dce534b3eb907c33cefb1d2408e79a530d3155f42e673e", + "name": "docs/os6_vlan.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "15551d1995afdb953a0e5bd532b8ff219708236ab473b9a837ac5cdc41fc6ed5", + "name": "docs/os6_ntp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f067df9f87921cd7ba6f9bf4437fb2919111a44e619f476cdc3df3e2f3b79f8d", + "name": "docs/os6_lag.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "82184a0e54c3f7d7cc10fade2f2ec306db907284622ba149cd989155a9488a19", + "name": "docs/os6_vrrp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "263c5fc6dd50f3d00e66aa38a06710c3e754a8248d70e4cda0b1971d3be69fcd", + "name": "docs/os6_logging.md", + "chksum_type": 
"sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "31f3bfd5a7c28cdf3f614929a7592e6794d1aeb1ad44ad0dbf063a30219c4aad", + "name": "docs/os6_users.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b3b0a11e433186c5452577abcb41cf9dc18d36633bc3c826196f2c768255ccfd", + "name": "docs/os6_system.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7cb9a2fa4b0d1b5393ac7e745fa81668f1977a6a6e9e9ca7a5970562c46829d2", + "name": "docs/os6_aaa.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2f14c0c53085dd3bd638b743c0633f3e033864ee15199b83478050bda953fa8b", + "name": "README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks/ibgp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks/ibgp/group_vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a4dcd6b5ab317541bc24f9ff0125cf67f9d421a6bc5a6af41105b206d5313a79", + "name": "playbooks/ibgp/group_vars/all", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9d3f7db46fb66d404ef752cf999295fcf7b0d22e2cd0f5dc953958c1ecf3e9d9", + "name": "playbooks/ibgp/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d1819a593a0da5bfb520f9cc0016d6f9a772d91b9f5fd2067f7dc9956d7dce17", + "name": "playbooks/ibgp/os6switch.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8e4ffdb53fb7979e97567fece37df85405c12e11da9af3b03d9a91bf3c91e62a", + "name": "playbooks/ibgp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": 
"playbooks/ibgp/host_vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "809b97cd311b1815f41d16edfad0ce38b3f6e71238f139a8ac349a265d8f3b54", + "name": "playbooks/ibgp/host_vars/switch1.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a9f9a7e49808177dc5887e34711c3123581c7007fa3f582242f2cdedc5dad682", + "name": "playbooks/ibgp/host_vars/switch2.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "803c5e860e5d8a7eb300be934ce8cbd68e3e869329f9392df91b4f71d31d8a35", + "name": "roles/os6_xstp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_xstp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_xstp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6ce58745ddc603524750b1e333257ba3ec441f83f19afd26fdf2e7f0add9dfb9", + "name": "roles/os6_xstp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "372224afb6413913603cb411f026c3b45d8d6a968e66a215324074f022f5850b", + "name": "roles/os6_xstp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "roles/os6_xstp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "546039128bbdb58e82bd748f879928d0454aa88e72bcea47c4a151fdf23286e9", + "name": "roles/os6_xstp/templates/os6_xstp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e104eb31847e55c38c3bb29947a64ccc654403d8b4a32c65f9251fd851791e31", + "name": "roles/os6_xstp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0031377870ad92e182c393f50f6246dc3f531e28f97652a1d047bbe9c1a3cd80", + "name": "roles/os6_xstp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "be4219c0315e68d60560105f4f311d2d38ffe4abc7a0243392aa626e3954450c", + "name": "roles/os6_xstp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1346c427518f75ebeaa5e971bfdb1e91f50a8a8a40d02c805385b6d9784cac01", + "name": "roles/os6_xstp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_xstp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1790db61b96fb9f8c3e5fc7463c6ddfb2f267b8ca8dceaa0e2677f0764dd7020", + "name": "roles/os6_xstp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/handlers", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a8842aa89c1070a6a4046429bdf96d68fa8fcc7b5f3ce5cc8fe1e3aed69ebd1e", + "name": "roles/os6_users/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_users/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_users/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3826d6ef51782ac0fa23bd7e294af5cadc7621c70c9da81f14e7d020f5f67eb2", + "name": "roles/os6_users/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8152c65af2b224d448cd01f4bb71ed055f4cb68d290c8add5ff848ca018b87c1", + "name": "roles/os6_users/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6475570465d1c051d3d2e8726367a81ef84eabea5858fa5e126f74da11f87cd2", + "name": "roles/os6_users/templates/os6_users.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a573eb01484b13cead3c181c38a1de473ab9010dbb70e54186e812b4bf6a6d21", + "name": "roles/os6_users/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"ee1e43cce86751cb7faca77c654c0c6a13755d27a8c659a740e831566f65fd28", + "name": "roles/os6_users/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "40bfec1ec536a53cc3eb0a85eef64b8db0a414aa86d12818e3807f8f288e9026", + "name": "roles/os6_users/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "08c816f5389fe337cb524d11a7cc0f39d73fabb43bb228122c4fddec77175f6e", + "name": "roles/os6_users/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_users/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "90d00857c5b7af44e5d17fcf65caf4fb1c75a39fa3298b775063f9d30780276a", + "name": "roles/os6_users/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7e63a91921eee58c938b7f543019705fa270472fe85ad8b9d87dc5a86d2046ff", + "name": "roles/os6_system/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_system/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_system/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": 
"file", + "chksum_sha256": "821cf9c0f6e08b6ec3b90ac5fcbf62dfc71780a83d0f8b71af536e28230793e8", + "name": "roles/os6_system/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9113d5ead08435c38a153c1aaa0613a5fdd0d27f734e1a45ae5e7282c96423ce", + "name": "roles/os6_system/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "780b3fa89934626febc37bd06b94f95a994b792cef2e3aa2891d486c155be436", + "name": "roles/os6_system/templates/os6_system.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "adac9abe9d800edad25875485db442e6b99a3f5ea46105df5085246ac71dbf72", + "name": "roles/os6_system/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0d244afe331461734522bb38ca0e2e1b59aaa06ea43489b235ff8d9cb79d1364", + "name": "roles/os6_system/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "aca109e63d63e9833221cfabcc46b3d8cf28cad129c9a4878f0139e484193535", + "name": "roles/os6_system/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "43f1e0c3a3418f33b7cb2cbf8c500b56ac3b7520c144367698c008e367cb57ae", + "name": "roles/os6_system/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_system/tasks", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "04a65ba21c0d5c1a224b3c844b06dd07c1fe70fedb3f963b087e578791f72285", + "name": "roles/os6_system/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d9521a1781f5f231c12566af9f6b17175e595b469596a038a7289e92e15d5e76", + "name": "roles/os6_logging/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_logging/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_logging/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a8ea92c60561762438c369c8332255d03cabcd3ca5002bde7ddb0addfa586992", + "name": "roles/os6_logging/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "be69ac8448f04d06a0c54dde157d1c4c76f1f641a49ad588c64102bdaf3e67c3", + "name": "roles/os6_logging/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "96c8f0e9b76943c27a02befaa17b3c0ecc878924f8f84572f2482296f4f2de7e", + "name": "roles/os6_logging/templates/os6_logging.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "roles/os6_logging/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1ca704c754fdd9ac5ef40e6f26bef56aedd9ffb4a43179efcd7d9865a9ad31e1", + "name": "roles/os6_logging/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "aba3fe03dfddec68b995081167c8fd268206386b6f5815ec11bec8d54e5a4080", + "name": "roles/os6_logging/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "034634e1325cc86a086438b12880fb70c2b4ca5730547f5edfa812964f06165f", + "name": "roles/os6_logging/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "94d552e53d5213567f56cfead012518b68162b5fceb70e657731bf6ebd477c70", + "name": "roles/os6_logging/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_logging/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c6cf973492367a4b3255157408bb5d0b53283a7db01c80c548f1e6b6cdc1a9d7", + "name": "roles/os6_logging/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "82ec73f254aead6543bc058687405e7097c20aec5cbe19433eef54ce1e259ba7", + "name": "roles/os6_snmp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_snmp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_snmp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0e7c27518f72dfbcfc15802f8a26d8381a8dae5ef38dab5f54ba5fac7ca083b1", + "name": "roles/os6_snmp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "19e27ac25bfeadd3a26601f61993620e28a890fbe2ef811304e5e252e7be0516", + "name": "roles/os6_snmp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "763907e3f886c1c57f69f7e427147838e111c81229624ff3e0e7c52f13d3a423", + "name": "roles/os6_snmp/templates/os6_snmp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6d2b78261f5fff90d93b85804400100f843e1fa11d14db07e4c7386fc5d74260", + "name": "roles/os6_snmp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5a075e4051133ed3cb59a4146b14b7dd67265060e1fd724d17c890c3b0f5b57d", + "name": "roles/os6_snmp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6d83dce278e238db0f9e04d56b60857cf7e49b72e6d6c71bdea61fc446516030", + "name": 
"roles/os6_snmp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2201e5200b746937a66a21fcc93ac8cd8b15c0c916364e5c4c7a71035788d527", + "name": "roles/os6_snmp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_snmp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "06e8ffcf45a3945bb9e712e76913f95e5020231a31afec6724e8ae1cdce2aaad", + "name": "roles/os6_snmp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "da5e40cdaa1b7a69c4054b8d942793f93aa785c82c9da1ce415424ddecbc919d", + "name": "roles/os6_lldp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_lldp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_lldp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d2693a92653a238f48e00a37047a95386d1b02f3115095fae8b8fe606473e769", + "name": "roles/os6_lldp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0dbfa24c6644b5a06a598dd51521c4524b22d5aa20215ccf9a3a84aaafcc7061", 
+ "name": "roles/os6_lldp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9d372a3a9d65866666088e09d2dc20e397fe3dc4fbcbd3ea436f9d86f3a4b5fc", + "name": "roles/os6_lldp/templates/os6_lldp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "319fa0a08bdccbe50ffd946b199791cea0681413777814a2b360545220d2e711", + "name": "roles/os6_lldp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0921c0bbcff88263cd4a1297bad9b1592c827b79f309be56f75a823d22f9192c", + "name": "roles/os6_lldp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2e11e57e4b40bfb99c0ad7c38168b843fec9cf864953ff04e2bf78c73176e748", + "name": "roles/os6_lldp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d41badac74d65ae5b358513b95a0a5056967df584346aec0509b30cea1e7e746", + "name": "roles/os6_lldp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lldp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "61888d51ad1c72e82c4b02bb4be5eb5e254d8f853659f8537642b60d7f25b7a7", + "name": "roles/os6_lldp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "74d6275a2afaf7fd1f23f91fc4f9f49a03d40d3626b9db180806c2cecf340c52", + "name": "roles/os6_vrrp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_vrrp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_vrrp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "da3347db91a9316e25d2b47998a9124d89c8bb1739c70e9b97d594e9cffda9bb", + "name": "roles/os6_vrrp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1176136a75c61f0d9267805b41a1274332057eca97c97cb234890cbb277be02e", + "name": "roles/os6_vrrp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8e91701f7b51dcad75504f0172806ef6f65dafb6cb246e033dd8ee8a56f5fe6f", + "name": "roles/os6_vrrp/templates/os6_vrrp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4d75afdb62b5f59f0ebdd88038a48c2fb1c2cc5ecb0547184186a4f031f06872", + "name": "roles/os6_vrrp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + 
"name": "roles/os6_vrrp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3b60c1578bd9a8dc29ef85e5c873b59edeaf4da6bd6720cfaa707ec4d83a1f2a", + "name": "roles/os6_vrrp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c290c23bb9b8c2cfae65e0b00d01817cab1c3b9f4e7e94f7ca45f3d592f33d1b", + "name": "roles/os6_vrrp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "71de967c3d4a1b11dc36e95c6258fdb8b0aef6222c759fa4957bfc8eda3c314b", + "name": "roles/os6_vrrp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vrrp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "052c782bbb023db26ccd40d93254b5b2af7bf6b124426e25d594239f415140f2", + "name": "roles/os6_vrrp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c2732fdca439d06e069471d14f8a87a37faee828b19e613e0312dd25363127b4", + "name": "roles/os6_vlan/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_vlan/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": 
"roles/os6_vlan/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "96e149fd5329afa78c33edc35ab347914ee95afa22e0bf7c62fa31aeef464d56", + "name": "roles/os6_vlan/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6c06f4e556311c5a9dc74f2e1914164d6333c59fc620b989c205418c8d88f33b", + "name": "roles/os6_vlan/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6645c1ecbc6555841a261e14863b1d87f9631ade8a29fb56e866192d70cc0759", + "name": "roles/os6_vlan/templates/os6_vlan.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "67289fe1f09bc1ed85278eb5dc511097363d1fc753be3578f902de6e126b55f7", + "name": "roles/os6_vlan/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f1aebe1ba927d520109c7d3bfc483574abc369988f2524663ddc5aaba4c66d2f", + "name": "roles/os6_vlan/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "db20aa1cfff396930e3511a34e11d703f7e212959e802b63fa3f42afdf089d3f", + "name": "roles/os6_vlan/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "dca829e2d9e9fffca5a0b7d43e196606e9a160f6af26336973757a50d93d3d21", + "name": "roles/os6_vlan/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": 
"dir", + "chksum_sha256": null, + "name": "roles/os6_vlan/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3b961c92256ddc4c88064b72cf947b4e1e866fda7c1be5e849695a62c2baaa16", + "name": "roles/os6_vlan/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "22810b5b1ad7e24085bbf11547eb842ce297dc0bfb2a49d2c047551a85d2c560", + "name": "roles/os6_lag/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_lag/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_lag/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c8c1a8a56bd960a41e25288853ebbbc8e17b67316fabac82e28c1a1bc7cc7c5c", + "name": "roles/os6_lag/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cabc7457274088b5ac6661e4790aff7e913a535c67c0ce79c9610050d317b4d1", + "name": "roles/os6_lag/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e80b023e177c635bcf7cc9fb240ce6e3b93d38a146b1d5f7d65039d991d4e2ae", + "name": "roles/os6_lag/templates/os6_lag.j2", + "chksum_type": "sha256", + "format": 1 + }, + 
{ + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6a09aefebf59af427b44818dd420e1017135d3172c6dec56cfd49f975d397b97", + "name": "roles/os6_lag/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ba602fd62628ba43f77bdddc168ba905a58c9e6a8913f92a4939df005f3004d9", + "name": "roles/os6_lag/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9294b173cd96c01140bba5121848bb293e1fb3e4764fed799699c15d49d7a537", + "name": "roles/os6_lag/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6bbde0ade400e3853d1cb029590a055517431c66d43dcb535728f21047e9d1fa", + "name": "roles/os6_lag/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_lag/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4afba3d2195b36af0f0cd3c324aed8e8f570b2244a0afc002322d79d05f266c3", + "name": "roles/os6_lag/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "63134ab7c27b4cc5a64dd03bdc10211031e811667ae27906a90e745019f3f129", + "name": "roles/os6_aaa/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_aaa/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_aaa/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cda0f026e47460f4983b4d10902206939991281f25fe1bedcc6333b91b4a7ce7", + "name": "roles/os6_aaa/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8508ca8af9718ed4ba1d3a97ae63d42fd521a36a6a383aac5a21ea5de40cf70f", + "name": "roles/os6_aaa/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "781463a6d4ea96eb12fff81ecfbc8c6be9621650cd6aa750619a1d47cb654094", + "name": "roles/os6_aaa/templates/os6_aaa.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "809402b4bdb9cb3112d2285b6b1e9bfab2980e37f6472674c743db148f1c2800", + "name": "roles/os6_aaa/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "af0d0c9fed4994986f076864f1f93dcb041ac3c79ad5d783aec69b3a7f584c42", + "name": "roles/os6_aaa/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f0a6662d0137775c9eb175370cded387cf84225265add1ac11afacd60a25a67b", + "name": "roles/os6_aaa/README.md", + 
"chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "765a854c27d68838a34cb96a1b59269a0d2397e50a0986b81764ff28e3e93c06", + "name": "roles/os6_aaa/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_aaa/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "39b3fb9d024aa2518897c83e7f63e455fe55e6d39ca07cb6f4347ebf01000972", + "name": "roles/os6_aaa/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "93b3ecffbb3be43fa0c310ebddc61eb0b9ba75e23268463e3a6446ee202b5704", + "name": "roles/os6_qos/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_qos/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbcaa96e775c2a68ff2cabdc67e4baeeae90964e2692159ce4472cf34b589df5", + "name": "roles/os6_qos/tests/test.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "605557eaef5f8dfc2365202894cd9b0591b77a3b77ddc5ee1e7e56884c3241a4", + "name": "roles/os6_qos/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "name": "roles/os6_qos/tests/inventory", 
+ "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ca90cefe5596a51c88fd5f8a777a4f1dc9d27ba7128853f176ebf17e995d98ff", + "name": "roles/os6_qos/templates/os6_qos.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "07585253cf6cb7bd222744d6ddfffe8078c0aa669e257c573df25b778f5d6c9f", + "name": "roles/os6_qos/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bfb1de30e19eaaa33dfafac9b3cb45767ac089dc8511c1016b8d7d23c23ece25", + "name": "roles/os6_qos/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4364ba19d60713e73830321f1d32a1cf2b5bf6e45af37951a0ee796e48aabb5c", + "name": "roles/os6_qos/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5220e3c142d2f58f1c7d5525ac6f71c59226e82bd11b34155a5c943f41371849", + "name": "roles/os6_qos/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_qos/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f2423f8dd4614917fced06db2c05495911c9f206ecd16d99325ba2d1c5f63a7c", + "name": "roles/os6_qos/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "roles/os6_acl/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2404b4a0ddae981fcccab3064ad96bc733e9b3698e885fc1cb26a3c10c4d6bdb", + "name": "roles/os6_acl/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_acl/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_acl/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "92f7529157aba5313231d21c978e0058045c1ba805fa74aa667777b7801676ef", + "name": "roles/os6_acl/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3f2697959dbccac3047eb04365c52dabb8bab254b671be0d370fa5fd6c5cac79", + "name": "roles/os6_acl/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a25418525c08a11dd6bca320e044c2a9429af4bc5ecc7e3628bb96205022a937", + "name": "roles/os6_acl/templates/os6_acl.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e499c9510aaa522477920a93e1d590047ceabeca8aef307b98e5a69ae4f92317", + "name": "roles/os6_acl/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/vars", + "chksum_type": null, + "format": 1 + }, + { + 
"ftype": "file", + "chksum_sha256": "9cae65a8516d0400f6091fe2380a9a257876a08f85a39fa614dbe760a9c58fe9", + "name": "roles/os6_acl/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9f00b8413b2410763c2cca2a948e286e3261d156361aa7c913fba3ce3da9def3", + "name": "roles/os6_acl/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "abe3440632da83602afd2f067d74f6c78f0d8ceda20d7ec3a7a4a0a6efe80f0d", + "name": "roles/os6_acl/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_acl/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4f178f2310a8c9d40983ede3ff18f38f8cc9cf29f7de9f42fb550ef909d8f22c", + "name": "roles/os6_acl/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "23f4af4ab2764fab3a88e6d7390876d3804392d6a96323a4932af3cacb6acda5", + "name": "roles/os6_bgp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_bgp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "11b4ce2f58f779973bac45a5ff5b0a86272f4e1d29cea5c8cd95d2cf194285e4", + "name": "roles/os6_bgp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + 
"ftype": "file", + "chksum_sha256": "37371f94256c61221fa5f52d2bdc9a7681d1950a81f94d890b9762722326eab2", + "name": "roles/os6_bgp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "38cd90a98839739453a2b86bfd28367b513a5ef69fcb6a7b5190fcc2e8250e1f", + "name": "roles/os6_bgp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "978118306940cb92dd374cdf91197785baae190598e0e404ed484de7a2bea385", + "name": "roles/os6_bgp/templates/os6_bgp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d310c5523a47ce3dc47ed3684ddbc0dfc27469942f8c6f4c20fe90fcdcb85610", + "name": "roles/os6_bgp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3920731d533222b279bc7fde4b8947d7e47a4e6e834ed98d52a2b8149007779f", + "name": "roles/os6_bgp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "09f866d5a0529aaaaba4bf0236e377b388369495fbd61fffee71f961adf6b0ff", + "name": "roles/os6_bgp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9522bb923f414ccabbaf73341a226c4bc2a161b1570950bbc531a9437a277bb9", + "name": "roles/os6_bgp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_bgp/tasks", + "chksum_type": null, + "format": 1 + 
}, + { + "ftype": "file", + "chksum_sha256": "7c97aa0e67a4888c3d31c094f0cee0a07d44c310f36e4ca2c0992f5c5838e249", + "name": "roles/os6_bgp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "91c3c194de2106a16ee51da04a2cd57d95821298c23022aa4e856a2199763a1c", + "name": "roles/os6_ntp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_ntp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "510cf11fd5cd5f601c8da4171355892d32289ec90c1ba3d4febad6dbe746f84f", + "name": "roles/os6_ntp/tests/test.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c67f34a06350c4fe36d01a03d4c6c4132733715819fa568bbf7c47bab9bea937", + "name": "roles/os6_ntp/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "name": "roles/os6_ntp/tests/inventory", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a1cc9d2eb86ba5b0d74edc8dbd4a29a1caba4b550597c77f4f87c59e71289edd", + "name": "roles/os6_ntp/templates/os6_ntp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/meta", + "chksum_type": null, + "format": 
1 + }, + { + "ftype": "file", + "chksum_sha256": "4e1453927e4f3f0da3d28299e7999c97d621f110acb5c17b964175173c5441b9", + "name": "roles/os6_ntp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bbebd2d2fed07efe0555c8eb380aa715fd52089a694673bc6340d20346febbc5", + "name": "roles/os6_ntp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b1d6519f386c6c11226abd13273303945beb6d1de8fd08ae5a1bea02084258d9", + "name": "roles/os6_ntp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1cfaf6cb22e9e6f3b898f7ebcf805c79996b41555e4e247420c32989b2bec638", + "name": "roles/os6_ntp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_ntp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5461ad3fd435dd6f5778ecb2b66e5c1a03e874bf17b20f0a1cdbf7f2b2ef88d7", + "name": "roles/os6_ntp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "06dada9df09601459174f4beef9acab7363b43e75af2c7c6232cc622c8c7c6f6", + "name": "roles/os6_interface/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os6_interface/LICENSE", + "chksum_type": "sha256", 
+ "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2", + "name": "roles/os6_interface/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8e0874badbc6d9b601696717109a02439c461a1b55fa8044f8af47beafdfda00", + "name": "roles/os6_interface/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c0a8610a990c2048db010b007e7490257215a70722d9ffdaec80cb97c3b2d7b8", + "name": "roles/os6_interface/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b41045b5a1642a1b9fdac94a89edf172d72828b6f32489ae0ef179d61d7d47c6", + "name": "roles/os6_interface/templates/os6_interface.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d10b2645e5b9caf6f2eebdf1c2079bf9ab5031c6c78e5315769f39388261de98", + "name": "roles/os6_interface/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b45d0f61c394fa77e7f1641b2fc686037d847658ab8a93cd232314ff76215c81", + "name": "roles/os6_interface/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0671759da708abf9810f9f7e6aa1afc00f530f7e6596e422c848e4d15502c9d8", + "name": "roles/os6_interface/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", 
+ "chksum_sha256": null, + "name": "roles/os6_interface/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "63fd57470a2eefef819d902e96d71e49395a3d2a69e7d104ba10c42ccb475d21", + "name": "roles/os6_interface/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os6_interface/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "654dfb8baff5953f105aabe76d62750ec3e75597cdfedd1adae1fa466e58e8f7", + "name": "roles/os6_interface/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/LICENSE b/ansible_collections/dellemc/os6/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os6/MANIFEST.json b/ansible_collections/dellemc/os6/MANIFEST.json new file mode 100644 index 00000000..c56cf40e --- /dev/null +++ b/ansible_collections/dellemc/os6/MANIFEST.json @@ -0,0 +1,37 @@ +{ + "collection_info": { + "description": "Ansible Network Collection for Dell EMC OS6", + "repository": "https://github.com/ansible-collections/dellemc.os6", + "tags": [ + "dell", + "dellemc", + "os6", + "emc", + "networking" + ], + "dependencies": { + "ansible.netcommon": ">=1.0.0" + }, + "authors": [ + "Komal Patil ", + "Senthil Ganesan Ganesan " + ], + "issues": "https://github.com/ansible-collections/dellemc.os6/issues", + "name": "os6", + "license": [], + "documentation": "https://github.com/ansible-collections/dellemc.os6/tree/master/docs", + "namespace": "dellemc", + "version": "1.0.7", + "readme": "README.md", + "license_file": "LICENSE", + "homepage": "https://github.com/ansible-collections/dellemc.os6" + }, + "file_manifest_file": { + "format": 1, + "ftype": "file", + "chksum_sha256": "ccb112e42f4caf7be04b0a0ac31e199bc114f01cc74cae9fc02aa5844b3ecd3e", + "name": "FILES.json", + "chksum_type": "sha256" + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/README.md b/ansible_collections/dellemc/os6/README.md new file mode 100644 index 00000000..eaa413cf --- /dev/null +++ b/ansible_collections/dellemc/os6/README.md @@ -0,0 +1,98 @@ +# Ansible Network Collection for Dell EMC OS6 + +## Collection contents + +This collection includes the Ansible modules, plugins and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC OS6. Sample playbooks and documentation are also included to show how the collection can be used. 
+ +### Collection core modules + +- **os6_command.py** — Run commands on devices running OS6 + +- **os6_config.py** — Manage configuration on devices running OS6 + +- **os6_facts.py** — Collect facts from devices running OS6 + +### Collection roles + +These roles facilitate provisioning and administration of devices running Dell EMC OS6. There are over 15 roles available that provide a comprehensive coverage of most OS6 resources, including os6_interface, os6_aaa, os6_bgp, and os6_xstp. The documentation for each role is at [OS6 roles](https://github.com/ansible-collections/dellemc.os6/blob/master/docs/roles.rst). + +### Sample use case playbooks + +This collection includes the following sample playbook that illustrates end to end use cases: + + - [iBGP](https://github.com/ansible-collections/dellemc.os6/blob/master/playbooks/ibgp/README.md) — Example playbook to configure iBGP between two routers + +## Installation + +Use this command to install the latest version of the OS6 collection from Ansible Galaxy: + +``` +ansible-galaxy collection install dellemc.os6 + +``` +To install a specific version, a version range identifier must be specified. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0: + +``` +ansible-galaxy collection install 'dellemc.os6:>=1.0.0,<2.0.0' + +``` + +## Version compatibility + +* Ansible version 2.10 or higher +* Python 2.7 or higher and Python 3.5 or higher + +> **NOTE**: For Ansible versions lower than 2.10, use the legacy [dellos6 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os6-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html). 
+ +## Sample playbook + +**playbook.yaml** + +``` +- hosts: os6_switches + connection: network_cli + collections: + - dellemc.os6 + roles: + - os6_vlan + +``` + +**host_vars/os6_sw1.yaml** + +``` +hostname: os6_sw1 +# Parameters for connection type network_cli +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_become: yes +ansible_become_method: enable +ansible_network_os: dellemc.os6.os6 + +# Create vlan100 and delete vlan888 +os6_vlan: + vlan 100: + name: "Blue" + state: present + vlan 888: + state: absent + + +``` + +**inventory.yaml** + +``` +[os6_sw1] +os6_sw1 ansible_host= 100.94.51.40 + +[os6_sw2] +os6_sw2 ansible_host= 100.94.52.38 + +[os6_switches:children] +os6_sw1 +os6_sw2 + +``` + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst new file mode 100644 index 00000000..93b6adcb --- /dev/null +++ b/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst @@ -0,0 +1,98 @@ +====================================================================== +Ansible Network Collection for Dell EMC OS6 Release Notes +====================================================================== + +.. 
contents:: Topics + +v1.0.7 +====== + +Release Summary +--------------- + +- Fixed sanity error found during the sanity test of automation hub upload +- os6 interface role readme updated + +v1.0.6 +====== + +Bugfixes +--------------- + +- module utils fix for exit handling in multilevel parent commands +- config module fix to handle multiline banner +- terminal plugin fix to handle error reported by management access lists + +v1.0.5 +====== + +Bugfixes +--------------- + +- config module fix to handle issues faced while parsing running config and fixing idempotency issue for banner config +- command module change to keep similar changes across all dell networking OSs +- terminal plugin fix to send "terminal length 0" command + +v1.0.4 +====== + +Bugfixes +--------------- + +- Fix issue in using list of strings for `commands` argument for `os6_command` module +- Fix issue in using "os6_facts" module for non-legacy n-series platforms + +v1.0.3 +====== + +Release Summary +--------------- + +Added bug fixes for bugs found during System Test. + +v1.0.2 +====== + +Release Summary +--------------- + +Added changelogs. + +v1.0.1 +====== + +Release Summary +--------------- + +Updated documentation review comments. + +v1.0.0 +====== + +New Modules +----------- + +- os6_command - Run commands on devices running Dell EMC os6. +- os6_config - Manage configuration on devices running os6. +- os6_facts - Collect facts from devices running os6. + +New Roles +--------- + +- os6_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server. +- os6_acl - Facilitates the configuration of Access Control lists. +- os6_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes. +- os6_interface - Facilitates the configuration of interface attributes. +- os6_lag - Facilitates the configuration of link aggregation group (LAG) attributes. 
+- os6_lldp - Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. +- os6_logging - Facilitates the configuration of global logging attributes and logging servers. +- os6_ntp - Facilitates the configuration of network time protocol (NTP) attributes. +- os6_qos - Facilitates the configuration of quality of service attributes including policy-map and class-map. +- os6_snmp - Facilitates the configuration of global SNMP attributes. +- os6_system - Facilitates the configuration of hostname and hashing algorithm. +- os6_users - Facilitates the configuration of global system user attributes. +- os6_vlan - Facilitates the configuration of virtual LAN (VLAN) attributes. +- os6_vrrp - Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes. +- os6_xstp - Facilitates the configuration of xSTP attributes. + +\(c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved. diff --git a/ansible_collections/dellemc/os6/changelogs/changelog.yaml b/ansible_collections/dellemc/os6/changelogs/changelog.yaml new file mode 100644 index 00000000..8b9f3456 --- /dev/null +++ b/ansible_collections/dellemc/os6/changelogs/changelog.yaml @@ -0,0 +1,112 @@ +ancestor: null +releases: + 1.0.0: + modules: + - description: Run commands on devices running Dell EMC os6. + name: os6_command + namespace: '' + - description: Manage configuration on devices running os6. + name: os6_config + namespace: '' + - description: Collect facts from devices running os6. + name: os6_facts + namespace: '' + roles: + - description: Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server. + name: os6_aaa + namespace: '' + - description: Facilitates the configuration of Access Control lists. + name: os6_acl + namespace: '' + - description: Facilitates the configuration of border gateway protocol (BGP) attributes. 
+ name: os6_bgp + namespace: '' + - description: Facilitates the configuration of interface attributes. + name: os6_interface + namespace: '' + - description: Facilitates the configuration of link aggregation group (LAG) attributes. + name: os6_lag + namespace: '' + - description: Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. + name: os6_lldp + namespace: '' + - description: Facilitates the configuration of global logging attributes and logging servers. + name: os6_logging + namespace: '' + - description: Facilitates the configuration of network time protocol (NTP) attributes. + name: os6_ntp + namespace: '' + - description: Facilitates the configuration of quality of service attributes including policy-map and class-map. + name: os6_qos + namespace: '' + - description: Facilitates the configuration of global SNMP attributes. + name: os6_snmp + namespace: '' + - description: Facilitates the configuration of hostname and hashing algorithm. + name: os6_system + namespace: '' + - description: Facilitates the configuration of global system user attributes. + name: os6_users + namespace: '' + - description: Facilitates the configuration of virtual LAN (VLAN) attributes. + name: os6_vlan + namespace: '' + - description: Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes. + name: os6_vrrp + namespace: '' + - description: Facilitates the configuration of xSTP attributes. + name: os6_xstp + namespace: '' + release_date: '2020-07-31' + 1.0.1: + changes: + release_summary: Updated documentation review comments + fragments: + - 1.0.1.yaml + release_date: '2020-08-04' + 1.0.2: + changes: + release_summary: Added changelogs. + fragments: + - 1.0.2.yaml + release_date: '2020-08-18' + 1.0.3: + changes: + release_summary: Added bug fixes for bugs found during System Test. 
+ fragments: + - 1.0.3.yaml + release_date: '2020-10-09' + 1.0.4: + changes: + bugfixes: + - Fix issue in using list of strings for `commands` argument for `os6_command` module + - Fix issue in using "os6_facts" module for non-legacy n-series platforms + fragments: + - 1.0.4.yaml + release_date: '2020-11-17' + 1.0.5: + changes: + bugfixes: + - config module fix to handle issues faced while parsing running config and fixing idempotency issue for banner config + - command module change to keep similar changes across all dell networking OSs + - terminal plugin fix to send "terminal length 0" command + fragments: + - 1.0.5.yaml + release_date: '2020-12-09' + 1.0.6: + changes: + bugfixes: + - module utils fix for exit handling in multilevel parent commands + - config module fix to handle multiline banner + - terminal plugin fix to handle error reported by management access lists + fragments: + - 1.0.6.yaml + release_date: '2020-12-18' + 1.0.7: + changes: + release_summary: + - Fixed sanity error found during the sanity test of automation hub upload + - os6 interface role readme updated for proper syntax of ip address and mask + fragments: + - 1.0.7.yaml + release_date: '2021-02-15' \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/changelogs/config.yaml b/ansible_collections/dellemc/os6/changelogs/config.yaml new file mode 100644 index 00000000..f1a020eb --- /dev/null +++ b/ansible_collections/dellemc/os6/changelogs/config.yaml @@ -0,0 +1,30 @@ +changelog_filename_template: CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +flatmap: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - 
Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Ansible Network Collection for Dell OS6 +trivial_section_name: trivial diff --git a/ansible_collections/dellemc/os6/docs/os6_aaa.md b/ansible_collections/dellemc/os6/docs/os6_aaa.md new file mode 100644 index 00000000..b3d5783a --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_aaa.md @@ -0,0 +1 @@ +../roles/os6_aaa/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_acl.md b/ansible_collections/dellemc/os6/docs/os6_acl.md new file mode 100644 index 00000000..6224f56a --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_acl.md @@ -0,0 +1 @@ +../roles/os6_acl/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_bgp.md b/ansible_collections/dellemc/os6/docs/os6_bgp.md new file mode 100644 index 00000000..376f0e03 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_bgp.md @@ -0,0 +1 @@ +../roles/os6_bgp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_interface.md b/ansible_collections/dellemc/os6/docs/os6_interface.md new file mode 100644 index 00000000..6b800960 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_interface.md @@ -0,0 +1 @@ +../roles/os6_interface/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_lag.md b/ansible_collections/dellemc/os6/docs/os6_lag.md new file mode 100644 index 00000000..623771fa --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_lag.md @@ -0,0 +1 @@ +../roles/os6_lag/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_lldp.md b/ansible_collections/dellemc/os6/docs/os6_lldp.md new file mode 100644 index 00000000..3f367237 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_lldp.md @@ 
-0,0 +1 @@ +../roles/os6_lldp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_logging.md b/ansible_collections/dellemc/os6/docs/os6_logging.md new file mode 100644 index 00000000..f9888545 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_logging.md @@ -0,0 +1 @@ +../roles/os6_logging/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_ntp.md b/ansible_collections/dellemc/os6/docs/os6_ntp.md new file mode 100644 index 00000000..dee2f2b2 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_ntp.md @@ -0,0 +1 @@ +../roles/os6_ntp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_qos.md b/ansible_collections/dellemc/os6/docs/os6_qos.md new file mode 100644 index 00000000..d7dc1fb2 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_qos.md @@ -0,0 +1 @@ +../roles/os6_qos/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_snmp.md b/ansible_collections/dellemc/os6/docs/os6_snmp.md new file mode 100644 index 00000000..dd6f97f3 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_snmp.md @@ -0,0 +1 @@ +../roles/os6_snmp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_system.md b/ansible_collections/dellemc/os6/docs/os6_system.md new file mode 100644 index 00000000..64a2c5ec --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_system.md @@ -0,0 +1 @@ +../roles/os6_system/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_users.md b/ansible_collections/dellemc/os6/docs/os6_users.md new file mode 100644 index 00000000..2b05877b --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_users.md @@ -0,0 +1 @@ +../roles/os6_users/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_vlan.md b/ansible_collections/dellemc/os6/docs/os6_vlan.md new file 
mode 100644 index 00000000..c28686f8 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_vlan.md @@ -0,0 +1 @@ +../roles/os6_vlan/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_vrrp.md b/ansible_collections/dellemc/os6/docs/os6_vrrp.md new file mode 100644 index 00000000..95a1dc26 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_vrrp.md @@ -0,0 +1 @@ +../roles/os6_vrrp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/os6_xstp.md b/ansible_collections/dellemc/os6/docs/os6_xstp.md new file mode 100644 index 00000000..9f0ff54f --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/os6_xstp.md @@ -0,0 +1 @@ +../roles/os6_xstp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/docs/roles.rst b/ansible_collections/dellemc/os6/docs/roles.rst new file mode 100644 index 00000000..2aab9b84 --- /dev/null +++ b/ansible_collections/dellemc/os6/docs/roles.rst @@ -0,0 +1,94 @@ +############################################################## +Ansible Network Collection Roles for Dell EMC OS6 +############################################################## + +The roles facilitate provisioning of Dell EMC PowerSwitch platforms running Dell EMC OS6. + +AAA role +******** + +The `os6_aaa `_ role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of TACACS and RADIUS server, and AAA. + + +ACL role +******** + +The `os6_acl `_ role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals. + + +BGP role +******** + +The `os6_bgp `_ role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. 
+ + +Interface role +************** + +The `os6_interface `_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, and port mode. + + +LAG role +******** + +The `os6_lag `_ role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of type (static/dynamic), hash scheme, and minimum required link. + + +LLDP role +********* + +The `os6_lldp `_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. This role supports the configuration of hello, mode, multiplier, advertise tlvs, management interface, fcoe, iscsi at global and interface levels. + + +Logging role +************ + +The `os6_logging `_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers. + + +NTP role +******** + +The `os6_ntp `_ role facilitates the configuration of network time protocol (NTP) attributes. + + +QoS role +******** + +The `os6_qos `_ role facilitates the configuration of quality of service (QoS) attributes including policy-map and class-map. + + +SNMP role +********* + +The `os6_snmp `_ role facilitates the configuration of global simple network management protocol (SNMP) attributes. It supports the configuration of SNMP server attributes like users, group, community, location, and traps. + + +System role +*********** + +The `os6_system `_ role facilitates the configuration of global system attributes. This role specifically enables configuration of hostname and enable password for OS6. + + +Users role +********** + +The `os6_users `_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users. 
+ + +VLAN role +********* + +The `os6_vlan `_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports. + + +VRRP role +********* + +The `os6_vrrp `_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes. + + +xSTP role +********* + +The `os6_xstp `_ role facilitates the configuration of extended spanning-tree protocol (xSTP) attributes. This role supports multiple version of spanning-tree protocol (STP), rapid spanning-tree (RSTP) protocol, multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/meta/runtime.yml b/ansible_collections/dellemc/os6/meta/runtime.yml new file mode 100644 index 00000000..f31652cc --- /dev/null +++ b/ansible_collections/dellemc/os6/meta/runtime.yml @@ -0,0 +1,8 @@ +plugin_routing: + action: + os6_config: + redirect: dellemc.os6.os6 + os6_command: + redirect: dellemc.os6.os6 + os6_facts: + redirect: dellemc.os6.os6 diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/README.md b/ansible_collections/dellemc/os6/playbooks/ibgp/README.md new file mode 100644 index 00000000..ab3e4e40 --- /dev/null +++ b/ansible_collections/dellemc/os6/playbooks/ibgp/README.md @@ -0,0 +1,21 @@ +# Provision OS6 Switch Stack using the Ansible Network Collection for Dell EMC OS6 + +This example describes how to use Ansible to configure Dell EMC PowerSwitch platforms running Dell EMC OS6. The sample topology contains two OS6 switches connected with each other. This example configures iBGP between two routers using the same AS. + +## Create a simple Ansible playbook + +**1**. 
Create an inventory file called `inventory.yaml`, then specify the device IP addresses under use in the inventory. + +**2**. Create a group variable file called `group_vars/all`, then define credentials common to all hosts. + +**3**. Create a host variable file called `host_vars/switch1.yaml`, then define credentials, hostname for switch1. + +**4**. Create a host variable file called `host_vars/switch2.yaml`, then define credentials and hostname for switch2. + +**5**. Create a playbook called `os6switch.yaml`. + +**6**. Run the playbook. + + ansible-playbook -i inventory.yaml os6switch.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all b/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all new file mode 100644 index 00000000..a24c1a2e --- /dev/null +++ b/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all @@ -0,0 +1,4 @@ +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_network_os: dellemc.os6.os6 +build_dir: ../tmp/tmp_os6 diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml new file mode 100644 index 00000000..8e3a1b36 --- /dev/null +++ b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml @@ -0,0 +1,47 @@ +hostname: switch1 +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_become: yes +ansible_become_method: enable +ansible_network_os: dellemc.os6.os6 +switch1_hostname: "switch1" + +os6_system: + hostname: "{{ switch1_hostname }}" + #enable_password: xxxx + mtu: 2000 + +os6_vlan: + vlan 20: + default_vlan: False + name: "os6vlan" + untagged_members: + - port: Te7/0/1 + state: present + state: present + +os6_interface: + Te7/0/1: + desc: "bgp" + admin: up + portmode: access + + vlan 20: + ip_type_dynamic: False + ip_and_mask: 20.20.20.3 255.255.255.0 + +os6_bgp: + asn: 4545 + router_id: 20.20.20.3 + maxpath_ibgp: 3 + 
ipv4_network: + - address: 20.20.20.3 255.255.255.255 + state: present + neighbor: + - type: ipv4 + ip: 20.20.20.2 + remote_asn: 4545 + timer: 5 10 + default_originate: True + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml new file mode 100644 index 00000000..0416fab8 --- /dev/null +++ b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml @@ -0,0 +1,47 @@ +hostname: switch2 +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_become: yes +ansible_become_method: enable +ansible_network_os: dellemc.os6.os6 +switch2_hostname: "switch2" + +os6_system: + hostname: "{{ switch2_hostname }}" + #enable_password: xxxx + mtu: 2000 + +os6_vlan: + vlan 20: + default_vlan: False + name: "os6vlan" + untagged_members: + - port: Te1/0/48 + state: present + state: present + +os6_interface: + Te1/0/48: + desc: "bgp" + admin: up + portmode: access + + vlan 20: + ip_type_dynamic: False + ip_and_mask: 20.20.20.2 255.255.255.0 + +os6_bgp: + asn: 4545 + router_id: 20.20.20.2 + maxpath_ibgp: 3 + ipv4_network: + - address: 20.20.20.2 255.255.255.255 + state: present + neighbor: + - type: ipv4 + ip: 20.20.20.3 + remote_asn: 4545 + timer: 5 10 + default_originate: True + state: present + state: present diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml new file mode 100644 index 00000000..1cda8da3 --- /dev/null +++ b/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 +switch2 ansible_host=100.94.52.38 + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml new file mode 100644 index 00000000..59eb0dc8 --- /dev/null 
+++ b/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml @@ -0,0 +1,13 @@ +--- +- hosts: os6switches + gather_facts: no + become: yes + become_method: enable + connection: network_cli + collections: + - dellemc.os6 + roles: + - os6_system + - os6_vlan + - os6_interface + - os6_bgp diff --git a/ansible_collections/dellemc/os6/plugins/action/__init__.py b/ansible_collections/dellemc/os6/plugins/action/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/action/os6.py b/ansible_collections/dellemc/os6/plugins/action/os6.py new file mode 100644 index 00000000..a7f16df3 --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/action/os6.py @@ -0,0 +1,95 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import Connection +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_provider_spec +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'os6_config' else False + socket_path = None + persistent_connection = self._play_context.connection.split('.')[-1] + + if persistent_connection == 'network_cli': + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using network_cli and will be ignored') + del self._task.args['provider'] + elif self._play_context.connection == 'local': + provider = load_provider(os6_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'dellemc.os6.os6' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + pc.become = provider['authorize'] or False + if pc.become: + pc.become_method = 'enable' + 
pc.become_pass = provider['auth_pass'] + + display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + # make sure we are in the right cli context which should be + # enable mode and not config module + if socket_path is None: + socket_path = self._connection.socket_path + + conn = Connection(socket_path) + out = conn.get_prompt() + while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'): + display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) + conn.send_command('exit') + out = conn.get_prompt() + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/ansible_collections/dellemc/os6/plugins/cliconf/__init__.py b/ansible_collections/dellemc/os6/plugins/cliconf/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/cliconf/os6.py b/ansible_collections/dellemc/os6/plugins/cliconf/os6.py new file mode 100644 index 00000000..9c9290da --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/cliconf/os6.py @@ -0,0 +1,88 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +cliconf: os6 +short_description: Use os6 cliconf to run command on Dell OS6 platform +description: + - This os6 plugin provides low level abstraction apis for + sending and receiving CLI commands from Dell OS6 network devices. +""" + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'dellemc.os6.os6' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Software Version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Type (\S+)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + reply = self.get('show running-config | grep hostname') + data = to_text(reply, errors='surrogate_or_strict').strip() + match = re.search(r'^hostname (.+)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + return self.invalid_params("fetching configuration from %s is not supported" % source) +# if source == 'running': +# cmd = 'show running-config all' + 
else: + cmd = 'show startup-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) diff --git a/ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py b/ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py b/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py new file mode 100644 index 00000000..32daca59 --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + port: + description: + - Specifies the port to use when building the connection to the remote + device. + type: int + username: + description: + - User to authenticate the SSH session to the remote device. 
If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Password to authenticate the SSH session to the remote device. If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + ssh_keyfile: + description: + - Path to an ssh key used to authenticate the SSH session to the remote + device. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path + timeout: + description: + - Specifies idle timeout (in seconds) for the connection. Useful if the + console freezes before continuing. For example when saving + configurations. + type: int + authorize: + description: + - Instructs the module to enter privileged mode on the remote device before + sending any commands. If not specified, the device will attempt to execute + all commands in non-privileged mode. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be + used instead. + type: bool + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode on the + remote device. If I(authorize) is false, then this argument does nothing. + If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTH_PASS) will be used instead. + type: str +notes: + - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking). 
+''' diff --git a/ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py b/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py new file mode 100644 index 00000000..aeea2cfc --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py @@ -0,0 +1,278 @@ +# +# (c) 2020 Peter Sprygada, +# (c) 2020 Red Hat, Inc +# +# Copyright (c) 2020 Dell Inc. +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re +import json +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine, ignore_line +from ansible.module_utils._text import to_bytes +from ansible.module_utils.connection import Connection, ConnectionError + +_DEVICE_CONFIGS = {} + +WARNING_PROMPTS_RE = [ + r"[\r\n]?\[confirm yes/no\]:\s?$", + r"[\r\n]?\[y/n\]:\s?$", + r"[\r\n]?\[yes/no\]:\s?$" +] + +os6_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'), + 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True), + 'timeout': dict(type='int'), +} +os6_argument_spec = { + 'provider': dict(type='dict', options=os6_provider_spec), +} + + +def check_args(module, warnings): + pass + + +def 
get_connection(module): + if hasattr(module, "_os6_connection"): + return module._os6_connection + + capabilities = get_capabilities(module) + network_api = capabilities.get("network_api") + if network_api in ["cliconf"]: + module._os6_connection = Connection(module._socket_path) + else: + module.fail_json(msg="Invalid connection type %s" % network_api) + + return module._os6_connection + + +def get_capabilities(module): + if hasattr(module, "_os6_capabilities"): + return module._os6_capabilities + try: + capabilities = Connection(module._socket_path).get_capabilities() + except ConnectionError as exc: + module.fail_json(msg=to_text(exc, errors="surrogate_then_replace")) + module._os6_capabilities = json.loads(capabilities) + return module._os6_capabilities + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'show running-config' + cmd += ' '.join(flags) + cmd = cmd.strip() + + try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict')) + cfg = to_text(out, errors='surrogate_or_strict').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict(), + 'sendonly': dict(), + 'newline': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc) + responses.append(to_text(out, errors='surrogate_or_strict')) + return responses + + +def load_config(module, commands): + rc, out, err = exec_command(module, 'configure terminal') + if 
rc != 0: + module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict')) + + for command in to_list(commands): + if command == 'end': + continue + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc) + exec_command(module, 'end') + + +def get_sublevel_config(running_config, module): + contents = list() + current_config_contents = list() + sublevel_config = NetworkConfig(indent=0) + obj = running_config.get_object(module.params['parents']) + if obj: + contents = obj._children + for c in contents: + if isinstance(c, ConfigLine): + current_config_contents.append(c.raw) + sublevel_config.add(current_config_contents, module.params['parents']) + return sublevel_config + + +def os6_parse(lines, indent=None, comment_tokens=None): + sublevel_cmds = [ + re.compile(r'^vlan\s[\d,-]+.*$'), + re.compile(r'^stack.*$'), + re.compile(r'^interface.*$'), + re.compile(r'datacenter-bridging.*$'), + re.compile(r'line (console|telnet|ssh).*$'), + re.compile(r'ip ssh !(server).*$'), + re.compile(r'ip dhcp pool.*$'), + re.compile(r'ip vrf (?!forwarding).*$'), + re.compile(r'(ip|mac|management|arp) access-list.*$'), + re.compile(r'ipv6 (dhcp pool|router).*$'), + re.compile(r'mail-server.*$'), + re.compile(r'vpc domain.*$'), + re.compile(r'router\s.*$'), + re.compile(r'route-map.*$'), + re.compile(r'policy-map.*$'), + re.compile(r'class-map match-all.*$'), + re.compile(r'captive-portal.*$'), + re.compile(r'admin-profile.*$'), + re.compile(r'link-dependency group.*$'), + re.compile(r'openflow.*$'), + re.compile(r'support-assist.*$'), + re.compile(r'template.*$'), + re.compile(r'address-family.*$'), + re.compile(r'spanning-tree mst configuration.*$'), + re.compile(r'logging (?!.*(cli-command|buffered|console|email|facility|file|monitor|protocol|snmp|source-interface|traps|web-session)).*$'), + re.compile(r'radius server 
(?!.*(attribute|dead-criteria|deadtime|timeout|key|load-balance|retransmit|source-interface|source-ip|vsa)).*$'), + re.compile(r'(tacacs-server) host.*$')] + + childline = re.compile(r'^exit\s*$') + config = list() + parent = list() + children = [] + parent_match = False + for line in str(lines).split('\n'): + line = str(line).strip() + text = str(re.sub(r'([{};])', '', line)).strip() + cfg = ConfigLine(text) + cfg.raw = line + if not text or ignore_line(text, comment_tokens): + parent = list() + children = [] + continue + + parent_match = False + # handle sublevel parent + for pr in sublevel_cmds: + if pr.match(line): + if len(parent) != 0: + cfg._parents.extend(parent) + parent.append(cfg) + config.append(cfg) + if children: + children.insert(len(parent) - 1, []) + children[len(parent) - 2].append(cfg) + if not children and len(parent) > 1: + configlist = [cfg] + children.append(configlist) + children.insert(len(parent) - 1, []) + parent_match = True + continue + # handle exit + if childline.match(line): + if children: + parent[len(children) - 1]._children.extend(children[len(children) - 1]) + if len(children) > 1: + parent[len(children) - 2]._children.extend(parent[len(children) - 1]._children) + cfg._parents.extend(parent) + children.pop() + parent.pop() + if not children: + children = list() + if parent: + cfg._parents.extend(parent) + parent = list() + config.append(cfg) + # handle sublevel children + elif parent_match is False and len(parent) > 0: + if not children: + cfglist = [cfg] + children.append(cfglist) + else: + children[len(parent) - 1].append(cfg) + cfg._parents.extend(parent) + config.append(cfg) + # handle global commands + elif not parent: + config.append(cfg) + return config + + +class NetworkConfig(NetworkConfig): + + def load(self, contents): + self._items = os6_parse(contents, self._indent) + + def _diff_line(self, other, path=None): + diff = list() + for item in self.items: + if str(item) == "exit": + for diff_item in diff: + if 
diff_item._parents: + if item._parents == diff_item._parents: + diff.append(item) + break + elif [e for e in item._parents if e == diff_item]: + diff.append(item) + break + elif item not in other: + diff.append(item) + return diff diff --git a/ansible_collections/dellemc/os6/plugins/modules/__init__.py b/ansible_collections/dellemc/os6/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_command.py b/ansible_collections/dellemc/os6/plugins/modules/os6_command.py new file mode 100644 index 00000000..99df0014 --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/modules/os6_command.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: os6_command +author: "Abirami N (@abirami-n)" +short_description: Run commands on devices running Dell EMC OS6 +description: + - Sends arbitrary commands to a OS6 device and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(dellemc_os6_os6_config) to configure OS6 devices. +extends_documentation_fragment: dellemc.os6.os6 +options: + commands: + description: + - List of commands to send to the remote os6 device over the + configured provider. The resulting output from the command + is returned. 
If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + type: list + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. + type: list + elements: str + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + type: str + default: all + choices: [ all, any ] + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + type: int + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. 
+ type: int + default: 1 +""" + +EXAMPLES = """ +tasks: + - name: run show version on remote devices + os6_command: + commands: show version + - name: run show version and check to see if output contains Dell + os6_command: + commands: show version + wait_for: result[0] contains Dell + - name: run multiple commands on remote nodes + os6_command: + commands: + - show version + - show interfaces + - name: run multiple commands and evaluate the output + os6_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains Dell + - result[1] contains Access +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" + +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import run_commands +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = 
ComplexList(dict( + command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + elif item['command'].startswith('conf'): + module.fail_json( + msg='os6_command does not support running config mode ' + 'commands. Please use os6_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', elements='str'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(os6_argument_spec) + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': 
list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_config.py b/ansible_collections/dellemc/os6/plugins/modules/os6_config.py new file mode 100644 index 00000000..b4321e9f --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/modules/os6_config.py @@ -0,0 +1,410 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: os6_config +author: "Abirami N (@abirami-n)" +short_description: Manage Dell EMC OS6 configuration sections +description: + - OS6 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with OS6 configuration sections in + a deterministic way. +extends_documentation_fragment: dellemc.os6.os6 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + type: list + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. 
+ type: list + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines). + type: path + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + type: list + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + type: list + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + type: str + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. 
+ type: str + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When you set this argument to + I(merge), the configuration changes merge with the current + device running configuration. When you set this argument to I(check) + the configuration updates are determined but not actually configured + on the remote device. + type: str + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + type: bool + default: 'no' + config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + type: str + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + backup_options: + description: + - This is a dict object containing configurable options related to backup file path. + The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set + to I(no) this option will be silently ignored. 
+ suboptions: + filename: + description: + - The filename to be used to store the backup configuration. If the filename + is not given it will be generated based on the hostname, current time and date + in format defined by <hostname>_config.<current-date>@<current-time> + type: str + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will be first + created and the filename is either the value of C(filename) or default filename + as described in C(filename) options description. If the path value is not given + in that case a I(backup) directory will be created in the current working directory + and backup configuration will be copied in C(filename) within I(backup) directory. + type: path + type: dict +"""
+
+EXAMPLES = """
+- os6_config: + lines: ['hostname {{ inventory_hostname }}'] +- os6_config: + lines: + - 10 permit ip 1.1.1.1 any log + - 20 permit ip 2.2.2.2 any log + - 30 permit ip 3.3.3.3 any log + - 40 permit ip 4.4.4.4 any log + - 50 permit ip 5.5.5.5 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + match: exact +- os6_config: + lines: + - 10 permit ip 1.1.1.1 any log + - 20 permit ip 2.2.2.2 any log + - 30 permit ip 3.3.3.3 any log + - 40 permit ip 4.4.4.4 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + replace: block +- os6_config: + lines: ['hostname {{ inventory_hostname }}'] + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +"""
+
+RETURN = """
+updates: + description: The set of commands that will be pushed to the remote device. 
+ returned: always + type: list + sample: ['interface Te1/0/1', 'no shutdown', 'exit'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['interface Te1/0/1', 'no shutdown', 'exit'] +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. + returned: When not check_mode. + type: bool + sample: True +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/os6_config.2017-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import get_config, get_sublevel_config, NetworkConfig +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import load_config, run_commands +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import WARNING_PROMPTS_RE +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps +import re +from ansible.module_utils.six import iteritems +from ansible.module_utils.connection import exec_command +from ansible.module_utils._text import to_bytes + + +def get_candidate(module): + candidate = NetworkConfig(indent=0) + banners = {} + if module.params['src']: + src, banners = extract_banners(module.params['src']) + candidate.load(src) + elif module.params['lines']: + parents = module.params['parents'] or list() + commands = module.params['lines'][0] + if (isinstance(commands, dict)) and (isinstance(commands['command'], list)): + candidate.add(commands['command'], parents=parents) + elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)): + candidate.add([commands['command']], parents=parents) + else: + lines, banners = extract_banners(module.params['lines']) 
+ candidate.add(lines, parents=parents) + return candidate, banners + + +def extract_banners(config): + flag = False + if isinstance(config, list): + str1 = "\n" + config = str1.join(config) + flag = True + banners = {} + banner_cmds = re.findall(r'^banner (\w+)', config, re.M) + for cmd in banner_cmds: + regex = r'banner %s \"(.+?)\".*' % cmd + match = re.search(regex, config, re.S) + if match: + key = 'banner %s' % cmd + banners[key] = match.group(1).strip() + + for cmd in banner_cmds: + regex = r'banner %s \"(.+?)\".*' % cmd + match = re.search(regex, config, re.S) + if match: + config = config.replace(str(match.group(1)), '') + config = re.sub(r'banner \w+ \"\"', '', config) + if flag: + config = config.split("\n") + return (config, banners) + + +def diff_banners(want, have): + candidate = {} + for key, value in iteritems(want): + if value != have.get(key): + candidate[key] = value + return candidate + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + contents, banners = extract_banners(contents) + return contents, banners + + +def load_banners(module, banners): + result_banners = [] + exec_command(module, 'configure terminal') + for each in banners: + delimiter = '"' + cmdline = "" + for key, value in each.items(): + cmdline = key + " " + delimiter + value + delimiter + for cmd in cmdline.split("\n"): + rc, out, err = exec_command(module, module.jsonify({'command': cmd, 'sendonly': True})) + result_banners.append(cmdline) + exec_command(module, 'end') + return result_banners + + +def main(): + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + 
update=dict(choices=['merge', 'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec) + ) + + argument_spec.update(os6_argument_spec) + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + + warnings = list() + check_args(module, warnings) + result = dict(changed=False, saved=False, warnings=warnings) + + candidate, want_banners = get_candidate(module) + if module.params['backup']: + if not module.check_mode: + result['__backup__'] = get_config(module) + + commands = list() + if any((module.params['lines'], module.params['src'])): + if match != 'none': + config, have_banners = get_running_config(module) + config = NetworkConfig(contents=config, indent=0) + if parents: + config = get_sublevel_config(config, module) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + configobjs = candidate.items + have_banners = {} + diffbanners = diff_banners(want_banners, have_banners) + banners = list() + if diffbanners: + banners.append(diffbanners) + if configobjs or banners: + commands = dumps(configobjs, 'commands') + if ((isinstance(module.params['lines'], list)) and + (isinstance(module.params['lines'][0], dict)) and + set(['prompt', 'answer']).issubset(module.params['lines'][0])): + cmd = {'command': commands, + 'prompt': module.params['lines'][0]['prompt'], + 'answer': module.params['lines'][0]['answer']} + commands = [module.jsonify(cmd)] + else: + if commands: + commands = commands.split('\n') + + if module.params['before']: + commands[:0], before_banners = extract_banners(module.params['before']) + if before_banners: + banners.insert(0, before_banners) + + if 
module.params['after']: + commands_after, after_banners = extract_banners(module.params['after']) + commands.extend(commands_after) + if after_banners: + banners.insert(len(banners), after_banners) + + if not module.check_mode and module.params['update'] == 'merge': + if commands: + load_config(module, commands) + if banners: + result_banners = load_banners(module, banners) + else: + result_banners = [] + + result['changed'] = True + result['commands'] = commands + result['updates'] = commands if commands else [] + result['banners'] = result_banners + if result['banners']: + result['updates'].extend(result_banners) + + if module.params['save']: + result['changed'] = True + if not module.check_mode: + cmd = {'command': 'copy running-config startup-config', + 'prompt': r'\(y/n\)\s?$', 'answer': 'y'} + run_commands(module, [cmd]) + result['saved'] = True + else: + module.warn('Skipping command `copy running-config startup-config`' + 'due to check_mode. Configuration not copied to ' + 'non-volatile storage') + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py b/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py new file mode 100644 index 00000000..ce439ad4 --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py @@ -0,0 +1,478 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os6_facts +author: "Abirami N (@abirami-n)" +short_description: Collect facts from devices running Dell EMC OS6 +description: + - Collects a base set of device facts from a remote device that + is running OS6. 
This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: dellemc.os6.os6 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + type: list + default: [ '!config' ] +""" + +EXAMPLES = """ +# Collect all facts from the device +- os6_facts: + gather_subset: all +# Collect only the config and default facts +- os6_facts: + gather_subset: + - config +# Do not collect hardware facts +- os6_facts: + gather_subset: + - "!interfaces" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device. + returned: always. + type: list +# default +ansible_net_model: + description: The model name returned from the device. + returned: always. + type: str +ansible_net_serialnum: + description: The serial number of the remote device. + returned: always. + type: str +ansible_net_version: + description: The operating system version running on the remote device. + returned: always. + type: str +ansible_net_hostname: + description: The configured hostname of the device. + returned: always. + type: str +ansible_net_image: + description: The image file that the device is running. + returned: always + type: str +# hardware +ansible_net_memfree_mb: + description: The available free memory on the remote device in MB. + returned: When hardware is configured. + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in MB. + returned: When hardware is configured. 
+ type: int +# config +ansible_net_config: + description: The current active config from the device. + returned: When config is configured. + type: str +# interfaces +ansible_net_interfaces: + description: A hash of all interfaces running on the system. + returned: When interfaces is configured. + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device. + returned: When interfaces is configured. + type: dict +""" +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import run_commands +from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show running-config | include hostname' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + hdata = self.responses[1] + self.facts['hostname'] = self.parse_hostname(hdata) + + def parse_version(self, data): + facts = dict() + match = re.search(r'HW Version(.+)\s(\d+)', data) + temp, temp_next = data.split('---- ----------- ----------- -------------- --------------') + for en in temp_next.splitlines(): + if en == '': + continue + match_image = re.search(r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', en) + version = match_image.group(4) + facts["Version"] = list() + fact = dict() + fact['HW 
Version'] = match.group(2) + fact['SW Version'] = match_image.group(4) + facts["Version"].append(fact) + return facts + + def parse_hostname(self, data): + match = re.search(r'\S+\s(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'System Model ID(.+)\s([-A-Z0-9]*)\n', data, re.M) + if match: + return match.group(2) + + def parse_image(self, data): + match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data) + if match: + return match.group(2) + + def parse_serialnum(self, data): + match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data) + if match: + return match.group(2) + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show memory cpu' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + match = re.findall(r'\s(\d+)\s', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) // 1024 + self.facts['memfree_mb'] = int(match[1]) // 1024 + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + self.facts['config'] = self.responses[0] + + +class Interfaces(FactsBase): + COMMANDS = [ + 'show interfaces', + 'show interfaces status', + 'show interfaces transceiver properties', + 'show ip int', + 'show lldp', + 'show lldp remote-device all', + 'show version' + ] + + def populate(self): + vlan_info = dict() + super(Interfaces, self).populate() + data = self.responses[0] + interfaces = self.parse_interfaces(data) + desc = self.responses[1] + properties = self.responses[2] + vlan = self.responses[3] + version_info = self.responses[6] + vlan_info = self.parse_vlan(vlan, version_info) + self.facts['interfaces'] = self.populate_interfaces(interfaces, desc, properties) + self.facts['interfaces'].update(vlan_info) + if 'LLDP is not enabled' not in self.responses[4]: + neighbors = self.responses[5] + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def parse_vlan(self, vlan, 
version_info): + facts = dict() + if "N11" in version_info: + match = re.search(r'IP Address(.+)\s([0-9.]*)\n', vlan) + mask = re.search(r'Subnet Mask(.+)\s([0-9.]*)\n', vlan) + vlan_id_match = re.search(r'Management VLAN ID(.+)\s(\d+)', vlan) + vlan_id = "Vl" + vlan_id_match.group(2) + if vlan_id not in facts: + facts[vlan_id] = list() + fact = dict() + fact['address'] = match.group(2) + fact['masklen'] = mask.group(2) + facts[vlan_id].append(fact) + else: + vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------') + for en in vlan_info_next.splitlines(): + if en == '': + continue + match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en) + intf = match.group(1) + if intf not in facts: + facts[intf] = list() + fact = dict() + matc = re.search(r'^([\w+\s\d]*)\s+(\S+)\s+(\S+)', en) + fact['address'] = matc.group(2) + fact['masklen'] = matc.group(3) + facts[intf].append(fact) + return facts + + def populate_interfaces(self, interfaces, desc, properties): + facts = dict() + for key, value in interfaces.items(): + intf = dict() + intf['description'] = self.parse_description(key, desc) + intf['macaddress'] = self.parse_macaddress(value) + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['mediatype'] = self.parse_mediatype(key, properties) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(key, properties) + facts[key] = intf + return facts + + def parse_neighbors(self, neighbors): + facts = dict() + neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------') + for en in neighbor_next.splitlines(): + if en == '': + continue + intf = self.parse_lldp_intf(en.split()[0]) + if intf not in facts: + facts[intf] = list() + fact = dict() + if len(en.split()) > 2: + fact['port'] = 
self.parse_lldp_port(en.split()[3]) + if (len(en.split()) > 4): + fact['host'] = self.parse_lldp_host(en.split()[4]) + else: + fact['host'] = "Null" + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + for line in data.split('\n'): + if len(line) == 0: + continue + match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line, re.IGNORECASE) + if match: + key = match.group(2) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_description(self, key, desc): + desc_val, desc_info = "", "" + desc = re.split(r'[-+\s](?:-+\s)[-+\s].*', desc) + for desc_val in desc: + if desc_val: + for en in desc_val.splitlines(): + if key in en: + match = re.search(r'^(\S+)\s+(\S+)', en) + if match.group(2) in ['Full', 'N/A']: + return "Null" + else: + return match.group(2) + + def parse_macaddress(self, data): + match = re.search(r'Burned In MAC Address(.+)\s([A-Z0-9.]*)\n', data) + if match: + return match.group(2) + + def parse_mtu(self, data): + match = re.search(r'MTU Size(.+)\s(\d+)\n', data) + if match: + return int(match.group(2)) + + def parse_bandwidth(self, data): + match = re.search(r'Port Speed\s*[:\s\.]+\s(\d+)\n', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data) + if match: + return match.group(3) + + def parse_mediatype(self, key, properties): + mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------') + flag = 1 + for en in mediatype_next.splitlines(): + if key in en: + flag = 0 + match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en) + if match: + strval = match.group(3) + return strval + if flag == 1: + return "null" + + def parse_type(self, key, properties): + type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------') + flag = 1 + for en in 
type_val_next.splitlines(): + if key in en: + flag = 0 + match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en) + if match: + strval = match.group(2) + return strval + if flag == 1: + return "null" + + def parse_lineprotocol(self, data): + data = data.splitlines() + for d in data: + match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d) + if match: + return match.group(1) + + def parse_operstatus(self, data): + data = data.splitlines() + for d in data: + match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^([A-Za-z0-9/]*)', data) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'^([A-Za-z0-9-]*)', data) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'^([A-Za-z0-9/]*)', data) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + argument_spec.update(os6_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + 
runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + warnings = list() + check_args(module, warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os6/plugins/terminal/__init__.py b/ansible_collections/dellemc/os6/plugins/terminal/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/plugins/terminal/os6.py b/ansible_collections/dellemc/os6/plugins/terminal/os6.py new file mode 100644 index 00000000..b1199552 --- /dev/null +++ b/ansible_collections/dellemc/os6/plugins/terminal/os6.py @@ -0,0 +1,95 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import json + +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Bad secret"), + re.compile(br"(\bInterface is part of a port-channel\b)"), + re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"\bParameter length should be exactly 32 characters\b"), + re.compile(br"'[^']' +returned error code: ?\d+"), + re.compile(br"Invalid|invalid.*$", re.I), + re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I), + re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)|(\bDeactivate\b)).*", re.I), + + ] + + terminal_initial_prompt = br"\(y/n\)" + + terminal_initial_answer = b"y" + + terminal_inital_prompt_newline = False + + def on_open_shell(self): + try: + if self._get_prompt().endswith(b'#'): + self._exec_cli_command(b'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + raise 
AnsibleConnectionFailure('unable to elevate privilege to enable mode') + # in os6 the terminal settings are accepted after the privilege mode + try: + self._exec_cli_command(b'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if prompt.strip().endswith(b')#'): + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE b/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/README.md b/ansible_collections/dellemc/os6/roles/os6_aaa/README.md new file mode 100644 index 00000000..1e142f37 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/README.md @@ -0,0 +1,210 @@ +AAA role +======= + +This role facilitates the configuration of authentication, authorization, and acccounting (AAA), and supports the configuration of RADIUS and TACACS servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The AAA role requires an SSH connection for connectivity to Dell EMC OS6. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as the value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_aaa keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os6 | +| ``radius_server.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os6 | +| ``radius_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os6 | +| ``radius_server.retransmit`` | integer | Configures the number of retransmissions; field to be left blank to remove the retransimission configuration for RADIUS server 
authentication | os6 | +| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions, timeout must be an integer 1 and 30; field needs to be left blank to remove the timeout configurations for RADIUS server authentication | os6 | +| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os6 | +| ``host.ip`` | string | Configures the RADIUS server host address | os6 | +| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 | +| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 | +| ``host.retransmit`` | integer | Configures the number of retransmissions | os6 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 | +| ``host.timeout`` | integer | Configures timeout for retransmissions | os6 | +| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os6 | +| ``radius_server.acct`` | dictionary | Configures the RADIUS server acct (see ``host.*``) | os6 | +| ``acct.ip`` | string | Configures the RADIUS server acct address | os6 | +| ``acct.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 | +| ``acct.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 | +| ``acct.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 | +| ``acct.state`` | string: present,absent | Removes the RADIUS server acct if set to absent | os6 | +| ``radius_server.auth`` | dictionary | Configures the RADIUS server auth (see ``auth.*``) | os6 | +| ``auth.ip`` | string | Configures the RADIUS server host address | os6 | 
+| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 | +| ``auth.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 | +| ``auth.name`` | string (required) | Configures the auth name of the RADIUS servers | os6 | +| ``auth.usage`` | string (required) | Configures the usage type of the RADIUS servers | os6 | +| ``auth.priority`` | integer | Configures the priority value | os6 | +| ``auth.retransmit`` | integer | Configures the number of retransmissions | os6 | +| ``auth.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 | +| ``auth.timeout`` | integer | Configures timeout for retransmissions | os6 | +| ``auth.deadtime`` | integer | Configures the deadtime value | os6 | +| ``auth.attribute`` | dictionary | Configures the RADIUS server attribute (see ``attribute.*``) | os6 | +| ``attribute.id`` | integer | Configures the RADIUS server attribute ID (see ``attribute.*``) | os6 | +| ``attribute.type`` | integer | Configures the RADIUS server attribute type based on ID | os6 | +| ``attribute.state`` | string: present,absent | Removes the RADIUS server attribute if set to absent | os6 | +| ``auth.state`` | string: present,absent | Removes the RADIUS server auth if set to absent | os6 | +| ``radius_server.attribute`` | dictionary | Configures the RADIUS server attribute (see ``attribute.*``) | os6 | +| ``attribute.id`` | integer | Configures the RADIUS server attribute ID (see ``attribute.*``) | os6 | +| ``attribute.type`` | integer | Configures the RADIUS server attribute type based on ID | os6 | +| ``attribute.state`` | string: present,absent | Removes the RADIUS server attribute if set to absent | os6 | +| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``) | os6 | +| ``tacacs_server.key`` | string 
(required): 0,7,LINE | Configures the authentication key for TACACS server | os6 | +| ``tacacs_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *tacacs_server.key* is 7 or 0 | os6 | +| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os6 | +| ``host.ip`` | string | Configures the TACACS server host address | os6 | +| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os6 | +| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 | +| ``host.timeout`` | integer | Configures the timeout for retransmissions | os6 | +| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os6 | +| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os6 | +| ``aaa_accounting.dot1x`` | string: none,start-stop,stop-only,wait-start | Configures accounting for dot1x events | os6 | +| ``aaa_authorization`` | dictionary | Configures authorization parameters (see ``aaa_authorization.*``) | os6 | +| ``aaa_authorization.exec`` | list | Configures authorization for EXEC (shell) commands (see ``exec.*``) | os6 | +| ``exec.authorization_list_name`` | string | Configures named authorization list for EXEC commands | os6 | +| ``exec.authorization_method`` | string: none | Configures no authorization of EXEC commands | os6 | +| ``exec.use_data`` | string: local,tacacs,radius | Configures data used for authorization | os6 | +| ``exec.state`` | string: present,absent | Removes the named authorization list for the EXEC 
commands if set to absent | os6 | +| ``aaa_authorization.network`` | string: none,radius,ias | Configures authorization for network events | os6 | +| ``aaa_authentication.auth_list`` | list | Configures named authentication list for hosts (see ``host.*``) | os6 | +| ``auth_list.name`` | string | Configures named authentication list | os6 | +| ``auth_list.login_or_enable`` | string: enable,login | Configures authentication list for login or enable | os6 | +| ``auth_list.server`` | string: radius,tacacs | Configures AAA to use this list of all server hosts | os6 | +| ``auth_list.use_password`` | string: line,local,enable,none | Configures password to use for authentication | os6 | +| ``auth_list.state`` | string: present,absent | Removes the named authentication list if set to absent | os6 | +| ``aaa_authentication.dot1x`` | string: none,radius,ias | Configures authentication for dot1x events | os6 | +| ``aaa_server`` | dictionary | Configures the AAA server (see ``aaa_server.*``) | os6 | +| ``radius`` | dictionary | Configures the RADIUS server (see ``radius.*``) | os6 | +| ``dynamic_author`` | dictionary | Configures the RADIUS server (see ``dynamic_author.*``) | os6 | +| ``dynamic_author.auth_type`` | string | Configures the authentication type for the radius server | os6 | +| ``dynamic_author.client`` | list | Configures the client for the RADIUS server | os6 | +| ``client.ip`` | string | Configures the client IP for the radius server | os6 | +| ``client.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os6 | +| ``client.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os6 | +| ``client.state`` | string: present,absent | Removes the accounting of client if set to absent | os6 | +| ``dynamic_author.state`` | string: present,absent 
| Removes the accounting of client if set to absent | os6 | +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. 
| +| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false and it writes a simple playbook that only references the *os6_aaa* role. + +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_aaa: + radius_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb + retransmit: 5 + timeout: 25 + host: + - ip: 10.0.0.1 + key: 0 + key_string: aaa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + tacacs_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa + host: + - ip: 10.0.0.50 + key: 0 + key_string: aaa + auth_port: 3 + timeout: 2 + state: present + aaa_accounting: + dot1x: none + aaa_authorization: + exec: + - authorization_list_name: aaa + authorization_method: none + use_data: local + state: present + network: radius + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: radius + use_password: local + state: present + - name: console + server: tacacs + login_or_enable: login + use_password: local + state: present + aaa_server: + radius: + dynamic_author: + auth_type: + client: + 
- ip: 10.0.0.1 + key: 0 + key_string: aaskjsksdkjsdda + state: present + - ip: 10.0.0.2 + key: + key_string: aaskjsksdkjsdda + state: present + state: present + + + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_aaa + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml new file mode 100644 index 00000000..40a48c74 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# defaults file for dellemc.os6.os6_aaa +attribute_type: + mandatory: mandatory + on_for_login_auth: on-for-login-auth + include_in_access_req: include-in-access-req + mac: "mac format" + mac_ietf: "mac format ietf" + mac_ietf_lower_case: "mac format ietf lower-case" + mac_ietf_upper_case: "mac format ietf upper-case" + mac_legacy: "mac format legacy" + mac_legacy_lower_case: "mac format legacy lower-case" + mac_legacy_upper_case: "mac format legacy upper-case" + mac_unformatted: "mac format unformatted" + mac_unformatted_lower_case: "mac format unformatted lower-case" + mac_unformatted_upper_case: "mac format unformatted upper-case" \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml new file mode 100644 index 00000000..a0318e7f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_aaa diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml new file mode 100644 index 00000000..5d089cb2 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell 
Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os6_aaa role facilitates the configuration of Authentication Authorization Accounting (AAA) attributes + in devices running Dell EMC Networking Operating Systems. + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml new file mode 100644 index 00000000..36a416e3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating AAA configuration for os6" + template: + src: os6_aaa.j2 + dest: "{{ build_dir }}/aaa6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning AAA configuration for os6" + dellemc.os6.os6_config: + src: os6_aaa.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 b/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 new file mode 100644 index 00000000..54188456 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 @@ -0,0 +1,437 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# + +Purpose: +Configure AAA commands for os6 Devices + +os6_aaa: + tacacs_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa + timeout: 10 + host: + - ip: 10.0.0.50 + key: 0 + key_string: aaa + port: 3 + timeout: 2 + state: present + radius_server: + key: 7 + key_string: 
9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb + retransmit: 5 + timeout: 10 + deadtime: 2000 + host: + - ip: 10.0.0.1 + key: 0 + key_string: aaa + name: radius + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + acct: + - ip: 10.0.0.1 + key: 0 + key_string: aasdvsdvfssfsfa + name: radius + auth_port: 2 + state: present + auth: + - ip: 10.0.0.2 + key: 0 + key_string: asdnksfnfnksnddjknsdn + name: radius + usage: all + priority: 2 + timeout: 2 + retransmit: 5 + auth_port: 3 + deadtime: 6 + attribute: + - id: 6 + type: {{attribute_type.mandatory}} + state: present + - id: 31 + type: {{attribute_type.mac_ietf_lower_case}} + state: present + state: present + attribute: + - id: 6 + type: {{attribute_type.mandatory}} + state: present + - id: 31 + type: {{attribute_type.mac_ietf_lower_case}} + state: present + + + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: tacacs + use_password: local + state: present + - name: console + server: radius + login_or_enable: login + use_password: local + dot1x: none + aaa_authorization: + exec: + - authorization_list_name: aaa + authorization_method: none + use_data: local + state: present + network: radius + aaa_accounting: + dot1x: none + aaa_server: + radius: + dynamic_author: + auth_type: all + client: + - ip: 10.0.0.1 + key: 0 + key_string: aaa + state: present + state: present + +##################################################} +{% if os6_aaa is defined and os6_aaa %} +{% for key in os6_aaa.keys() %} + {% set aaa_vars = os6_aaa[key] %} + {% if key == "tacacs_server" %} + {% set server = "tacacs-server" %} + {% endif %} + {% if key == "radius_server" %} + {% set server = "radius server" %} + {% endif %} + {% if server is defined and server %} + {% if aaa_vars %} + {% set item = aaa_vars %} + {% if item.timeout is defined %} + {% if item.timeout %} +{{ server }} timeout {{ item.timeout }} + {% else %} +no {{ server }} timeout + {% endif %} + {% endif %} + {% if item.retransmit is 
defined and server == "radius server" %} + {% if item.retransmit %} +{{ server }} retransmit {{ item.retransmit }} + {% else %} +no {{ server }} retransmit + {% endif %} + {% endif %} + {% if item.deadtime is defined and server == "radius server" %} + {% if item.deadtime %} +{{ server }} deadtime {{ item.deadtime }} + {% else %} +no {{ server }} deadtime + {% endif %} + {% endif %} + {% if item.key is defined %} + {% if item.key == 0 or item.key == 7 %} + {% if item.key_string is defined and item.key_string%} +{{ server }} key {{ item.key }} {{ item.key_string }} + {% endif %} + {% elif item.key %} +{{ server }} key {{ item.key }} + {% else %} +no {{ server }} key + {% endif %} + {% endif %} + {% if item.host is defined and item.host %} + {% for hostlist in item.host %} + {% if hostlist.ip is defined and hostlist.ip %} + {% if hostlist.state is defined and hostlist.state == "absent" %} + {% if server == "tacacs-server" %} +no {{ server }} host {{ hostlist.ip }} + {% else %} +no {{ server }} {{ hostlist.ip }} + {% endif %} + {% else %} + {% if server == "tacacs-server" %} +{{ server }} host {{ hostlist.ip }} + {% if (hostlist.key is defined) %} + {% if hostlist.key == 0 or hostlist.key == 7 %} + {% if hostlist.key_string is defined and hostlist.key_string %} +key {{ hostlist.key }} {{ hostlist.key_string }} + {% endif %} + {% elif hostlist.key %} +key {{ hostlist.key }} + {% else %} +no key + {% endif %} + {% endif %} + {% if (hostlist.timeout is defined and hostlist.timeout) %} +timeout {{ hostlist.timeout }} + {% endif %} + {% if (hostlist.auth_port is defined and hostlist.auth_port) %} +port {{ hostlist.auth_port }} + {% endif %} +exit + {% elif server == "radius server" %} +{{ server }} {{ hostlist.ip }} + {% if (hostlist.key is defined) %} + {% if hostlist.key == 0 or hostlist.key == 7 %} + {% if hostlist.key_string is defined and hostlist.key_string %} +key {{ hostlist.key }} {{ hostlist.key_string }} + {% endif %} + {% elif hostlist.key %} +key {{ 
hostlist.key }} + {% else %} +no key + {% endif %} + {% endif %} + {% if (hostlist.name is defined) %} + {% if (hostlist.name) %} +name "{{ hostlist.name }}" + {% else %} +no name + {% endif %} + {% endif %} + {% if (hostlist.timeout is defined and hostlist.timeout) %} +timeout {{ hostlist.timeout }} + {% endif %} + {% if (hostlist.auth_port is defined and hostlist.auth_port) %} +auth-port {{ hostlist.auth_port }} + {% endif %} +exit + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if item.acct is defined and item.acct and server == "radius server" %} + {% for acctlist in item.acct %} + {% if acctlist.ip is defined and acctlist.ip %} + {% if acctlist.state is defined and acctlist.state == "absent" %} +no {{ server }} acct {{ acctlist.ip }} + {% else %} +{{ server }} acct {{ acctlist.ip }} + {% if (acctlist.key is defined) %} + {% if acctlist.key == 0 or acctlist.key == 7 %} + {% if acctlist.key_string is defined and acctlist.key_string %} +key {{ acctlist.key }} {{ acctlist.key_string }} + {% endif %} + {% elif acctlist.key %} +key {{ acctlist.key }} + {% else %} +no key + {% endif %} + {% endif %} + {% if (acctlist.name is defined) %} + {% if (acctlist.name) %} +name "{{ acctlist.name }}" + {% else %} +no name + {% endif %} + {% endif %} + {% if (acctlist.auth_port is defined and acctlist.auth_port) %} +acct-port {{ acctlist.auth_port }} + {% endif %} +exit + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if item.auth is defined and item.auth and server == "radius server" %} + {% for authlist in item.auth %} + {% if authlist.ip is defined and authlist.ip %} + {% if authlist.state is defined and authlist.state == "absent" %} +no {{ server }} auth {{ authlist.ip }} + {% else %} +{{ server }} auth {{ authlist.ip }} + {% if (authlist.key is defined) %} + {% if authlist.key == 0 or authlist.key == 7 %} + {% if authlist.key_string is defined and authlist.key_string %} +key {{ authlist.key }} {{ authlist.key_string }} + {% endif %} 
+ {% elif authlist.key %} +key {{ authlist.key }} + {% else %} +no key + {% endif %} + {% endif %} + {% if (authlist.name is defined) %} + {% if (authlist.name) %} +name "{{ authlist.name }}" + {% else %} +no name + {% endif %} + {% endif %} + {% if (authlist.auth_port is defined and authlist.auth_port) %} +auth-port {{ authlist.auth_port }} + {% endif %} + {% if (authlist.priority is defined and authlist.priority) %} +priority {{ authlist.priority }} + {% endif %} + {% if (authlist.timeout is defined and authlist.timeout) %} +timeout {{ authlist.timeout }} + {% endif %} + {% if (authlist.retransmit is defined and authlist.retransmit) %} +retransmit {{ authlist.retransmit }} + {% endif %} + {% if (authlist.deadtime is defined and authlist.deadtime) %} +deadtime {{ authlist.deadtime }} + {% endif %} + {% if (authlist.usage is defined and authlist.usage) %} +usage {{ authlist.usage }} + {% endif %} + {% if authlist.attribute is defined and authlist.attribute and server == "radius server" %} + {% for attributelist in authlist.attribute %} + {% if attributelist.id is defined and attributelist.id and attributelist.type is defined %} + {% if attributelist.state is defined and attributelist.state == "absent" %} +no attribute {{ attributelist.id }} {{ attributelist.type }} + {% else %} + {% if attributelist.type %} +attribute {{ attributelist.id }} {{ attributelist.type }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +exit + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if item.attribute is defined and item.attribute and server == "radius server" %} + {% for attributelist in item.attribute %} + {% if attributelist.id is defined and attributelist.id and attributelist.type is defined %} + {% if attributelist.state is defined and attributelist.state == "absent" %} +no {{ server }} attribute {{ attributelist.id }} {{ attributelist.type }} + {% else %} + {% if attributelist.type %} +{{ server }} attribute {{ attributelist.id }} {{ 
attributelist.type }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} +{% endfor %} + {% if os6_aaa.aaa_authentication is defined and os6_aaa.aaa_authentication %} + {% if os6_aaa.aaa_authentication.auth_list is defined and os6_aaa.aaa_authentication.auth_list %} + {% for auth_list in os6_aaa.aaa_authentication.auth_list %} + {% if auth_list.login_or_enable is defined and auth_list.login_or_enable %} + {% if auth_list.name is defined and auth_list.name %} + {% if auth_list.state is defined and auth_list.state == "absent" %} +no aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} + {% else %} + {% if auth_list.server is defined and auth_list.server %} + {% if auth_list.use_password is defined and auth_list.use_password %} +aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.server }} {{ auth_list.use_password }} + {% else %} +aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.server }} + {% endif %} + {% else %} + {% if auth_list.use_password is defined and auth_list.use_password %} +aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.use_password }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os6_aaa.aaa_authentication.dot1x is defined %} + {% set aaa_authentication = os6_aaa.aaa_authentication %} + {% if aaa_authentication.dot1x == "none" or aaa_authentication.dot1x == "radius" or aaa_authentication.dot1x == "ias" %} +aaa authentication dot1x default {{ aaa_authentication.dot1x }} + {% else %} +no aaa authentication dot1x default + {% endif %} + {% endif %} + {% endif %} + {% if os6_aaa.aaa_authorization is defined and os6_aaa.aaa_authorization %} + {% set aaa_authorization = os6_aaa.aaa_authorization %} + {% if aaa_authorization.exec is defined and aaa_authorization.exec %} + {% for command in aaa_authorization.exec %} + {% if 
command.authorization_list_name is defined and command.authorization_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa authorization exec {{ command.authorization_list_name }} + {% else %} + {% if command.use_data is defined and command.use_data %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization exec "{{ command.authorization_list_name }}" {{ command.use_data }} {{ command.authorization_method }} + {% else %} +aaa authorization exec "{{ command.authorization_list_name }}" {{ command.use_data }} + {% endif %} + {% else %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization exec "{{ command.authorization_list_name }}" {{ command.authorization_method }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os6_aaa.aaa_authorization.network is defined %} + {% set aaa_authorization = os6_aaa.aaa_authorization %} + {% if aaa_authorization.network %} +aaa authorization network default {{ aaa_authorization.network }} + {% else %} +no aaa authorization network default radius + {% endif %} + {% endif %} + {% endif %} + {% if os6_aaa.aaa_accounting is defined and os6_aaa.aaa_accounting %} + {% set aaa_accounting = os6_aaa.aaa_accounting %} + {% if aaa_accounting.dot1x is defined %} + {% if aaa_accounting.dot1x == "none" %} +aaa accounting dot1x default none + {% elif aaa_accounting.dot1x %} +aaa accounting dot1x default {{ aaa_accounting.dot1x }} radius + {% else %} +no aaa accounting dot1x default + {% endif %} + {% endif %} + {% endif %} + {% if os6_aaa.aaa_server is defined and os6_aaa.aaa_server %} + {% set aaa_server = os6_aaa.aaa_server %} + {% if aaa_server.radius is defined and aaa_server.radius %} + {% if aaa_server.radius.dynamic_author is defined and aaa_server.radius.dynamic_author %} + {% set dynamic_author = aaa_server.radius.dynamic_author %} + {% if dynamic_author.state is defined 
%} + {% if dynamic_author.state == "absent" %} +no aaa server radius dynamic-author + {% else %} +aaa server radius dynamic-author + {% if dynamic_author.client is defined and dynamic_author.client %} + {% for client in dynamic_author.client %} + {% if ((client.state is defined and client.state) and (client.ip is defined and client.ip)) %} + {% if client.state == "absent" %} +no client {{ client.ip }} + {% else %} + {% if client.key is defined and (client.key == 0 or client.key == 7) %} + {% if client.key_string is defined and client.key_string %} +client {{ client.ip }} server-key {{ client.key }} {{ client.key_string }} + {% endif %} + {% elif client.key_string is defined and client.key_string %} +client {{ client.ip }} server-key {{ client.key_string }} + {% else %} +client {{ client.ip }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if dynamic_author.auth_type is defined %} + {% if dynamic_author.auth_type %} +auth-type {{ dynamic_author.auth_type }} + {% else %} +no auth-type + {% endif %} + {% endif %} +exit + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml new file mode 100644 index 00000000..9fceb389 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml @@ -0,0 +1,111 @@ +--- +# vars file for dellemc.os6.os6_aaa, +# below 
gives a sample configuration +# Sample variables for OS6 device + +os6_aaa: + tacacs_server: + key: 0 + key_string: aacdsvdfsvfsfvfsv + host: + - ip: 30.0.0.10 + key: + key_string: ahvksjvskjvbkjsd + auth_port: 6 + timeout: 6 + state: absent + radius_server: + key: 0 + key_string: ahvksjvskjvb + retransmit: 4 + timeout: 5 + host: + - ip: 40.0.0.10 + key: 0 + key_string: ahvksjvskjvbkjsd + retransmit: 4 + auth_port: 6 + timeout: 6 + state: absent + acct: + - ip: 10.0.0.1 + key: 0 + key_string: asvkbjfssvfsf + auth_port: 2 + state: present + auth: + - ip: 10.0.0.2 + key: 0 + key_string: asdnksfnfnksnddjknsdn + name: radius + usage: all + priority: 2 + timeout: 2 + retransmit: 5 + auth_port: 3 + deadtime: 6 + attribute: + - id: 6 + type: "{{attribute_type.mandatory}}" + state: present + - id: 31 + type: "{{attribute_type.mac_ietf_lower_case}}" + state: present + - id: 31 + type: "{{attribute_type.mac}}" + state: absent + state: present + attribute: + - id: 6 + type: "{{attribute_type.mandatory}}" + state: present + - id: 31 + type: "{{attribute_type.mac_ietf_lower_case}}" + state: present + + + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: tacacs + use_password: local + state: absent + - name: console + server: radius + login_or_enable: login + use_password: local + state: absent + - name: tacp + server: tacacs + login_or_enable: enable + use_password: enable + state: absent + dot1x: none + aaa_authorization: + exec: + - authorization_list_name: aaa + authorization_method: none + use_data: local + state: absent + network: radius + aaa_accounting: + dot1x: none + aaa_server: + radius: + dynamic_author: + auth_type: + client: + - ip: 10.0.0.1 + key: 0 + key_string: aaskjsksdkjsdda + state: present + - ip: 10.0.0.2 + key: + key_string: aaskjsksdkjsdda + state: present + - ip: 10.0.0.3 + key: + key_string: + state: present + state: present \ No newline at end of file diff --git 
a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml new file mode 100644 index 00000000..e0cf41fc --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_aaa diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml new file mode 100644 index 00000000..dedb2f7d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_aaa \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE b/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/README.md b/ansible_collections/dellemc/os6/roles/os6_acl/README.md new file mode 100644 index 00000000..f8d97abe --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/README.md @@ -0,0 +1,118 @@ +ACL role +======== + +This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The ACL role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_acl keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os6 | +| ``name`` | string (required) | Configures the name of the access-control list | os6 | +| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os6 | +| ``remark.description`` | string | Configures the remark description | os6 | +| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os6 | +| ``entries`` | list | Configures ACL rules (see ``seqlist.*``) | os6 | +| 
``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os6 | +| ``entries.seq_number`` | integer (required) | Specifies the sequence number of the ACL rule | os6 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os6 | +| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os6 | +| ``entries.match_condition`` | string (required): any/ \/ \/ \/\ | Specifies the command in string format | os6 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os6 | +| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os6 | +| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os6 | +| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os6 | +| ``stage_ingress.seq_number`` | integer | Configure the sequence number (greater than 0) to rank precedence for this interface and direction | os6 | +| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os6 | +| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os6 | +| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os6 | +| ``stage_egress.seq_number`` | integer | Configure the sequence number (greater than 0) to rank precedence for this interface and direction | os6 | +| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. 
| +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os6_acl* role. + +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + os6_acl: + - type: ipv4 + name: ssh-only + remark: + - description: "ipv4remark" + state: present + entries: + - number: 4 + seq_number: 1000 + permit: true + protocol: tcp + match_condition: any any + state: present + stage_ingress: + - name: vlan 30 + state: present + seq_number: 50 + stage_egress: + - name: vlan 40 + state: present + seq_number: 40 + state: present + +**Simple playbook to setup system - switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_acl + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml new file mode 100644 index 00000000..92931d8b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_acl \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml new file mode 100644 index 00000000..eeab7f6f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_acl \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml new file mode 100644 index 00000000..c7abf91b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2017-2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml new file mode 100644 index 00000000..6ead1fa9 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating ACL configuration for os6" + template: + src: os6_acl.j2 + dest: "{{ build_dir }}/acl6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning ACL configuration for os6" + dellemc.os6.os6_config: + src: os6_acl.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2 b/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2 new file mode 100644 index 00000000..3d47c2eb --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2 @@ -0,0 +1,202 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure ACL commands for os6 devices + +os6_acl: + - name: macl-implicit + type: mac + remark: + - description: 1 + number: 3 + state: present + entries: + - number: 4 + seq_number: 1000 + permit: false + protocol: + match_condition: any 0000.1F3D.084B 0000.0000.0000 + state: present + - number: 5 + seq_number: 1001 + permit: true + protocol: + match_condition: any any 0x0806 + state: present + - number: 6 + seq_number: 2002 + permit: deny + protocol: + match_condition: any any + state: + stage_ingress: + - name: vlan 30 + state: present + seq_number: 40 + 
- name: vlan 50 + state: present + seq_number: 50 + stage_egress: + - name: + state: + seq_number: + state: present +#####################################} +{% if os6_acl is defined and os6_acl %} +{% set acl_dict = {} %} + {% for val in os6_acl %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} + {% if val.type is defined and val.type == "ipv4" %} +no ip access-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +no ipv6 access-list {{ val.name }} + {% elif val.type is defined and val.type == "mac" %} +no mac access-list extended {{ val.name }} + {% endif %} + {% else %} + {% if val.type is defined and (val.type == "ipv4" or val.type == "ipv6" or val.type == "mac") %} + {% if val.type == "mac" %} +{{ val.type }} access-list extended {{ val.name }} + {% elif val.type == "ipv4" %} +ip access-list {{ val.name }} + {% else %} +{{ val.type }} access-list {{ val.name }} + {% endif %} + {% if val.remark is defined and val.remark %} + {% for remark in val.remark %} + {% if remark.description is defined and remark.description %} + {% if remark.state is defined and remark.state == "absent" %} +no remark {{ remark.description }} + {% else %} +remark {{ remark.description }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.entries is defined and val.entries %} + {% for rule in val.entries %} + {% if rule.seq_number is defined and rule.seq_number %} + {% if rule.state is defined and rule.state == "absent" %} +no {{ rule.seq_number }} + {% else %} +{% set seq_num = rule.seq_number %} + {% if rule.permit is defined %} + {% if rule.permit %} + {% set is_permit = "permit" %} + {% else %} + {% set is_permit = "deny" %} + {% endif %} + {% endif %} + {% if rule.protocol is defined and rule.protocol %} + {% set protocol = rule.protocol %} + {% else %} + {% set protocol = "" %} + {% endif %} + {% if rule.protocol is defined and rule.protocol and rule.match_condition is defined and 
rule.match_condition %} +{{ seq_num }} {{ is_permit }} {{ protocol }} {{ rule.match_condition }} + {% elif rule.protocol is defined and rule.protocol %} +{{ seq_num }} {{ is_permit }} {{ protocol }} + {% elif rule.match_condition is defined and rule.match_condition %} +{{ seq_num }} {{ is_permit }} {{ rule.match_condition }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +exit + {% if val.stage_ingress is defined and val.stage_ingress %} + {% for intf in val.stage_ingress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +{% set key = intf.name %} +{% set key_val_list = acl_dict.setdefault(key, []) %} + {% if val.type is defined and val.type == "mac" %} +{% set value = ("no mac access-group " + val.name + " " + "in") %} +{% set acl_val = acl_dict[key].append(value) %} + {% elif val.type is defined and val.type == "ipv4" %} +{% set value = ("no ip access-group " + val.name + " " + "in") %} +{% set acl_val = acl_dict[key].append(value) %} + {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +{% set key = intf.name %} +{% set key_val_list = acl_dict.setdefault(key, []) %} + {% if val.type is defined and val.type == "mac" %} + {% if intf.seq_number is defined and intf.seq_number %} +{% set value1 = intf.seq_number|string %} +{% set value = ("mac access-group " + val.name + " " + "in " + value1) %} +{% set acl_val = acl_dict[key].append(value) %} + {% else %} +{% set value = ("mac access-group " + val.name + " " + "in ") %} +{% set acl_val = acl_dict[key].append(value) %} + {% endif %} + {% elif val.type is defined and val.type == "ipv4" %} + {% if intf.seq_number is defined and intf.seq_number %} +{% set value1 = intf.seq_number|string %} +{% set value = ("ip access-group " + val.name + " " + "in " + value1) %} +{% set acl_val = acl_dict[key].append(value) %} + {% else %} +{% set value = ("ip access-group " + val.name + " " + "in ") %} +{% set acl_val = 
acl_dict[key].append(value) %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.stage_egress is defined and val.stage_egress %} + {% for intf in val.stage_egress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +{% set key = intf.name %} +{% set key_val_list = acl_dict.setdefault(key, []) %} + {% if val.type is defined and val.type == "mac" %} +{% set value = ("no mac access-group " + val.name + " " + "out") %} +{% set acl_val = acl_dict[key].append(value) %} + {% elif val.type is defined and val.type == "ipv4" %} +{% set value = ("no ip access-group " + val.name + " " + "out") %} +{% set acl_val = acl_dict[key].append(value) %} + {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +{% set key = intf.name %} +{% set key_val_list = acl_dict.setdefault(key, []) %} + {% if val.type is defined and val.type == "mac" %} + {% if intf.seq_number is defined and intf.seq_number %} +{% set value1 = intf.seq_number|string %} +{% set value = ("mac access-group " + val.name + " " + "out " + value1) %} +{% set acl_val = acl_dict[key].append(value) %} + {% else %} +{% set value = ("mac access-group " + val.name + " " + "out ") %} +{% set acl_val = acl_dict[key].append(value) %} + {% endif %} + {% elif val.type is defined and val.type == "ipv4" %} + {% if intf.seq_number is defined and intf.seq_number %} +{% set value1 = intf.seq_number|string %} +{% set value = ("ip access-group " + val.name + " " + "out " + value1) %} +{% set acl_val = acl_dict[key].append(value) %} + {% else %} +{% set value = ("ip access-group " + val.name + " " + "out ") %} +{% set acl_val = acl_dict[key].append(value) %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% for intf_name, acl_list in acl_dict.items() %} +interface {{ intf_name }} + {% for acl in acl_list %} +{{ 
acl }} + {% endfor %} +exit + {% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml new file mode 100644 index 00000000..43c3f17c --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml @@ -0,0 +1,43 @@ +--- +# vars file for dellemc.os6.os6_acl, +# below gives a sample configuration +# Sample variables for OS6 device + + +os6_acl: + - name: macl-implicit + type: mac + remark: + - description: 1 + state: present + entries: + - number: 4 + seq_number: 1000 + permit: false + protocol: + match_condition: any 0000.1F3D.084B 0000.0000.0000 + state: present + - number: 5 + seq_number: 1001 + permit: true + protocol: + match_condition: any any 0x0806 + state: present + - number: 6 + seq_number: 2002 + permit: deny + protocol: + match_condition: any any + state: + stage_ingress: + - name: vlan 30 + state: present + seq_number: 40 + - name: vlan 50 + state: present + seq_number: 50 + stage_egress: + - name: vlan 40 + state: present + seq_number: + state: present diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml new file mode 100644 index 00000000..f8ccf3be --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_acl diff 
--git a/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml new file mode 100644 index 00000000..f294863a --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_acl diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/README.md b/ansible_collections/dellemc/os6/roles/os6_bgp/README.md new file mode 100644 index 00000000..8b7b6f6c --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/README.md @@ -0,0 +1,153 @@ +BGP role +======== + +This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The BGP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If variable `os6_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +> **NOTE**: IP routing needs to be enabled on the switch prior to configuring BGP via the *os6_bgp* role. 
+ +**os6_bgp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os6 | +| ``router_id`` | string | Configures the IP address of the local BGP router instance | os6 | +| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os6 | +| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os6 | +| ``ipv4_network`` | list | Configures an IPv4 BGP network (see ``ipv4_network.*``) | os6 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os6 | +| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os6 | +| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os6 | +| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os6 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os6 | +| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os6 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os6 | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os6 | +| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os6 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os6 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote 
AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os6 | +| ``neighbor.timer`` | string | Configures neighbor timers (keepalive holdtime); 5 10, where 5 is the keepalive interval and 10 is the holdtime, field needs to be left blank to remove the timer configurations | os6 | +| ``neighbor.default_originate`` | boolean: true, false\* | Configures default originate routes to the BGP neighbor, field needs to be left blank to remove the default originate routes | os6 | +| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os6 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os6 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os6 | +| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os6 | +| ``neighbor.src_loopback_state`` | string: absent,present\* | Deletes the source for routing packets if set to absent | os6 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255), field needs to be left blank to remove the maximum hop count value | os6 | +| ``neighbor.subnet`` | string (required) | Configures the passive BGP neighbor to this subnet | os6 | +| ``neighbor.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4 BGP neighbor if set to absent | os6 | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os6 | +| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os6 | +| ``redistribute.route_type`` | string (required): static,connected | Configures the name of the routing protocol to redistribute | os6 | +| ``redistribute.address_type`` | string 
(required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os6 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os6 | +| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. | +| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os6_bgp* role. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_bgp: + asn: 11 + router_id: 192.168.3.100 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + ipv4_network: + - address: 102.1.1.0 255.255.255.255 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - ip: 192.168.10.2 + type: ipv4 + remote_asn: 12 + timer: 5 10 + default_originate: False + peergroup: per + admin: up + state: present + - ip: 2001:4898:5808:ffa2::1 + type: ipv6 + remote_asn: 14 + peergroup: per + state: present + - name: peer1 + type: peergroup + remote_asn: 14 + ebgp_multihop: 4 + subnet: 10.128.5.192/27 + state: present + - ip: 172.20.12.1 + type: ipv4 + remote_asn: 64640 + timer: 3 9 + redistribute: + - route_type: static + address_type: ipv4 + state: present + - route_type: connected + address_type: ipv6 + state: present + state: present + +**Simple playbook to configure BGP — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_bgp + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml new file mode 100644 index 00000000..58e963bb --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml new file mode 100644 index 00000000..e43b3fd0 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml new file mode 100644 index 00000000..eb2d26ee --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml new file mode 100644 index 00000000..acc2257f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating BGP configuration for os6" + template: + src: os6_bgp.j2 + dest: "{{ build_dir }}/bgp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning BGP configuration for os6" + dellemc.os6.os6_config: + src: os6_bgp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 b/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 new file mode 100644 index 00000000..4dd10197 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 @@ -0,0 +1,255 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ + +Purpose: +Configure BGP commands for os6 Devices + +os6_bgp: + asn: 11 + router_id: 1.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + ipv4_network: + - address: 101.1.2.0 255.255.255.0 + state: present + ipv6_network: + - address: 2001:4898:5808:ffa0::/126 + state: present + neighbor: + - type: ipv4 + ip: 10.10.234.16 + remote_asn: 64818 + timer: 2 5 + default_originate: True + peergroup: MUX_HNV_ACCESS + admin: up + state: present + - type: ipv6 + ip: 2001:4898:5808:ffa2::1 + timer: 2 4 + default_originate: True + remote_asn: 
64818 + peergroup: MUX_HNV_ACCESS + state: present + - type: peergroup + name: MUX_HNV_ACCESS + ebgp_multihop: 4 + subnet: 10.128.5.192/27 + remote_asn: 64918 + state: present + redistribute: + - route_type: connected + address_type: ipv6 + state: present + state: present + +################################} +{% if os6_bgp is defined and os6_bgp %} +{% set bgp_vars = os6_bgp %} +{% if bgp_vars.asn is defined and bgp_vars.asn %} + {% if bgp_vars.state is defined and bgp_vars.state=="absent" %} +no router bgp {{ bgp_vars.asn }} + {% else %} +{# Add Feature to the switch #} +router bgp {{ bgp_vars.asn }} + {% if bgp_vars.router_id is defined %} + {% if bgp_vars.router_id %} +bgp router-id {{ bgp_vars.router_id }} + {% else %} +no bgp router-id + {% endif %} + {% endif %} + {% if bgp_vars.maxpath_ebgp is defined %} + {% if bgp_vars.maxpath_ebgp %} +maximum-paths {{ bgp_vars.maxpath_ebgp }} + {% else %} +no maximum-paths + {% endif %} + {% endif %} + {% if bgp_vars.maxpath_ibgp is defined %} + {% if bgp_vars.maxpath_ibgp %} +maximum-paths ibgp {{ bgp_vars.maxpath_ibgp }} + {% else %} +no maximum-paths ibgp + {% endif %} + {% endif %} + {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %} + {% for net in bgp_vars.ipv4_network %} + {% if net.address is defined and net.address %} + {% set ip_and_mask= net.address.split(" ") %} + {% if net.state is defined and net.state=="absent" %} +{# remove BGP network announcement #} +no network {{ ip_and_mask[0] }} mask {{ ip_and_mask[1] }} +{# Add BGP network announcement #} + {% else %} +network {{ ip_and_mask[0] }} mask {{ ip_and_mask[1] }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %} + {% for neighbor in bgp_vars.neighbor %} + {% if neighbor.type is defined %} + {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %} + {% if neighbor.ip is defined and neighbor.ip %} + {% set tag_or_ip = neighbor.ip %} + {% if neighbor.remote_asn is defined 
and neighbor.remote_asn %} + {% if neighbor.state is defined and neighbor.state == "absent" %} +no neighbor {{ tag_or_ip }} remote-as + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} +no neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }} + {% endif %} + {% endif %} + {% else %} +neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }} + {% if neighbor.timer is defined %} + {% if neighbor.timer %} +neighbor {{ tag_or_ip }} timers {{ neighbor.timer }} + {% else %} +no neighbor {{ tag_or_ip }} timers + {% endif %} + {% endif %} + {% if neighbor.default_originate is defined %} + {% if neighbor.default_originate %} +neighbor {{ tag_or_ip }} default-originate + {% else %} +no neighbor {{ tag_or_ip }} default-originate + {% endif %} + {% endif %} + {% if neighbor.ebgp_multihop is defined %} + {% if neighbor.ebgp_multihop %} +neighbor {{ tag_or_ip }} ebgp-multihop {{ neighbor.ebgp_multihop }} + {% else %} +no neighbor {{ tag_or_ip }} ebgp-multihop + {% endif %} + {% endif %} + {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %} + {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %} +no neighbor {{ tag_or_ip }} update-source + {% else %} +neighbor {{ tag_or_ip }} update-source Loopback {{ neighbor.src_loopback }} + {% endif %} + {% endif %} + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} +no neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }} + {% else %} +neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }} + {% endif %} + {% endif %} + {% if neighbor.admin is defined %} + {% if neighbor.admin == "up" %} +no neighbor {{ tag_or_ip }} shutdown + {% elif neighbor.admin =="down" %} +neighbor {{ tag_or_ip }} shutdown + {% endif %} + {% endif %} + {% endif %} + {% endif 
%} + {% endif %} + {% elif neighbor.type == "peergroup" %} + {% if neighbor.name is defined and neighbor.name %} + {% set tag_or_ip = neighbor.name %} + {% if neighbor.state is defined and neighbor.state == "absent" %} +no template peer {{ tag_or_ip }} + {% else %} + {% if neighbor.subnet is defined and neighbor.subnet %} + {% if neighbor.subnet_state is defined and neighbor.subnet_state == "absent" %} +no bgp listen range {{ neighbor.subnet }} + {% else %} +bgp listen range {{ neighbor.subnet }} inherit peer {{ tag_or_ip }} + {% endif %} + {% endif %} +template peer {{ tag_or_ip }} + {% if neighbor.remote_asn is defined and neighbor.remote_asn %} + {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %} +no remote-as {{ neighbor.remote_asn }} + {% else %} +remote-as {{ neighbor.remote_asn }} + {% endif %} + {% endif %} + {% if neighbor.timer is defined %} + {% if neighbor.timer %} +timers {{ neighbor.timer }} + {% else %} +no timers + {% endif %} + {% endif %} + {% if neighbor.ebgp_multihop is defined %} + {% if neighbor.ebgp_multihop %} +ebgp-multihop {{ neighbor.ebgp_multihop }} + {% else %} +no ebgp-multihop + {% endif %} + {% endif %} + {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %} + {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %} +no update-source + {% else %} +update-source Lo{{ neighbor.src_loopback }} + {% endif %} + {% endif %} + {% if neighbor.admin is defined %} + {% if neighbor.admin == "up" %} +no shutdown + {% elif neighbor.admin =="down" %} +shutdown + {% endif %} + {% endif %} + {% if neighbor.default_originate is defined %} +address-family ipv4 + {% if neighbor.default_originate %} +default-originate + {% else %} +no default-originate + {% endif %} +exit + {% endif %} +exit + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %} + {% for 
net in bgp_vars.ipv6_network %} + {% if net.address is defined and net.address %} +address-family ipv6 + {% if net.state is defined and net.state=="absent" %} +{# remove BGP network announcement #} +no network {{ net.address }} +{# Add BGP network announcement #} + {% else %} +network {{ net.address }} + {% endif %} +exit + {% endif %} + {% endfor %} + {% endif %} + {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %} + {% for routes in bgp_vars.redistribute %} + {% if routes.route_type is defined and routes.route_type %} + {% if routes.address_type is defined and routes.address_type=="ipv6" %} +address-family ipv6 + {% if routes.state is defined and routes.state =="absent" %} +no redistribute {{ routes.route_type }} + {% else %} +redistribute {{ routes.route_type }} + {% endif %} +exit + {% elif routes.address_type is defined and routes.address_type=="ipv4" %} + {% if routes.state is defined and routes.state =="absent" %} +no redistribute {{ routes.route_type }} + {% else %} +redistribute {{ routes.route_type }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +exit +{% endif %} +{% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml new file mode 100644 index 00000000..388cb490 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml @@ -0,0 +1,7 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml new file mode 100644 index 00000000..c7625c60 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml @@ -0,0 +1,47 @@ +--- +# vars file for 
dellemc.os6.os6_bgp, +# below gives a sample configuration +# Sample variables for OS6 device + + os6_bgp: + asn: 11 + router_id: 1.1.1.1 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + ipv4_network: + - address: 101.1.2.0 255.255.255.0 + state: present + ipv6_network: + - address: 2001:4898:5808:ffa0::/126 + state: present + neighbor: + - type: ipv4 + ip: 10.10.234.16 + remote_asn: 64818 + timer: 2 5 + default_originate: True + peergroup: MUX_HNV_ACCESS + admin: up + src_loopback: 2 + src_loopback_state: present + state: present + - type: ipv6 + ip: 2001:4898:5808:ffa2::1 + timer: 2 4 + default_originate: True + ebgp_multihop: 3 + remote_asn: 64818 + peergroup: MUX_HNV_ACCESS + state: present + - type: peergroup + name: MUX_HNV_ACCESS + timer: 2 4 + ebgp_multihop: 4 + remote_asn: 64918 + subnet: 10.128.5.192/27 + state: present + redistribute: + - route_type: connected + address_type: ipv6 + state: present + state: present diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml new file mode 100644 index 00000000..b92fb6ca --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_bgp diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml new file mode 100644 index 00000000..22d0d344 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE b/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 
2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/README.md b/ansible_collections/dellemc/os6/roles/os6_interface/README.md
new file mode 100644
index 00000000..2c6b359f
--- /dev/null
+++ b/ansible_collections/dellemc/os6/roles/os6_interface/README.md
@@ -0,0 +1,110 @@
+Interface role
+==============
+
+This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
+
+The interface role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
+
+
+Role variables
+--------------
+
+- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
+- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
+- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable
+- Setting an empty value for any variable negates the corresponding configuration
+- `os6_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name
+- For physical interfaces, the interface name must be in *<interface_name> <tag/number>* format; for logical interfaces, the interface must be in *<logical_interface_name> <tag/number>* format; physical interface name can be *Te1/0/1* for os6 devices
+- For interface ranges, the interface name must be in *range <interface_type> <interface_range>* format
+- Logical interface names can be *vlan 1* or *port-channel 1*
+- Variables and values are case-sensitive
+
+> **NOTE**: Only define supported variables for the interface type. For example, do not define the *switchport* variable for a logical interface, and do not define an IP address for physical interfaces in OS6 devices.
+
+**os6_interface name keys**
+
+| Key        | Type                      | Description                                             | Support               |
+|------------|---------------------------|---------------------------------------------------------|-----------------------|
+| ``desc`` | string | Configures a single line interface description | os6 |
+| ``portmode`` | string | Configures port-mode according to the device type | os6 (access and trunk) |
+| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os6 |
+| ``suppress_ra`` | string: present,absent | Configures IPv6 router advertisements if set to present | os6 |
+| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os6 |
+| ``ip_and_mask`` | string | Configures the specified IP address to the interface VLAN on os6 devices (192.168.11.1 255.255.255.0 format) | os6 |
+| ``ipv6_and_mask`` | string | Configures a specified IP address to the interface VLAN on os6 devices (2001:4898:5808:ffa2::1/126 format) | os6 |
+| ``ipv6_reachabletime`` | integer | Configures the reachability time for IPv6 neighbor discovery (0 to 3600000); field needs to be left blank to remove the reachability time | os6 |
+| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os6 |
+| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os6 |
+| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os6 |
+
+> **NOTE**: Asterisk (*) denotes the default value if none is specified.
+
+Connection variables
+--------------------
+
+Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory.
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. | +| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+
+Example playbook
+----------------
+
+This example uses the *os6-interface* role to set up description, MTU, admin status, portmode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
+
+When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os6-interface* role.
+
+**Sample hosts file**
+
+    switch1 ansible_host= <ip_address>
+
+**Sample host_vars/switch1**
+
+    hostname: "switch1"
+    ansible_become: yes
+    ansible_become_method: enable
+    ansible_become_pass: xxxxx
+    ansible_ssh_user: xxxxx
+    ansible_ssh_pass: xxxxx
+    ansible_network_os: dellemc.os6.os6
+    build_dir: ../temp/temp_os6
+
+    os6_interface:
+        Te1/0/8:
+            desc: "Connected to Spine1"
+            portmode: trunk
+            admin: up
+        vlan 100:
+            admin: down
+            ip_and_mask: 3.3.3.3 255.255.255.0
+            ipv6_and_mask: 2002:4898:5408:faaf::1/64
+            suppress_ra: present
+            ip_helper:
+              - ip: 10.0.0.36
+                state: absent
+            ipv6_reachabletime: 600000
+        vlan 20:
+            suppress_ra: absent
+            admin: up
+
+**Simple playbook to setup system — switch1.yaml**
+
+    - hosts: switch1
+      roles:
+        - dellemc.os6.os6_interface
+
+**Run**
+
+    ansible-playbook -i hosts switch1.yaml
+
+(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml new file mode 100644 index 00000000..076dd792 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml new file mode 100644 index 00000000..a46800e0 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_interface diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml new file mode 100644 index 00000000..ed39e191 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_interface role facilitates the configuration of interface attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml new file mode 100644 index 00000000..198d8600 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating interface configuration for os6" + template: + src: os6_interface.j2 + dest: "{{ build_dir }}/intf6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning interface configuration for os6" + dellemc.os6.os6_config: + src: os6_interface.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2 b/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2 new file mode 100644 index 00000000..72e72eaa --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2 @@ -0,0 +1,94 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ + +Purpose: +Configure interface commands for os6 Devices. 
+ +os6_interface: + Te1/0/1: + desc: "connected to spine1" + portmode: trunk + admin: up + vlan 100: + ip_type_dynamic: False + ip_and_mask: 3.3.3.3 255.255.255.0 + suppress_ra: present + vlan 101: + ipv6_and_mask: 2001:db8:3c4d:15::/64 + ipv6_reachabletime: 6000 + ip_helper: + - ip: 10.0.0.33 + state: present + vlan 102: + ip_type_dynamic: True + +################################} +{% if os6_interface is defined and os6_interface %} +{% for key in os6_interface.keys() %} +interface {{ key }} +{% set intf_vars = os6_interface[key] %} +{% if intf_vars.desc is defined %} + {% if intf_vars.desc %} +description "{{ intf_vars.desc }}" + {% else %} +no description + {% endif %} +{% endif %} +{% if intf_vars.portmode is defined %} + {% if intf_vars.portmode %} +switchport mode {{ intf_vars.portmode }} + {% else %} +no switchport mode + {% endif %} +{% endif %} +{% if intf_vars.admin is defined %} + {% if intf_vars.admin == "up"%} +no shutdown + {% elif intf_vars.admin == "down" %} +shutdown + {% endif %} +{% endif %} +{% if intf_vars.ip_type_dynamic is defined and intf_vars.ip_type_dynamic %} +ip address dhcp +{% elif intf_vars.ip_and_mask is defined %} + {% if intf_vars.ip_and_mask %} +ip address {{ intf_vars.ip_and_mask }} + {% else %} +no ip address + {% endif %} +{% endif %} +{% if intf_vars.suppress_ra is defined %} + {% if intf_vars.suppress_ra == "present" %} +ipv6 nd suppress-ra + {% else %} +no ipv6 nd suppress-ra + {% endif %} +{% endif %} +{% if intf_vars.ipv6_and_mask is defined %} + {% if intf_vars.ipv6_and_mask %} +ipv6 address {{ intf_vars.ipv6_and_mask }} + {% else %} +no ipv6 address + {% endif %} +{% endif %} +{% if intf_vars.ipv6_reachabletime is defined %} + {% if intf_vars.ipv6_reachabletime %} +ipv6 nd reachable-time {{ intf_vars.ipv6_reachabletime }} + {% else %} +no ipv6 nd reachable-time + {% endif %} +{% endif %} +{% if intf_vars.ip_helper is defined and intf_vars.ip_helper %} + {% for helper in intf_vars.ip_helper %} + {% if helper.ip is defined 
and helper.ip %} + {% if helper.state is defined and helper.state=="absent" %} +no ip helper-address {{ helper.ip }} + {% else %} +ip helper-address {{ helper.ip }} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +exit +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml new file mode 100644 index 00000000..5513a7eb --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml @@ -0,0 +1,28 @@ +# vars file for dellemc.os6.os6_interface +# Sample variables for OS6 device + +os6_interface: + Te1/0/2: + desc: "Connected to Spine1" + portmode: trunk + admin: up + Te1/0/1: + desc: "Connected to Access" + portmode: access + admin: up + vlan 100: + ip_type_dynamic: True + suppress_ra: present + ip_and_mask: 3.3.3.3 255.255.255.0 + ip_helper: + - ip: 10.0.0.36 + state: present + ipv6_reachabletime: 600000 + vlan 101: + ipv6_and_mask: 2001:db8:3c4d:15::/64 + suppress_ra: absent + vlan 102: + ip_type_dynamic: True + suppress_ra: + vlan 105: + ip_and_mask: 1.1.1.1 255.255.255.0 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml new file mode 100644 index 00000000..ffd90b31 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml @@ -0,0 
+1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml new file mode 100644 index 00000000..ab9d1f82 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE b/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/README.md b/ansible_collections/dellemc/os6/roles/os6_lag/README.md new file mode 100644 index 00000000..ff82984d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/README.md @@ -0,0 +1,96 @@ +LAG role +======== + +This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG, hash scheme in os6 devices, and minimum required link. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The LAG role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- Object drives the tasks in this role +- `os6_lag` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- `os6_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po ` format (1 to 128 for os6) +- Variables and values are case-sensitive + +**port-channel ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os6 | +| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 8), field needs to be left blank to remove the minimum number of links | os6 | 
+| ``hash`` | integer | Configures the hash value for OS6 devices (1 to 7), field needs to be left blank to remove the hash value | os6 | +| ``channel_members`` | list | Specifies the list of port members to be associated to the port channel (see ``channel_members.*``) | os6 | +| ``channel_members.port`` | string | Specifies valid OS6 interface names to be configured as port channel members | os6 | +| ``channel_members.state`` | string: absent,present | Deletes the port member association if set to absent | os6 | +| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port channel ID if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device. | +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_lag* role to setup port channel ID and description, and configures hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel either in static or dynamic mode. You can also delete the LAG with the port channel ID or delete the members associated to it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os6-lag* role. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_lag: + Po 127: + type: static + hash: 7 + min_links: 3 + channel_members: + - port: Fo4/0/1 + state: present + - port: Fo4/0/1 + state: present + state: present + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_lag + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml new file mode 100644 index 00000000..e9a1b31f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml new file mode 100644 index 00000000..859d5c2b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_lag diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml new file mode 100644 index 00000000..897a47ae --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_lag role facilitates the configuration of LAG attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml new file mode 100644 index 00000000..832d5493 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating LAG configuration for os6" + template: + src: os6_lag.j2 + dest: "{{ build_dir }}/lag6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning LAG configuration for os6" + dellemc.os6.os6_config: + src: os6_lag.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2 b/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2 new file mode 100644 index 00000000..39b0a53b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2 @@ -0,0 +1,78 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ + +Purpose: +Configure LAG commands for os6 Devices. 
+ +os6_lag: + Po 1: + type: static + min_links: 2 + hash: 7 + channel_members: + - port: Te1/0/2 + state: present + - port: Te1/0/1 + state: absent + state: present + +################################} +{% if os6_lag is defined and os6_lag %} +{% for key in os6_lag.keys() %} +{% set channel_id = key.split(" ") %} +{% set lag_vars = os6_lag[key] %} + {% if lag_vars.state is defined and lag_vars.state=="absent" %} +interface port-channel {{ channel_id[1] }} +no shutdown +no description +no hashing-mode +exit + {% else %} +interface port-channel {{ channel_id[1] }} + {% if lag_vars.hash is defined %} + {% if lag_vars.hash %} +hashing-mode {{ lag_vars.hash }} + {% else %} +no hashing-mode + {% endif %} + {% endif %} + {% if lag_vars.min_links is defined %} + {% if lag_vars.min_links %} +port-channel min-links {{ lag_vars.min_links }} + {% else %} +no port-channel min-links + {% endif %} + {% endif %} +exit + {% if lag_vars.channel_members is defined %} + {% for ports in lag_vars.channel_members %} + {% if lag_vars.type is defined and lag_vars.type == "static" %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state=="absent" %} +interface {{ ports.port }} +no channel-group +exit + {% else %} +interface {{ ports.port }} +channel-group {{ channel_id[1] }} mode on +exit + {% endif %} + {% endif %} + {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state=="absent" %} +interface {{ ports.port }} +no channel-group +exit + {% else %} +interface {{ ports.port }} +channel-group {{ channel_id[1] }} mode active +exit + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null 
+++ b/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml new file mode 100644 index 00000000..77728edb --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml @@ -0,0 +1,15 @@ +--- +# vars file for dellemc.os6.os6_lag +# Sample variables for OS6 device + +os6_lag: + Po 128: + type: static + hash: 7 + min_links: 3 + channel_members: + - port: Te1/0/1 + state: absent + - port: Te1/0/2 + state: present + state: present diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml new file mode 100644 index 00000000..44ee544e --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_lag diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml new file mode 100644 index 00000000..9cf92917 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/README.md b/ansible_collections/dellemc/os6/roles/os6_lldp/README.md new file mode 100644 index 00000000..d29653b2 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/README.md @@ -0,0 +1,114 @@ +LLDP role +========= + +This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at a global and interface level. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at global and interface level. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The LLDP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_lldp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``timers`` | dictionary | Configures the LLDP global timer value | os6 | +| ``timers.interval`` | integer | Configures the interval in seconds to transmit local LLDP data (5 to 32768), field needs to be left blank to remove the interval | os6 | +| ``timers.hold`` | integer | Configures the interval multiplier to set local LLDP data TTL (2 to 10), field needs to be left blank to remove the interval multiplier | os6 | +| ``timers.reinit`` | integer | Configures the reinit value (1 to 10), field needs to be left blank to remove the reinit value | os6 | +| ``notification_interval`` | integer | Configures the minimum interval to send 
 remote data change notifications (5 to 3600), field needs to be left blank to remove the minimum interval | os6 | +| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os6 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see ``med_tlv.*``) | os6 | +| ``med.global_med`` | boolean | Configures global MED TLVs advertisement | os6 | +| ``med.fast_start_repeat_count`` | integer | Configures MED fast start repeat count value (1 to 10), field needs to be left blank to remove the value | os6 | +| ``med.config_notification`` | boolean | Configure all the ports to send the topology change notification | os6 | +| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os6 | +| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``<interface name>.*``) | os6 | +| ``<interface name>.mode`` | dictionary: rx,tx | Configures LLDP mode configuration at the interface level | os6 | +| ``<interface name>.mode.tx`` | boolean | Enables/disables LLDP transmit capability at interface level | os6 | +| ``<interface name>.mode.rx`` | boolean | Enables/disables LLDP receive capability at interface level | os6 | +| ``<interface name>.notification`` | boolean | Enables/disables LLDP remote data change notifications at interface level | os6 | +| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os6 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os6 | +| ``med.enable`` | boolean | Enables interface level MED capabilities | os6 | +| ``med.config_notification`` | boolean | Configures sending the topology change notification |os6 | + + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_lldp* role to configure protocol lldp. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os6_lldp* role. + +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/os6 + os6_lldp: + timers: + reinit: 2 + interval: 5 + hold: 5 + notification_interval: 5 + advertise: + med: + global_med: true + fast_start_repeat_count: 4 + config_notification: true + local_interface: + Gi1/0/1: + mode: + tx: true + rx: false + notification: true + advertise: + med: + config_notification: true + enable: true + + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_lldp + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml new file mode 100644 index 00000000..b2f3b089 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml new file mode 100644 index 00000000..d1beaa3d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_lldp diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml new file mode 100644 index 00000000..044e7b46 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os6_lldp role facilitates the configuration of Link Layer Discovery Protocol(LLDP) attributes in devices + running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml new file mode 100644 index 00000000..c84ca513 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating LLDP configuration for os6" + template: + src: os6_lldp.j2 + dest: "{{ build_dir }}/lldp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning LLDP configuration for os6" + dellemc.os6.os6_config: + src: os6_lldp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2 b/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2 new file mode 100644 index 00000000..e8d2e94c --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2 @@ -0,0 +1,159 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################################### +Purpose: +Configure LLDP commands for os6 Devices. 
+ +os6_lldp: + timers: + reinit: 2 + interval: 5 + hold: 5 + notification_interval: 5 + advertise: + med: + global_med: true + fast_start_repeat_count: 4 + config_notification: true + local_interface: + Gi1/0/1: + mode: + tx: true + rx: false + notification: true + advertise: + med: + config_notification: true + enable: true + + +{###############################################################################################} +{% if os6_lldp is defined and os6_lldp %} +{% for key,value in os6_lldp.items() %} + {% if key == "timers" %} + {% if value %} + {% set item = os6_lldp.timers %} + {% if item.reinit is defined and item.reinit and item.interval is defined and item.interval and item.hold is defined and item.hold %} +lldp timers interval {{ item.interval }} hold {{ item.hold }} reinit {{ item.reinit }} + {% elif item.reinit is defined and item.reinit and item.interval is defined and item.interval %} +lldp timers interval {{ item.interval }} reinit {{ item.reinit }} + {% elif item.reinit is defined and item.reinit and item.hold is defined and item.hold %} +lldp timers hold {{ item.hold }} reinit {{ item.reinit }} + {% elif item.interval is defined and item.interval and item.hold is defined and item.hold %} +lldp timers interval {{ item.interval }} hold {{ item.hold }} + {% else %} + {% if item.reinit is defined %} + {% if item.reinit %} +lldp timers reinit {{ item.reinit }} + {% else %} +no lldp timers reinit {{ item.reinit }} + {% endif %} + {% endif %} + {% if item.interval is defined %} + {% if item.interval %} +lldp timers interval {{ item.interval }} + {% else %} +no lldp timers interval {{ item.interval }} + {% endif %} + {% endif %} + {% if item.hold is defined %} + {% if item.hold %} +lldp timers hold {{ item.hold }} + {% else %} +no lldp timers hold {{ item.hold }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% elif key == "notification_interval" %} + {% if value %} +lldp notification-interval {{ value }} + {% else %} +no lldp 
notification-interval + {% endif %} + {% elif key == "advertise" %} + {% if value %} + {% for ke,valu in value.items() %} + {% if ke == "med" %} + {% if valu %} + {% for med,val in valu.items() %} + {% if med == "fast_start_repeat_count" %} + {% if val %} +lldp med faststartrepeatcount {{ val }} + {% else %} +no lldp med faststartrepeatcount + {% endif %} + {% elif med == "config_notification" %} + {% if val %} +lldp med confignotification all + {% else %} +no lldp med confignotification all + {% endif %} + {% elif med == "global_med" %} + {% if val %} +lldp med all + {% else %} +no lldp med all + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} +{% if os6_lldp is defined and os6_lldp %} +{% for key in os6_lldp.keys() %} +{% set lldp_vars = os6_lldp[key] %} +{% if key == "local_interface" %} + {% for intf in lldp_vars.keys() %} + {% set intf_vars = lldp_vars[intf] %} +interface {{ intf }} + {% if intf_vars.mode is defined and intf_vars.mode %} + {% set intf_vars_mode = intf_vars.mode %} + {% if intf_vars_mode.tx is defined %} + {% if intf_vars_mode.tx %} +lldp transmit + {% else %} +no lldp transmit + {% endif %} + {% endif %} + {% if intf_vars_mode.rx is defined %} + {% if intf_vars_mode.rx %} +lldp receive + {% else %} +no lldp receive + {% endif %} + {% endif %} + {% endif %} + {% if intf_vars.notification is defined %} + {% if intf_vars.notification %} +lldp notification + {% else %} +no lldp notification + {% endif %} + {% endif %} + {% if intf_vars.advertise is defined and intf_vars.advertise %} + {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %} + {% if intf_vars.advertise.med.enable is defined %} + {% if intf_vars.advertise.med.enable %} +lldp med + {% else %} +no lldp med + {% endif %} + {% endif %} + {% if intf_vars.advertise.med.config_notification is defined %} + {% if intf_vars.advertise.med.config_notification %} +lldp med confignotification 
+ {% else %} +no lldp med confignotification + {% endif %} + {% endif %} + {% endif %} + {% endif %} +exit + {% endfor %} +{% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml new file mode 100644 index 00000000..4d630fe4 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml @@ -0,0 +1,26 @@ +--- +# vars file for dellemc.os6.os6_lldp, +# below gives a sample configuration + # Sample variables for OS6 device + +os6_lldp: + timers: + reinit: 2 + interval: 5 + hold: 5 + notification_interval: 5 + advertise: + med: + global_med: true + fast_start_repeat_count: 4 + config_notification: true + local_interface: + Gi1/0/1: + mode: + tx: true + rx: false + notification: true + advertise: + med: + config_notification: true + enable: true \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml new file mode 100644 index 00000000..b443e046 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml 
b/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml new file mode 100644 index 00000000..d602eda7 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_lldp diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE b/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
 + along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/README.md b/ansible_collections/dellemc/os6/roles/os6_logging/README.md new file mode 100644 index 00000000..037d3781 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/README.md @@ -0,0 +1,89 @@ +Logging role +============ + +This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The Logging role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If the `os6_cfg_generate` variable is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_logging keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``logging`` | list | Configures the logging server (see ``logging.*``) | os6 | +| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os6 | +| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os6 | +| ``source_interface`` | string | Configures the source interface for logging, it can take values as loopback interface, vlan ID, out-of-band interface and tunnel ID only, field needs to be left blank to remove the source interface | os6 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. 
+ +**Sample hosts file** + + switch1 ansible_host= + +#### Sample host_vars/switch1 + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_logging: + logging: + - ip : 1.1.1.1 + state: present + - ip: 2.2.2.2 + state: present + - ip: 3.3.3.3 + state: present + source_interface: "vlan 10" + +**Simple playbook to setup logging — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_logging + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml new file mode 100644 index 00000000..14475f6d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_logging \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml new file mode 100644 index 00000000..f88d8db5 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_logging diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml new file mode 100644 index 00000000..518c92a3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_logging role facilitates the configuration of logging attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml new file mode 100644 index 00000000..eb47e41e --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating logging configuration for os6" + template: + src: os6_logging.j2 + dest: "{{ build_dir }}/logging6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning logging configuration for os6" + dellemc.os6.os6_config: + src: os6_logging.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2 b/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2 new file mode 100644 index 00000000..a1a30f25 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2 @@ -0,0 +1,36 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure logging commands for os6 Devices + +os6_logging: + logging: + - ip: 1.1.1.1 + state: absent + source_interface: "vlan 30" + +#####################################} +{% if os6_logging is defined and os6_logging %} + +{% for key,value in os6_logging.items() %} + {% if key == "logging" %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.state is defined and item.state == "absent" %} +no logging {{ item.ip }} + {% else %} +logging {{ item.ip }} 
+exit + {% endif %} + {% endif %} + {% endfor %} + {% elif key == "source_interface" %} + {% if value %} +logging source-interface {{ value }} + {% else %} +no logging source-interface + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml new file mode 100644 index 00000000..550ed665 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml @@ -0,0 +1,10 @@ +--- +# vars file for dellemc.os6.os6_logging, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_logging: + logging: + - ip: 1.1.1.1 + state: present + source_interface: "vlan 30" \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml new file mode 100644 index 00000000..59ec49c1 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_logging diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml new file mode 100644 index 00000000..329db6af --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for 
dellemc.os6.os6_logging diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/README.md b/ansible_collections/dellemc/os6/roles/os6_ntp/README.md new file mode 100644 index 00000000..6fbdec55 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/README.md @@ -0,0 +1,82 @@ +NTP role +======== + +This role facilitates the configuration of network time protocol (NTP) attributes, and it specifically enables configuration of NTP server. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The NTP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_ntp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``server`` | list | Configures the NTP server (see ``server.*``) | os6 | +| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os6 | +| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_ntp* role to set the NTP server, source ip, authentication and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When the `os6_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os6_ntp* role. +By including the role, you automatically get access to all of the tasks to configure NTP attributes. + +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + host: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_ntp: + server: + - ip: 2.2.2.2 + state: absent + +**Simple playbook to setup NTP — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_ntp + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml new file mode 100644 index 00000000..5cc2de16 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml new file mode 100644 index 00000000..8a8a2a41 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml new file mode 100644 index 00000000..a6ba48d3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml new file mode 100644 index 00000000..3ba29704 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating NTP configuration for os6" + template: + src: os6_ntp.j2 + dest: "{{ build_dir }}/ntp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False'))| bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning NTP configuration for os6" + dellemc.os6.os6_config: + src: os6_ntp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 b/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 new file mode 100644 index 00000000..94e4561f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 @@ -0,0 +1,27 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure NTP commands for os6 devices + +os6_ntp: + server: + - ip: 2.2.2.2 + state: absent + +#####################################} +{% if os6_ntp is defined and os6_ntp %} + {% for key,value in os6_ntp.items() %} + {% if key == "server" and value %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.state is defined and item.state == "absent" %} +no sntp server {{ item.ip }} + {% else %} +sntp server {{ item.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} +{% endif %} diff --git 
a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory new file mode 100644 index 00000000..878877b0 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml new file mode 100644 index 00000000..92475504 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml @@ -0,0 +1,9 @@ +--- +# vars file for dellemc.os6.os6_ntp, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_ntp: + server: + - ip: 2.2.2.2 + state: present diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml new file mode 100644 index 00000000..d24e3b53 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os6.os6_ntp diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml new file mode 100644 index 00000000..4ec591c9 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE b/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/README.md b/ansible_collections/dellemc/os6/roles/os6_qos/README.md new file mode 100644 index 00000000..31be2719 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/README.md @@ -0,0 +1,102 @@ +QoS role +======== + +This role facilitates the configuration of quality of service (QoS) attributes like policy-map and class-map. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The QoS role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take a `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_qos keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os6 | +| ``policy_map.name`` | string (required) | Configures the policy-map name | os6 | +| ``policy_map.type`` | string: in, out in os6 | Configures the policy-map type | os6 | +| ``policy_map.class_instances`` | list | Specifies the class instances for the policy | os6 | +| ``class_instances.name`` | string | Specifies name of class instance | os6 | +| ``class_instances.policy`` | list | Specifies list of associated policies for the class | os6 | +| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os6 | +| ``class_map`` | list | Configures the class-map (see 
``class_map.*``) | os6 | +| ``class_map.name`` | string (required) | Configures the class-map name | os6 | +| ``class_map.type`` | string: match-all, match-any in os6 | Configures the class-map type | os6 | +| ``class-map.match_condition`` | list | Specifies the type of match-conditions required for the class | os6 | +| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_qos* role to configure the policy-map class-map. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os6_qos* role. By including the role, you automatically get access to all of the tasks to configure QoS features. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_qos: + policy_map: + - name: testpolicy + type: qos + class_instances: + - name: video + policy: + - assign-queue 1 + state: present + class_map: + - name: testclass + type: application + match_condition: + - ip dscp 26 + state: present + +**Simple playbook to setup qos — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_qos + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml new file mode 100644 index 00000000..1b87a8f3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for dellemc.os6.os6_qos +match_type: + match_all: match-all + match_any: match-any diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml new file mode 100644 index 00000000..1998b3a7 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_qos \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml new file mode 100644 index 00000000..47fff33e --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_qos role facilitates the configuration of qos attributes in devices running Dell EMC Networking Operating Systems. + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml new file mode 100644 index 00000000..1c7d6282 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Provisioning Qos configuration for os6" + dellemc.os6.os6_config: + src: os6_qos.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output + + - name: "Generating Qos configuration for os6" + template: + src: os6_qos.j2 + dest: "{{ build_dir }}/qos6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2 b/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2 new file mode 100644 index 00000000..abb9ec7b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2 @@ -0,0 +1,97 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{##################################################### + +Purpose: +Configure qos commands for os6 Devices. 
+ +os6_qos: + class_map: + - name: CONTROL + type: match-all + match_condition: + - ip dscp 40 + state: present + - name: testclass + type: match-all + match_condition: + - vlan 4 + state: present + - name: test + type: match-any + match_condition: + - vlan 5 + state: present + policy_map: + - name: testpolicy + type: in + class_instances: + - name: testclass + policy: + - assign-queue 4 + - mirror Po1 + state: present + - name: test + policy: + - assign-queue 4 + state: present + state: present + +#####################################################} +{% if os6_qos is defined and os6_qos %} +{% for key in os6_qos.keys() %} + {% if key =="class_map" %} + {% for vars in os6_qos[key] %} + {% if vars.name is defined and vars.name %} + {% if vars.state is defined and vars.state == "absent" %} +no class-map {{ vars.name }} + {% else %} + {% if vars.type is defined and vars.type %} +class-map {{ vars.type }} {{ vars.name }} + {% else %} +class-map {{ vars.name }} + {% endif %} + {% if vars.match_condition is defined and vars.match_condition %} + {% for match in vars.match_condition %} +match {{ match }} + {% endfor %} + {% endif %} +exit + {% endif %} + {% endif %} + {% endfor %} + {% elif key =="policy_map" %} + {% for vars in os6_qos[key] %} + {% if vars.name is defined and vars.name %} + {% if vars.state is defined and vars.state == "absent" %} +no policy-map {{ vars.name }} + {% else %} + {% if vars.type is defined and vars.type %} +policy-map {{ vars.name }} {{ vars.type }} + {% else %} +policy-map {{ vars.name }} + {% endif %} + {% if vars.class_instances is defined and vars.class_instances %} + {% for instance in vars.class_instances %} + {% if instance.name is defined and instance.name %} + {% if instance.state is defined and instance.state == "absent"%} +no class {{ instance.name }} + {% else %} +class {{ instance.name }} + {% endif %} + {% if instance.policy is defined and instance.policy %} + {% for policy in instance.policy %} +{{ policy }} + {% endfor %} + 
{% endif %} +exit + {% endif %} + {% endfor %} + {% endif %} +exit + {% endif %} + {% endif %} + {% endfor %} + + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory b/ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory new file mode 100644 index 00000000..878877b0 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml new file mode 100644 index 00000000..7c8d983a --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml @@ -0,0 +1,73 @@ +--- +# Sample variables for OS6 device + +os6_qos: + class_map: + - name: CONTROL + type: match-all + match_condition: + - ip dscp 40 + state: present + - name: VIDEO + type: match-all + match_condition: + - ip dscp 34 + state: present + - name: VOICE-TRAFFIC + type: match-all + match_condition: + - ip dscp ef + state: present + - name: DATA-TRAFFIC + type: match-any + match_condition: + - vlan 100 + - protocol tcp + - cos 5 + - ip tos 11 11 + state: present + policy_map: + - name: Ingress_QoS + type: in + class_instances: + - name: VIDEO + policy: + - assign-queue 1 + - name: CONTROL + policy: + - assign-queue 2 + - name: VOICE-TRAFFIC + policy: + - assign-queue 3 + state: present + - name: Egress_QoS + type: out + class_instances: + - name: VIDEO + policy: + - assign-queue 5 + - name: CONTROL + policy: + - assign-queue 5 + - name: VOICE-TRAFFIC + policy: + - assign-queue 5 + state: present + - name: QoS_In + type: in + class_instances: + - name: DATA-TRAFFIC + policy: + - assign-queue 1 + - redirect Gi1/0/3 + - police-simple 512000 64 conform-action set-cos-transmit 5 violate-action drop + state: present + - name: QoS_Out + type: out + class_instances: + - name: DATA-TRAFFIC + policy: + - assign-queue 6 + - redirect Gi1/0/3 + - 
police-simple 512000 64 conform-action set-cos-transmit 5 violate-action drop + state: present diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml b/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml new file mode 100644 index 00000000..69562c92 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os6.os6_qos diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml new file mode 100644 index 00000000..5048d9b4 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_qos \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/README.md b/ansible_collections/dellemc/os6/roles/os6_snmp/README.md new file mode 100644 index 00000000..22da4f71 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/README.md @@ -0,0 +1,108 @@ +SNMP role +========= + +This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The SNMP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_snmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``snmp_contact`` | string | Configures SNMP contact information, field needs to be left blank to remove the contact information | os6 | +| ``snmp_location`` | string | Configures SNMP location information, field needs to be left blank to remove the location | os6 | +| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os6 | +| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os6 | +| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os6 | +| ``snmp_community.state`` | string: absent,present\* 
| Deletes the SNMP community information if set to absent | os6 | +| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os6 | +| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os6 | +| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host | os6 | +| ``snmp_host.udpport`` | string | Configures the UDP number of the SNMP trap host (0 to 65535) | os6 | +| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os6 | +| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os6 | +| ``snmp_traps.name`` | string | Enables SNMP traps | os6 | +| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os6_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_snmp: + snmp_contact: test + snmp_location: Santa Clara + snmp_community: + - name: public + access_mode: ro + state: present + - name: private + access_mode: rw + state: present + snmp_host: + - ip: 10.0.0.1 + communitystring: public + udpport: 1 + state: absent + snmp_traps: + - name: config + state: present + +**Simple playbook to setup snmp — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_snmp + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml new file mode 100644 index 00000000..994291fa --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_snmp diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml new file mode 100644 index 00000000..9dbd6173 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_snmp diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml new file mode 100644 index 00000000..6c26f3d3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC Networking Operating Systems. + company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml new file mode 100644 index 00000000..051ba034 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating SNMP configuration for os6" + template: + src: os6_snmp.j2 + dest: "{{ build_dir }}/snmp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning SNMP configuration for os6" + dellemc.os6.os6_config: + src: os6_snmp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2 b/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2 new file mode 100644 index 00000000..bf13fc37 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2 @@ -0,0 +1,94 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure snmp commands for os6 Devices + +os6_snmp: + snmp_contact: test + snmp_location: Santa Clara + snmp_community: + - name: public + access_mode: ro + state: absent + - name: private + access_mode: rw + state: absent + snmp_traps: + - name: all + state: present + snmp_host: + - ip: 4.4.4.4 + communitystring: public + 
udpport: 1 + state: absent + +#####################################} +{% if os6_snmp is defined and os6_snmp %} + +{% for key,value in os6_snmp|dictsort %} + {% if key=="snmp_contact" %} + {% if value %} +snmp-server contact "{{ value }}" + {% else %} +no snmp-server contact + {% endif %} + {% elif key == "snmp_location" %} + {% if value %} +snmp-server location "{{ value }}" + {% else %} +no snmp-server location + {% endif %} + {% elif key == "snmp_community" %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server community {{ item.name }} + {% else %} + {% if item.access_mode is defined and item.access_mode %} +snmp-server community "{{ item.name }}" {{ item.access_mode }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% elif key == "snmp_host" and value %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server host {{ item.ip }} traps + {% else %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +snmp-server host {{ item.ip }} "{{ item.communitystring }}" udp-port {{ item.udpport }} + {% else %} +snmp-server host {{ item.ip }} "{{ item.communitystring }}" + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor%} + {% elif key == "snmp_traps" %} + {% if value %} + {% for val in value %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} +no snmp-server enable traps {{ val.name }} + {% else %} + {% if val.name == "all" %} + {% set trap_list = ['bgp state-changes limited','dvmrp','captive-portal','pim','captive-portal client-auth-failure','captive-portal client-connect','captive-portal client-db-full','captive-portal client-disconnect'] %} + {% for name in trap_list %} +snmp-server enable traps {{ name }} + {% 
endfor %} + {% else %} +snmp-server enable traps {{ val.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml new file mode 100644 index 00000000..59f8399d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml @@ -0,0 +1,23 @@ +--- +# vars file for dellemc.os6.os6_snmp, +# below gives a sample configuration + # Sample variables for OS6 device + +os6_snmp: + snmp_contact: test + snmp_location: Santa Clara + snmp_community: + - name: public + access_mode: ro + state: absent + - name: private + access_mode: rw + state: absent + snmp_traps: + - name: all + state: present + snmp_host: + - ip: 4.4.4.4 + communitystring: public + udpport: 1 + state: absent \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml new file mode 100644 index 00000000..22e0b3d7 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_snmp diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml new file mode 100644 index 00000000..361ecad5 
--- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_snmp diff --git a/ansible_collections/dellemc/os6/roles/os6_system/LICENSE b/ansible_collections/dellemc/os6/roles/os6_system/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/README.md b/ansible_collections/dellemc/os6/roles/os6_system/README.md new file mode 100644 index 00000000..2e70b192 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/README.md @@ -0,0 +1,83 @@ +System role +=========== + +This role facilitates the configuration of global system attributes. It specifically enables configuration of hostname and enable password for OS6. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The System role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_system keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``hostname`` | string | Configures a hostname to the device (no negate command) | os6 | +| ``enable_password`` | string | Configures the enable password, field needs to be left blank to remove the enable password from the system | os6 | +| ``mtu`` | integer | Configures the maximum transmission unit (MTU) for all interfaces, field needs to be left blank to remove the MTU configurations from the system | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +******************** + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_system role* to completely set the NTP server, hostname, enable password, management route, hash algorithm, clock, line terminal, banner and reload type. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The system role writes a simple playbook that only references the *os6_system* role. By including the role, you automatically get access to all of the tasks to configure system features. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_system: + hostname: host1 + enable_password: dell + mtu: 2000 + + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_system + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml new file mode 100644 index 00000000..c720d5cc --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml new file mode 100644 index 00000000..a042201b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml new file mode 100644 index 00000000..ca8e0bcf --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_system role facilitates the configuration of system attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml new file mode 100644 index 00000000..c10a7390 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating system configuration for os6" + template: + src: os6_system.j2 + dest: "{{ build_dir }}/system6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning system configuration for os6" + dellemc.os6.os6_config: + src: os6_system.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2 b/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2 new file mode 100644 index 00000000..b6ca686d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2 @@ -0,0 +1,34 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure system commands for os6 devices + +os6_system: + hostname: os6 + enable_password: force10 + mtu: 2000 + +#####################################} +{% if os6_system is defined and os6_system %} + +{% if os6_system.hostname is defined and os6_system.hostname %} +hostname "{{ os6_system.hostname }}" +{% endif %} +{% for key,value in os6_system.items() %} + + {% if key == "enable_password" %} + {% if value %} +enable password {{ value }} + {% else %} +no enable password + {% endif %} + {% elif 
key== "mtu" %} + {% if value %} +system jumbo mtu {{ value }} + {% else %} +no system jumbo mtu + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml new file mode 100644 index 00000000..0665ae6b --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml @@ -0,0 +1,9 @@ +--- +# vars file for dellemc.os6.os6_system, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_system: + hostname: os6 + enable_password: force10 + mtu: 2000 diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml new file mode 100644 index 00000000..4a7a41de --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml new file mode 100644 index 00000000..773a8950 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_system, diff --git a/ansible_collections/dellemc/os6/roles/os6_users/LICENSE 
b/ansible_collections/dellemc/os6/roles/os6_users/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. 
You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/README.md b/ansible_collections/dellemc/os6/roles/os6_users/README.md new file mode 100644 index 00000000..2146cff8 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/README.md @@ -0,0 +1,93 @@ +Users role +========== + +This role facilitates the configuration of global system user attributes. It supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The Users role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os6_users list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os6 | +| ``password`` | string | Configures the password set for the username; password length must be at least eight characters | os6 | +| ``privilege`` | int | Configures the privilege level for the user; either 0, 1, or 15; if this key is omitted, the default privilege is 1 | os6 | +| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses *os6_users* role to configure user attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name. + +If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file. It writes a simple playbook that only references the *os6_users* role. By including the role, you automatically get access to all of the tasks to configure user features. 
+ +**Sample hosts file** + + switch1 ansible_host=<ip_address> + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_users: + - username: u1 + privilege: 0 + state: absent + - username: u1 + password: dell@force10 + # set password to false instead to configure the user with nopassword + privilege: 1 + state: present + - username: u2 + password: test1234567 + privilege: 15 + state: present + +**Simple playbook to setup users — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_users + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml new file mode 100644 index 00000000..ab2367be --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_users diff --git a/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml new file mode 100644 index 00000000..c1d47b7c --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_users diff --git a/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml new file mode 100644 index 00000000..cde049b5 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_users role facilitates the configuration of user attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml new file mode 100644 index 00000000..f94f356f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating users configuration for os6" + template: + src: os6_users.j2 + dest: "{{ build_dir }}/users6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning users configuration for os6" + dellemc.os6.os6_config: + src: os6_users.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2 b/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2 new file mode 100644 index 00000000..52ff6880 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2 @@ -0,0 +1,37 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure users commands for os6 Devices + +os6_users: + - username: test + password: test + privilege: 0 + state: absent + +#####################################} +{% if os6_users is defined and os6_users %} + +{% for item in os6_users %} + {% if item.username is defined %} + {% if item.state is defined and item.state == "absent" %} +no username {{ item.username }} + {% else %} + {% if item.password is defined and item.password %} + {% if item.privilege is defined and item.privilege %} +username {{ item.username 
}} password {{ item.password }} privilege {{ item.privilege }} + {% else %} +username {{ item.username }} password {{ item.password }} + {% endif %} + {% elif not item.password %} + {% if item.privilege is defined and item.privilege %} +username {{ item.username }} nopassword privilege {{ item.privilege }} + {% else %} +username {{ item.username }} nopassword + {% endif %} + {% endif %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml new file mode 100644 index 00000000..abf69756 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml @@ -0,0 +1,10 @@ +--- +# vars file for dellemc.os6.os6_users, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_users: + - username: test + password: test + privilege: 0 + state: absent \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml new file mode 100644 index 00000000..b07c1dd8 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_users diff --git a/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml 
b/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml new file mode 100644 index 00000000..e9c84b96 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_users diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE b/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/README.md b/ansible_collections/dellemc/os6/roles/os6_vlan/README.md new file mode 100644 index 00000000..5ef09a9f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/README.md @@ -0,0 +1,104 @@ +VLAN role +========= + +This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The VLAN role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration +- `os6_vlan` (dictionary) holds the key with the VLAN ID key and default-vlan key. 
+- VLAN ID key should be in format "vlan <ID>" (1 to 4094) +- Variables and values are case-sensitive + + +**VLAN ID keys** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``tagged_members_append`` | boolean: true,false | appends the tagged vlan members to the existing list on the interfaces | os6 | +| ``tagged_members_state`` | string: absent,present | removes all tagged members | os6 | +| ``vlan <ID>`` | string | specify the vlan to be configured (see ``vlan <ID>.*``) | os6 | +| ``vlan <ID>.name`` | string | Configures the name of the VLAN, field needs to be left blank to remove the user defined name and assign the default name | os6 | +| ``vlan <ID>.tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os6 | +| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os6 | +| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os6 | +| ``vlan <ID>.untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os6 | +| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os6 | +| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os6 | +| ``vlan <ID>.state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os6 | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars directories* or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the ANSIBLE_REMOTE_PORT option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +## Example playbook + +This example uses the *os6_vlan* role to setup the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated to it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the os6_vlan role. + +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + os6_vlan: + tagged_members_append: False + tagged_members_state: present + vlan 100: + name: "Mgmt Network" + tagged_members: + - port: Te1/0/30 + state: absent + untagged_members: + - port: Fo1/0/14 + state: present + state: present + + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_vlan + +**Run** + + ansible-playbook -i hosts switch1.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml new file mode 100644 index 00000000..623b2076 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_vlan \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml new file mode 100644 index 00000000..e3d581be --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_vlan diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml new file mode 100644 index 00000000..0022966d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml new file mode 100644 index 00000000..44d0b537 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating VLAN configuration for os6" + template: + src: os6_vlan.j2 + dest: "{{ build_dir }}/vlan6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning VLAN configuration for os6" + dellemc.os6.os6_config: + src: os6_vlan.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2 b/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2 new file mode 100644 index 00000000..b0cbe6ff --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2 @@ -0,0 +1,135 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{######################################### + +Purpose: +Configure VLAN Interface commands for os6 Devices + +os6_vlan: + tagged_members_append: False + tagged_members_state: present + vlan 2: + name: "os6vlan" + tagged_members: + - port: Gi1/0/1 + state: present + - port: Gi1/0/2 + state: absent + untagged_members: + - port: Gi1/0/3 + state: present + - port: Gi1/0/4 + state: absent + state: present + +#########################################} +{% if os6_vlan is defined and os6_vlan -%} +{%- for key in os6_vlan.keys() -%} +{% if 'vlan' in key %} +{%- set vlan_id = key.split(" ") -%} +{%- set vlan_vars 
= os6_vlan[key] -%} + {% if vlan_vars.state is defined and vlan_vars.state=="absent" -%} +no vlan {{ vlan_id[1] }} + {%- else -%} +vlan {{ vlan_id[1] }} + {% if vlan_vars.name is defined -%} + {% if vlan_vars.name-%} +name "{{ vlan_vars.name }}" + {% else -%} +no name + {% endif %} + {% endif %} +exit + {% if vlan_vars.untagged_members is defined -%} + {%- for ports in vlan_vars.untagged_members -%} + {% if ports.port is defined and ports.port -%} + {% if ports.state is defined and ports.state == "absent" -%} +interface {{ ports.port }} +no switchport access vlan + {% else -%} +interface {{ ports.port }} +switchport access vlan {{ vlan_id[1] }} + {% endif -%} + {% endif -%} +exit + {% endfor -%} + {% endif -%} + {% endif -%} +{% endif -%} +{% endfor -%} +{%- set cmd_dict = {} -%} +{%- for key in os6_vlan.keys() -%} +{% if 'vlan' in key %} +{%- set vlan_id = key.split(" ") -%} +{%- set vlan_vars = os6_vlan[key] -%} +{%- set tagged_vlans = [] -%} +{%- set tagged_members_present = [] -%} +{%- set tagged_members_absent= [] -%} +{% if vlan_vars.tagged_members is defined and vlan_vars.tagged_members -%} + {%- for ports in vlan_vars.tagged_members -%} + {% if ports.port is defined and ports.port -%} + {%- set port = ports.port -%} + {% if ports.state is defined and ports.state == 'absent' -%} + {% if port in cmd_dict and 'absent' in cmd_dict[port] -%} + {%- set tmp_vlan_list=cmd_dict[port]['absent'] -%} + {%- set x=tmp_vlan_list.extend([vlan_id[1]]) -%} + {%- set x=cmd_dict[port].update({'absent': tmp_vlan_list}) -%} + {%- elif port in cmd_dict and 'absent' not in cmd_dict[port] -%} + {%- set x=cmd_dict[port].update({'absent': [vlan_id[1]]}) -%} + {%- else -%} + {%- set x=cmd_dict.update({port: {'absent': [vlan_id[1]]}}) -%} + {% endif -%} + {%- else -%} + {% if port in cmd_dict and 'present' in cmd_dict[port] -%} + {%- set tmp_vlan_list=cmd_dict[port]['present'] -%} + {%- set x=tmp_vlan_list.extend([vlan_id[1]]) -%} + {%- set x=cmd_dict[port].update({'present': 
tmp_vlan_list}) -%} + {%- elif port in cmd_dict and 'present' not in cmd_dict[port] -%} + {%- set x=cmd_dict[port].update({'present': [vlan_id[1]]}) -%} + {%- else -%} + {%- set x=cmd_dict.update({port: {'present': [vlan_id[1]]}}) -%} + {% endif -%} + {% endif -%} + {% endif -%} + {% endfor -%} +{% endif -%} +{% endif -%} + {% endfor -%} +{%- for cmd in cmd_dict -%} +interface {{cmd}} +{% if 'tagged_members_state' in os6_vlan and os6_vlan['tagged_members_state']=='absent' %} +no switchport trunk allowed vlan +{% else %} +{% for cmd_item in cmd_dict[cmd] %} +{% if 'present' == cmd_item -%} +{% set sort_list = cmd_dict[cmd]['present']| sort %} +{% elif 'absent' in cmd_item -%} +{% set sort_list = cmd_dict[cmd]['absent']| sort %} +{% endif %} +{% set range_list = [] %} +{% set temp = {'temp': []} %} +{% for i in range(sort_list|length) %} +{% set x=temp['temp'].extend([sort_list[i]]) %} +{% if (i != sort_list|length -1 and sort_list[i+1]|int - sort_list[i]|int > 1) or (i == sort_list|length -1) %} +{% if temp['temp']|first != temp['temp']|last %} +{% set x=range_list.extend([temp['temp']|first|string+'-'+temp['temp']|last|string]) %} +{% set x=temp.update({'temp': []}) %} +{% else %} +{% set x=range_list.extend([temp['temp']|last|string]) %} +{% set x=temp.update({'temp': []}) %} +{% endif %} +{% endif %} +{% endfor %} +{% if 'present' == cmd_item -%} +{% if 'tagged_members_append' in os6_vlan and os6_vlan['tagged_members_append'] %} +switchport trunk allowed vlan add {{ range_list| join(',') }} +{% else %} +switchport trunk allowed vlan {{ range_list| join(',') }} +{% endif -%} +{% elif 'absent' == cmd_item -%} +switchport trunk allowed vlan remove {{ range_list| join(',') }} +{% endif -%} +{% endfor -%} +exit +{% endif -%} +{% endfor -%} +{% endif -%} diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ 
b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml new file mode 100644 index 00000000..5b0f68cf --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml @@ -0,0 +1,21 @@ +--- +# vars file for dellemc.os6.os6_vlan, +# below gives a example configuration +# Sample variables for OS6 device + +os6_vlan: + vlan 100: + tagged_members_append: False + tagged_members_state: present + name: "Blue Network" + tagged_members: + - port: Te1/0/1 + state: present + - port: Te1/0/2 + state: present + untagged_members: + - port: Te1/0/3 + state: present + - port: Te1/0/4 + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml new file mode 100644 index 00000000..03697d94 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_vlan diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml new file mode 100644 index 00000000..acd743a7 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_vlan diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE @@ -0,0 
+1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md b/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md new file mode 100644 index 00000000..9fd9f861 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md @@ -0,0 +1,92 @@ +VRRP role +========= + +This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for OS6. + +The VRRP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os6_vrrp` (dictionary) holds a dictionary with the interface name key +- Interface name can correspond to any of the valid os6 interfaces with a unique interface identifier name +- Physical interface names must be in *\<interface name\> \<interface number\>* format (for example *Fo1/0/1*) +- Variables and values are case-sensitive + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrrp_group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os6 | +| ``description`` | string | Configures a single line description for the VRRP group | os6 | +| ``virtual_address`` | string | Configures a virtual address to the VRRP group (A.B.C.D format) | os6 | +| ``enable`` | boolean: true,false | Enables/disables the VRRP group at the interface | os6 | +| ``preempt`` | boolean: true\*,false |
Configures preempt mode on the VRRP group | os6 | +| ``priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100), field needs to be left blank to remove the priority | os6 | +| ``state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent; VRRP group needs to be disabled to delete the VRRP group from the interface | os6 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_vrrp* role to configure VRRP commands at the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name. + +When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os6_vrrp* role. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/os6 + os6_vrrp: + vlan 4: + - vrrp_group_id: 4 + state: present + description: "Interface-vrrp4" + virtual_address: 10.2.0.1 + enable: true + priority: 120 + preempt: false + + +**Simple playbook to setup system — switch1.yaml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_vrrp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml new file mode 100644 index 00000000..ab5dc0ab --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_vrrp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml new file mode 100644 index 00000000..2e0b8336 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_vrrp diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml new file mode 100644 index 00000000..8b1bc5a1 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os6_vrrp role facilitates the configuration of Virtual Router Redundancy Protocol (VRRP) attributes in + devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml new file mode 100644 index 00000000..72a07a4d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os6 + - name: "Generating VRRP configuration for os6" + template: + src: os6_vrrp.j2 + dest: "{{ build_dir }}/vrrp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False'))| bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning VRRP configuration for os6" + dellemc.os6.os6_config: + src: os6_vrrp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2 b/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2 new file mode 100644 index 00000000..03cb3b80 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2 @@ -0,0 +1,72 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{######################################### +Purpose: +Configure VRRP Interface on OS6 Devices +Variable file example: +--- +# VLAN Interface +os6_vrrp: + vlan 4: + - vrrp_group_id: 4 + state: present + description: "Interface-vrrp4" + virtual_address: 10.2.0.1 + enable: true + priority: 120 + preempt: false +####################################### +#} +{% if os6_vrrp is defined and os6_vrrp %} +{% for key in os6_vrrp.keys() %} + {% set vrrp_vars = os6_vrrp[key] %} + {% if vrrp_vars %} +interface {{ key }} + {% for group in vrrp_vars %} + {% if group.vrrp_group_id is defined and group.vrrp_group_id 
%} + {% if group.state is defined and group.state == "absent" %} + {% if group.enable is defined and not group.enable %} +no vrrp {{ group.vrrp_group_id }} mode + {% endif %} +no vrrp {{ group.vrrp_group_id }} + {% else %} +vrrp {{ group.vrrp_group_id }} + {% if group.virtual_address is defined %} + {% if group.virtual_address %} +vrrp {{ group.vrrp_group_id }} ip {{ group.virtual_address }} + {% endif %} + {% endif %} + {% if group.description is defined %} + {% if group.description %} +vrrp {{ group.vrrp_group_id }} description {{ group.description }} + {% else %} +no vrrp {{ group.vrrp_group_id }} description + {% endif %} + {% endif %} + {% if group.preempt is defined %} + {% if group.preempt %} +vrrp {{ group.vrrp_group_id }} preempt + {% else %} +no vrrp {{ group.vrrp_group_id }} preempt + {% endif %} + {% endif %} + {% if group.enable is defined %} + {% if group.enable %} +vrrp {{ group.vrrp_group_id }} mode + {% else %} +no vrrp {{ group.vrrp_group_id }} mode + {% endif %} + {% endif %} + {% if group.priority is defined %} + {% if group.priority %} +vrrp {{ group.vrrp_group_id }} priority {{ group.priority }} + {% else %} +no vrrp {{ group.vrrp_group_id }} priority + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +exit + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml new 
file mode 100644 index 00000000..b20d3376 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml @@ -0,0 +1,14 @@ +--- +# vars file for dellemc.os6.os6_vrrp, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_vrrp: + vlan 4: + - vrrp_group_id: 4 + state: present + description: "Interface-vrrp4" + virtual_address: 10.2.0.1 + enable: true + priority: 120 + preempt: false \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml new file mode 100644 index 00000000..660d49b3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_vrrp diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml new file mode 100644 index 00000000..9eb17b5d --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_vrrp diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/README.md b/ansible_collections/dellemc/os6/roles/os6_xstp/README.md new file mode 100644 index 00000000..38adc0f3 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/README.md @@ -0,0 +1,117 @@ +# xSTP role + +This role facilitates the configuration of xSTP attributes. It supports multiple version of spanning-tree protocol (STP), rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6. + +The xSTP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value +- `os6_xstp` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**hostname keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|----------------------| +| ``type`` | string (required) | Configures the type of spanning-tree mode specified that can vary according to the OS device; include RSTP, rapid-PVST, and MST | os6 | +| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os6 | +| ``stp`` | dictionary | Configures 
simple spanning-tree protocol (see ``stp.* keys``) | os6 | +| ``stp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os6 | +| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os6 | +| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os6 | +| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os6 | +| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os6 | +| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os6 | +| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os6 | +| ``vlan.state`` | string: absent, present\* | Deletes the configured PVST VLAN with ID if set to absent | os6 | +| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os6 | +| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os6 | +| ``mstp_instances.number`` | integer | Configures the multiple spanning-tree instance number | os6 | +| ``mstp_instances.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to the instance number | os6 | +| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os6 | +| ``mstp_instances.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os6 | +| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os6 | +| ``intf <interface name>``| dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os6 | +| ``intf.<interface name>.edge_port`` | boolean: true,false | Enables port fast at the interface 
level if set to true | os6 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os6_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path. +It writes a simple playbook that only references the *os6_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP. 
+ +**Sample hosts file** + + switch1 ansible_host= + +**Sample host_vars/switch1** + + hostname: switch1 + ansible_become: yes + ansible_become_method: enable + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os6.os6 + build_dir: ../temp/temp_os6 + + +**Sample vars/main.yml** + + os6_xstp: + type: stp + enable: true + stp: + bridge_priority: 4096 + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + state: present + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + bridge_priority: 4096 + vlans_state: present + intf: + Fo4/0/1: + edge_port: true + +**Simple playbook to setup system — switch1.yml** + + - hosts: switch1 + roles: + - dellemc.os6.os6_xstp + +**Run** + + ansible-playbook -i hosts switch1.yml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml new file mode 100644 index 00000000..92da22de --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os6.os6_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml new file mode 100644 index 00000000..03d5fa49 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os6.os6_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml new file mode 100644 index 00000000..dd1200be --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os6_xstp role facilitates the configuration of STP attributes in devices running Dell EMC Networking Operating Systems. + company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os6 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os6 diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml new file mode 100644 index 00000000..7fe379cf --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os6 + + - name: "Generating xSTP configuration for os6" + template: + src: os6_xstp.j2 + dest: "{{ build_dir }}/xstp6_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool) +# notify: save config os6 + register: generate_output + + - name: "Provisioning xSTP configuration for os6" + dellemc.os6.os6_config: + src: os6_xstp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") +# notify: save config os6 + register: output \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2 b/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2 new file mode 100644 index 00000000..2c6f482f --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2 @@ -0,0 +1,129 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################################## +PURPOSE: Configure xSTP commands for os6 devices + +os6_xstp: + type: stp + enable: true + stp: + bridge_priority: 4096 + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + state: present + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + vlans_state: present + bridge_priority: 4096 + intf: + Te1/1/8: + 
edge_port: true + +#####################################################} +{% if os6_xstp is defined and os6_xstp %} +{% set xstp_vars = os6_xstp %} +{% if xstp_vars.type is defined and xstp_vars.type %} + {% if xstp_vars.enable is defined %} + {% if xstp_vars.enable %} +spanning-tree mode {{ xstp_vars.type }} + {% else %} +no spanning-tree + {% endif %} + {% endif %} +{% endif %} +{% if xstp_vars.stp is defined and xstp_vars.stp %} + {% set val = xstp_vars.stp %} + {% if val.bridge_priority is defined %} + {% if val.bridge_priority == 0 or val.bridge_priority %} +spanning-tree priority {{ val.bridge_priority }} + {% else %} +no spanning-tree priority + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.pvst is defined and xstp_vars.pvst %} + {% set val = xstp_vars.pvst %} + {% if val.vlan is defined and val.vlan %} + {% for vlan in val.vlan %} + {% if vlan.range_or_id is defined and vlan.range_or_id %} + {% if "-" in (vlan.range_or_id|string) %} + {% set vlan_start_end = (vlan.range_or_id|string).split("-") %} + {% set vlans = [] %} + {% for id in range(vlan_start_end[0]|int,vlan_start_end[1]|int+1) %} + {{ vlans.append(id) }} + {% endfor %} + {% else %} + {% set vlans = (vlan.range_or_id|string).split(",") %} + {% endif %} + {% for vlanid in vlans %} + {% if vlan.state is defined and vlan.state == "absent" %} + {% if vlan.bridge_priority is defined %} + {% if not vlan.bridge_priority %} +no spanning-tree vlan {{ vlanid}} priority + {% endif %} + {% endif %} + {% else %} + {% if vlan.bridge_priority is defined %} + {% if vlan.bridge_priority == 0 or vlan.bridge_priority %} +spanning-tree vlan {{ vlanid }} priority {{ vlan.bridge_priority }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} +{% if xstp_vars.mstp is defined and xstp_vars.mstp %} + {% set val = xstp_vars.mstp %} + {% if val.mstp_instances is defined and val.mstp_instances %} + {% for instance in val.mstp_instances %} + {% if 
instance.number is defined and instance.number %} + {% if instance.bridge_priority is defined %} + {% if instance.bridge_priority == 0 or instance.bridge_priority %} +spanning-tree mst {{ instance.number }} priority {{ instance.bridge_priority }} + {% else %} +no spanning-tree mst {{ instance.number }} priority + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.mstp_instances is defined and val.mstp_instances %} +spanning-tree mst configuration + {% for instance in val.mstp_instances %} + {% if instance.number is defined and instance.number %} + {% if instance.vlans is defined and instance.vlans %} + {% set vlans = (instance.vlans|string).split(",") %} + {% for vlan in vlans %} + {% if instance.vlans_state is defined and instance.vlans_state == "absent" %} +instance {{ instance.number }} remove vlan {{ vlan }} + {% else %} +instance {{ instance.number }} add vlan {{ vlan }} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} +exit + {% endif %} +{% endif %} +{% if xstp_vars.intf is defined and xstp_vars.intf %} + {% for intr in xstp_vars.intf.keys() %} + {% set intf_vars = xstp_vars.intf[intr] %} +interface {{ intr }} + {% if intf_vars.edge_port is defined %} + {% if not intf_vars.edge_port %} +no spanning-tree portfast + {% else %} +spanning-tree portfast + {% endif %} +exit + {% endif %} + {% endfor %} +{% endif %} +{% endif %} diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml new file mode 100644 index 00000000..2980eb65 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml @@ -0,0 +1,6 @@ +switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6" +switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6" + +[os6switches] +switch1 +switch2 \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml 
b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml new file mode 100644 index 00000000..5d11b69a --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml @@ -0,0 +1,24 @@ +--- +# vars file for dellemc.os6.os6_xstp, +# below gives a sample configuration +# Sample variables for OS6 device + +os6_xstp: + type: stp + enable: true + stp: + bridge_priority: 4096 + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + state: present + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + bridge_priority: 4096 + vlans_state: present + intf: + Te1/0/5: + edge_port: true \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml new file mode 100644 index 00000000..4efc0f33 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: os6switches + connection: network_cli + roles: + - dellemc.os6.os6_xstp diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml new file mode 100644 index 00000000..bd62f2a5 --- /dev/null +++ b/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os6.os6_xstp diff --git a/ansible_collections/dellemc/os6/tests/.gitignore b/ansible_collections/dellemc/os6/tests/.gitignore new file mode 100644 index 00000000..ea1472ec --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/.gitignore @@ -0,0 +1 @@ +output/ diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml @@ -0,0 +1,2 @@ 
+--- +testcase: "*" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml new file mode 100644 index 00000000..4cf68f17 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml @@ -0,0 +1,13 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml new file mode 100644 index 00000000..d4898c29 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml new file mode 100644 index 00000000..95770c6a --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml @@ -0,0 +1,20 @@ +--- +- debug: msg="START cli/bad_operator.yaml" + +- name: test bad operator + os6_command: + commands: + - show version + - show interfaces GigabitEthernet 1/0/1 + wait_for: + - "result[0] contains 'Description: Foo'" + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - 
"result.msg is defined" + +- debug: msg="END cli/bad_operator.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml new file mode 100644 index 00000000..dd0f7a78 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml @@ -0,0 +1,20 @@ +--- +- debug: msg="START cli/contains.yaml" + +- name: test contains operator + os6_command: + commands: + - show version + - show interfaces GigabitEthernet 1/0/1 + wait_for: + - "result[0] contains 5" + - "result[1] contains Access" + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- debug: msg="END cli/contains.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml new file mode 100644 index 00000000..493196df --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml @@ -0,0 +1,28 @@ +--- +- debug: msg="START cli/invalid.yaml" + +- name: run invalid command + os6_command: + commands: ['show foo'] + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed" + +- name: run commands that include invalid command + os6_command: + commands: + - show version + - show foo + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed" + +- debug: msg="END cli/invalid.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml new 
file mode 100644 index 00000000..8a87d5da --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml @@ -0,0 +1,29 @@ +--- +- debug: msg="START cli/output.yaml" + +- name: get output for single command + os6_command: + commands: ['show version'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for multiple commands + os6_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" + +- debug: msg="END cli/output.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml new file mode 100644 index 00000000..f1ea515d --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml @@ -0,0 +1,19 @@ +--- +- debug: msg="START cli/timeout.yaml" + +- name: test bad condition + os6_command: + commands: + - show version + wait_for: + - "result[0] contains bad_value_string" + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- debug: msg="END cli/timeout.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git 
a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml new file mode 100644 index 00000000..4cf68f17 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml @@ -0,0 +1,13 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml new file mode 100644 index 00000000..d4898c29 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml new file mode 100644 index 00000000..20d81a3d --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml @@ -0,0 +1,53 @@ +--- +- debug: msg="START cli/backup.yaml" + +- name: setup + os6_config: + commands: + - no interface loopback 62 + provider: "{{ cli }}" + ignore_errors: yes + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname }}_config*" + register: backup_files + delegate_to: localhost + +- name: delete backup files + file: + path: "{{ item.path }}" + state: 
absent + with_items: "{{ backup_files.files }}" + +- name: configure device with config + os6_config: + src: basic/config.j2 + backup: yes + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "result.updates is defined" + +- name: collect any backup files + find: + paths: "{{ role_path }}/backup" + pattern: "{{ inventory_hostname }}_config*" + register: backup_files + delegate_to: localhost + +- assert: + that: + - "backup_files.files is defined" + +- name: teardown + os6_config: + commands: + - no interface loopback 62 + provider: "{{ cli }}" + +- debug: msg="END cli/backup.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml new file mode 100644 index 00000000..f4b1d0b6 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml @@ -0,0 +1,38 @@ +--- +- debug: msg="START cli/basic.yaml" + +- name: setup + os6_config: + commands: + - no interface loopback 62 + provider: "{{ cli }}" + ignore_errors: yes + +- name: configure device with config + os6_config: + src: basic/config.j2 + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "result.updates is defined" + +- name: check device with config + os6_config: + src: basic/config.j2 + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.updates is defined" + +- name: teardown + os6_config: + commands: + - no interface loopback 62 + provider: "{{ cli }}" +- debug: msg="END cli/basic.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml new file mode 100644 index 00000000..9969a951 --- 
/dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml @@ -0,0 +1,43 @@ +--- +- debug: msg="START cli/defaults.yaml" + +- name: setup + os6_config: + commands: + - no interface loopback 63 + provider: "{{ cli }}" + ignore_errors: yes + +- name: configure device with defaults included + os6_config: + src: defaults/config.j2 + provider: "{{ cli }}" + register: result + +- debug: var=result + +- assert: + that: + - "result.changed == true" + - "result.updates is defined" + +- name: check device with defaults included + os6_config: + src: defaults/config.j2 + provider: "{{ cli }}" + register: result + +- debug: var=result + +- assert: + that: + - "result.changed == false" + - "result.updates is defined" + +- name: teardown + os6_config: + commands: + - no interface loopback 63 + provider: "{{ cli }}" + +- debug: msg="END cli/defaults.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml new file mode 100644 index 00000000..37c2c4b2 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml @@ -0,0 +1,41 @@ +--- +- debug: msg="START cli/force.yaml" + +- name: setup + os6_config: + commands: + - interface loopback 62 + provider: "{{ cli }}" + ignore_errors: yes + +- name: configure device with config + os6_config: + src: basic/config.j2 + provider: "{{ cli }}" + match: none + register: result + +- assert: + that: + - "result.changed == true" + - "result.updates is defined" + +- name: check device with config + os6_config: + src: basic/config.j2 + provider: "{{ cli }}" + match: none + register: result + +- assert: + that: + - "result.changed == true" + - "result.updates is defined" + +- name: teardown + os6_config: + commands: + - no interface loopback 62 + provider: "{{ cli }}" + 
+- debug: msg="END cli/force.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml new file mode 100644 index 00000000..b978e8b3 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml @@ -0,0 +1,42 @@ +--- +- debug: msg="START cli/sublevel.yaml" + +- name: setup test + os6_config: + lines: + - 'no ip access-list test' + provider: "{{ cli }}" + match: none + +- name: configure sub level command + os6_config: + lines: ['1000 permit every log'] + parents: ['ip access-list test'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'ip access-list test' in result.updates" + - "'1000 permit every log' in result.updates" + +- name: configure sub level command idempotent check + os6_config: + lines: ['1000 permit every log'] + parents: ['ip access-list test'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: + - 'no ip access-list test' + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/sublevel.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml new file mode 100644 index 00000000..db47989f --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml @@ -0,0 +1,62 @@ +--- +- debug: msg="START cli/sublevel_block.yaml" + +- name: setup + os6_config: + lines: + - permit ip 1.1.1.1 0.0.0.0 any log + - permit ip 2.2.2.2 0.0.0.0 any log + - permit ip 3.3.3.3 0.0.0.0 any log + parents: ['ip access-list test'] + before: ['no ip access-list 
test'] + after: ['exit'] + provider: "{{ cli }}" + match: none + +- name: configure sub level command using block resplace + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 2.2.2.2 0.0.0.0 any log + - 1020 permit ip 3.3.3.3 0.0.0.0 any log + - 1030 permit ip 4.4.4.4 0.0.0.0 any log + parents: ['ip access-list test'] + replace: block + after: ['exit'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'ip access-list test' in result.updates" + - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' in result.updates" + - "'1010 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates" + - "'1020 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates" + - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' in result.updates" + +- name: check sub level command using block replace + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 2.2.2.2 0.0.0.0 any log + - 1020 permit ip 3.3.3.3 0.0.0.0 any log + - 1030 permit ip 4.4.4.4 0.0.0.0 any log + parents: ['ip access-list test'] + replace: block + after: ['exit'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: + - no ip access-list test + match: none + provider: "{{ cli }}" + +- debug: msg="END cli/sublevel_block.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml new file mode 100644 index 00000000..bafe24c5 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml @@ -0,0 +1,66 @@ +--- +- debug: msg="START cli/sublevel_exact.yaml" + +- name: setup + os6_config: + lines: + - permit ip 1.1.1.1 0.0.0.0 any log + - permit ip 2.2.2.2 0.0.0.0 any log + - permit ip 3.3.3.3 0.0.0.0 any log + - permit 
ip 4.4.4.4 0.0.0.0 any log + - permit ip 5.5.5.5 0.0.0.0 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + after: ['exit'] + provider: "{{ cli }}" + match: none + +- name: configure sub level command using exact match + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 2.2.2.2 0.0.0.0 any log + - 1020 permit ip 3.3.3.3 0.0.0.0 any log + - 1030 permit ip 4.4.4.4 0.0.0.0 any log + parents: ['ip access-list test'] + after: ['exit'] + match: exact + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'ip access-list test' in result.updates" + - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' in result.updates" + - "'1010 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates" + - "'1020 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates" + - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' in result.updates" + - "'1040 permit ip 5.5.5.5 0.0.0.0 any log' not in result.updates" + +- name: check sub level command using exact match + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 2.2.2.2 0.0.0.0 any log + - 1020 permit ip 3.3.3.3 0.0.0.0 any log + - 1030 permit ip 4.4.4.4 0.0.0.0 any log + - 1040 permit ip 5.5.5.5 0.0.0.0 any log + parents: ['ip access-list test'] + after: ['exit'] + match: exact + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: + - no ip access-list test + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/sublevel_exact.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml new file mode 100644 index 00000000..51049e5e --- /dev/null +++ 
b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml @@ -0,0 +1,63 @@ +--- +- debug: msg="START cli/sublevel_strict.yaml" + +- name: setup + os6_config: + lines: + - permit ip 1.1.1.1 0.0.0.0 any log + - permit ip 2.2.2.2 0.0.0.0 any log + - permit ip 3.3.3.3 0.0.0.0 any log + - permit ip 4.4.4.4 0.0.0.0 any log + - permit ip 5.5.5.5 0.0.0.0 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + after: ['exit'] + provider: "{{ cli }}" + match: none + +- name: configure sub level command using strict match + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 2.2.2.2 0.0.0.0 any log + - 1020 permit ip 3.3.3.3 0.0.0.0 any log + - 1030 permit ip 4.4.4.4 0.0.0.0 any log + parents: ['ip access-list test'] + match: strict + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: check sub level command using strict match + os6_config: + lines: + - 1000 permit ip 1.1.1.1 0.0.0.0 any log + - 1010 permit ip 3.3.3.3 0.0.0.0 any log + - 1020 permit ip 2.2.2.2 0.0.0.0 any log + parents: ['ip access-list test'] + after: ['exit'] + match: strict + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'ip access-list test' in result.updates" + - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' not in result.updates" + - "'1020 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates" + - "'1010 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates" + - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' not in result.updates" + - "'1040 permit ip 5.5.5.5 0.0.0.0 any log' not in result.updates" + +- name: teardown + os6_config: + lines: + - no ip access-list test + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/sublevel_strict.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml 
b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml new file mode 100644 index 00000000..36cdb9a4 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml @@ -0,0 +1,37 @@ +--- +- debug: msg="START cli/toplevel.yaml" + +- name: setup + os6_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- name: configure top level command + os6_config: + lines: ['hostname foo'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + +- name: configure top level command idempotent check + os6_config: + lines: ['hostname "foo"'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/toplevel.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml new file mode 100644 index 00000000..287bdb9a --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml @@ -0,0 +1,44 @@ +--- +- debug: msg="START cli/toplevel_after.yaml" + +- name: setup + os6_config: + lines: + - "snmp-server contact ansible" + - "hostname {{ inventory_hostname }}" + provider: "{{ cli }}" + match: none + +- name: configure top level command with before + os6_config: + lines: ['hostname foo'] + after: ['snmp-server contact bar'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + - "'snmp-server contact bar' in result.updates" + +- name: configure top level command 
with before idempotent check + os6_config: + lines: ['hostname "foo"'] + after: ['snmp-server contact foo'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: + - "no snmp-server contact" + - "hostname {{ inventory_hostname }}" + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/toplevel_after.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml new file mode 100644 index 00000000..d058abfb --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml @@ -0,0 +1,44 @@ +--- +- debug: msg="START cli/toplevel_before.yaml" + +- name: setup + os6_config: + lines: + - "snmp-server contact ansible" + - "hostname {{ inventory_hostname }}" + provider: "{{ cli }}" + match: none + +- name: configure top level command with before + os6_config: + lines: ['hostname foo'] + before: ['snmp-server contact bar'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + - "'snmp-server contact bar' in result.updates" + +- name: configure top level command with before idempotent check + os6_config: + lines: ['hostname "foo"'] + before: ['snmp-server contact foo'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os6_config: + lines: + - "no snmp-server contact" + - "hostname {{ inventory_hostname }}" + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/toplevel_before.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml 
b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml new file mode 100644 index 00000000..d529e8d1 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml @@ -0,0 +1,39 @@ +--- +- debug: msg="START cli/toplevel_nonidempotent.yaml" + +- name: setup + os6_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- name: configure top level command + os6_config: + lines: ['hostname foo'] + provider: "{{ cli }}" + match: strict + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + +- name: configure top level command idempotent check + os6_config: + lines: ['hostname foo'] + provider: "{{ cli }}" + match: strict + register: result + +- assert: + that: + - "result.changed == true" + +- name: teardown + os6_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/toplevel_nonidempotent.yaml" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml new file mode 100644 index 00000000..7152815d --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml @@ -0,0 +1,14 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase 
}}.yaml" + register: test_cases + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml new file mode 100644 index 00000000..d4898c29 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml new file mode 100644 index 00000000..1834f7b1 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml @@ -0,0 +1,42 @@ +--- +- debug: msg="START cli/facts.yaml" + +- name: test all facts + os6_facts: + gather_subset: + - all + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts is defined" + +- name: test all facts except hardware + os6_facts: + gather_subset: + - "!hardware" + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts.ansible_net_memfree_mb is not defined" + +- name: test interface facts + os6_facts: + gather_subset: + - interfaces + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts.ansible_net_interfaces is defined" + - "result.ansible_facts.ansible_net_memfree_mb is not defined" + + +- debug: msg="END cli/facts.yaml" diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt 
b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..6945f1c2 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt @@ -0,0 +1,4 @@ +plugins/action/os6.py action-plugin-docs +plugins/modules/os6_config.py validate-modules:parameter-list-no-elements +plugins/modules/os6_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os6_command.py validate-modules:parameter-list-no-elements diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..91049edc --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt @@ -0,0 +1,4 @@ +plugins/action/os6.py action-plugin-docs +plugins/modules/os6_config.py validate-modules:parameter-list-no-elements +plugins/modules/os6_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os6_command.py validate-modules:parameter-list-no-elements \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..99f52d2e --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt @@ -0,0 +1 @@ +plugins/action/os6.py action-plugin-docs \ No newline at end of file diff --git a/ansible_collections/dellemc/os6/tests/sanity/requirements.txt b/ansible_collections/dellemc/os6/tests/sanity/requirements.txt new file mode 100644 index 00000000..3e3a9669 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/sanity/requirements.txt @@ -0,0 +1,4 @@ +packaging # needed for update-bundled and changelog +sphinx ; python_version >= '3.5' # docs build requires python 3+ +sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+ +straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and 
requires python 3+ diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg new file mode 100644 index 00000000..a8ed721c --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg @@ -0,0 +1,16 @@ +! +hostname router +exit +! +interface Te1/0/1 +description "test_string" +exit +! +interface Te1/0/2 +no shutdown +exit +! +interface Te1/0/9 +switchport access vlan 2 +exit + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg new file mode 100644 index 00000000..70d5f665 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg @@ -0,0 +1,7 @@ +! +hostname foo +exit +! +interface Te1/0/2 +shutdown +exit diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces new file mode 100644 index 00000000..f6aede90 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces @@ -0,0 +1,41 @@ +Interface Name................................. Te1/0/1 +SOC Hardware Info.............................. BCM56842_A1 +Link Status.................................... Up /None +Keepalive Enabled.............................. FALSE +Err-disable Cause.............................. None +VLAN Membership Mode........................... Trunk Mode +VLAN Membership................................ 
(1),2-4096 +MTU Size....................................... 1518 +Port Mode [Duplex]............................. Full +Port Speed..................................... 1000 +Link Debounce Flaps............................ 0 +Auto-Negotation Status......................... Auto +Burned MAC Address............................. F8B1.565B.615E +L3 MAC Address................................. F8B1.565B.615F +Sample Load Interval........................... 300 +Received Input Rate Bits/Sec................... 0 +Received Input Rate Packets/Sec................ 0 +Transmitted Input Rate Bits/Sec................ 440 +Transmitted Input Rate Packets/Sec : .......... 0 +Total Packets Received Without Errors.......... 0 +Unicast Packets Received....................... 0 +Multicast Packets Received..................... 0 +Broadcast Packets Received..................... 0 +Total Packets Received with MAC Errors......... 0 +Jabbers Received............................... 0 +Fragments/Undersize Received................... 0 +Alignment Errors............................... 0 +FCS Errors..................................... 0 +Overruns....................................... 0 +Total Received Packets Not Forwarded........... 0 +Total Packets Transmitted Successfully......... 381302 +Unicast Packets Transmitted.................... 1 +Multicast Packets Transmitted.................. 351645 +Broadcast Packets Transmitted.................. 29656 +Transmit Packets Discarded..................... 0 +Total Transmit Errors.......................... 0 +Total Transmit Packets Discarded............... 0 +Single Collision Frames........................ 0 +Multiple Collision Frames...................... 0 +Excessive Collision Frames..................... 
0 + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status new file mode 100644 index 00000000..28defda6 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status @@ -0,0 +1,48 @@ +Port Description Duplex Speed Neg Link Flow M VLAN + State Ctrl +--------- --------------- ------ ------- ---- ------ ----- -- ------------------- +Te1/0/1 connected to sp Full 1000 Auto Up Off T (1),2-4096 +Te1/0/2 to_NIC_1 Full 1000 Auto Up Off A 99 +Te1/0/3 N/A Unknown Auto Down Off A 1 +Te1/0/4 N/A Unknown Auto Down Off A 1 +Te1/0/5 N/A Unknown Auto Down Off A 1 +Te1/0/6 N/A Unknown Auto Down Off A 1 +Te1/0/7 N/A Unknown Auto Down Off A 1 +Te1/0/8 N/A Unknown Auto Down Off A 1 +Te1/0/9 N/A Unknown Auto Down Off A 2 +Te1/0/10 N/A Unknown Auto Down Off A 1 +Te1/0/11 N/A Unknown Auto Down Off A 1 +Te1/0/12 N/A Unknown Auto Down Off A 1 +Te1/0/13 N/A Unknown Auto Down Off A 1 +Te1/0/14 N/A Unknown Auto Down Off A 1 +Te1/0/15 N/A Unknown Auto Down Off A 1 +Te1/0/16 N/A Unknown Auto Down Off A 1 +Te1/0/17 N/A Unknown Auto Down Off A 1 +Te1/0/18 N/A Unknown Auto Down Off A 1 +Te1/0/19 N/A Unknown Auto Down Off A 1 +Te1/0/20 N/A Unknown Auto Down Off A 1 +Te1/0/21 N/A Unknown Auto Down Off A 1 +Te1/0/22 N/A Unknown Auto Down Off A 100 +Te1/0/23 N/A Unknown Auto Down Off A 1 +Te1/0/24 N/A Unknown Auto Down Off A 1 +Fo1/1/1 N/A N/A N/A Detach N/A +Fo1/1/2 Full 40000 Off Down Off A 1 +Te1/1/1 N/A N/A N/A Detach N/A +Te1/1/2 N/A N/A N/A Detach N/A +Te1/1/3 N/A N/A N/A Detach N/A +Te1/1/4 N/A N/A N/A Detach N/A +Te1/1/5 N/A N/A N/A Detach N/A +Te1/1/6 N/A N/A N/A Detach N/A +Te1/1/7 N/A N/A N/A Detach N/A +Te1/1/8 N/A N/A N/A Detach N/A + +Oob Type Link + State +--- ------------------------------ ----- +oob Out-Of-Band Up + + +Port Description Link M VLAN +Channel State +------- 
------------------------------ ------- -- ------------------- + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties new file mode 100644 index 00000000..976f45a8 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties @@ -0,0 +1,6 @@ +Yes: Dell Qualified No: Not Qualified +N/A : Not Applicable +Port Type Media Serial Number Dell Qualified +--------- ------- --------------------- --------------------- -------------- + + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int new file mode 100644 index 00000000..043ee2cc --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int @@ -0,0 +1,15 @@ +Default Gateway................................ 0.0.0.0 +L3 MAC Address................................. F8B1.565B.615F + +Routing Interfaces: + +Interface State IP Address IP Mask Method +---------- ----- --------------- --------------- ------- +Vl1 Down 0.0.0.0 0.0.0.0 None +Vl2 Up 0.0.0.0 0.0.0.0 DHCP +Vl99 Up 10.99.1.2 255.255.0.0 Manual +Vl100 Up 3.3.3.3 255.255.255.0 Manual +Vl999 Up 10.250.1.2 255.255.255.0 Manual +Vl1010 Up 10.1.1.1 255.255.255.0 Manual +Vl1681 Up 192.168.100.1 255.255.255.0 Manual + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp new file mode 100644 index 00000000..be89c415 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp @@ -0,0 +1,11 @@ +LLDP Global Configuration + + +Transmit Interval............................ 
30 seconds + +Transmit Hold Multiplier..................... 4 + +Reinit Delay................................. 2 seconds + +Notification Interval........................ 5 seconds + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all new file mode 100644 index 00000000..2a22f444 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all @@ -0,0 +1,10 @@ +LLDP Remote Device Summary + +Local +Interface RemID Chassis ID Port ID System Name +--------- ------- ------------------- ----------------- ----------------- +Te1/0/5 14 F8:B1:56:70:49:38 Gi1/0/5 MAA-N2048-6884 +Te1/0/6 15 F8:B1:56:70:49:38 Gi1/0/6 MAA-N2048-6884 + + + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu new file mode 100644 index 00000000..42657693 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu @@ -0,0 +1,3 @@ +Total Memory................................... 1723232 KBytes +Available Memory Space......................... 638144 KBytes + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config new file mode 100644 index 00000000..b589c296 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config @@ -0,0 +1,124 @@ +!Current Configuration: +!System Description "Dell Networking N4064F, 6.3.3.10, Linux 3.7.10-e54850e7" +!System Software Version 6.3.3.10 +!Cut-through mode is configured as disabled +! +configure +hostname "os6" +slot 1/0 5 ! Dell Networking N4064F +slot 1/1 8 ! 
Dell 10GBase-T Card +stack +member 1 4 ! N4064F +exit +interface out-of-band +ip address 10.16.148.73 255.255.0.0 10.16.144.254 +exit +no logging console +interface vlan 1 +ip address dhcp +exit +no passwords min-length +username "admin" password 21232f297a57a5a743894a0e4a801fc3 privilege 1 encrypted +line telnet +exec-timeout 0 +exit +ip ssh server +application install SupportAssist auto-restart start-on-boot +! +interface Te1/0/1 +no switchport port-security violation protect +exit +! +interface Te1/0/2 +no switchport port-security violation protect +exit +! +interface Te1/0/3 +no switchport port-security violation protect +exit +! +interface Te1/0/4 +no switchport port-security violation protect +exit +! +interface Te1/0/5 +no switchport port-security violation protect +exit +! +interface Te1/0/6 +no switchport port-security violation protect +exit +! +interface Te1/0/7 +no switchport port-security violation protect +exit +! +interface Te1/0/8 +no switchport port-security violation protect +exit +! +interface Te1/0/9 +no switchport port-security violation protect +exit +! +interface Te1/0/10 +no switchport port-security violation protect +exit +! +interface Te1/0/11 +no switchport port-security violation protect +exit +! +interface port-channel 1 +no switchport port-security violation protect +exit +! +interface port-channel 2 +no switchport port-security violation protect +exit +! +interface port-channel 3 +no switchport port-security violation protect +exit +! +interface port-channel 4 +no switchport port-security violation protect +exit +! +interface port-channel 5 +no switchport port-security violation protect +exit +! 
+snmp-server enable traps dvmrp +snmp-server enable traps pim +no snmp-server enable traps vrrp +no snmp-server enable traps acl +snmp-server enable traps captive-portal +snmp-server enable traps captive-portal client-auth-failure +snmp-server enable traps captive-portal client-connect +snmp-server enable traps captive-portal client-db-full +snmp-server enable traps captive-portal client-disconnect +router bgp 11 +bgp router-id 1.1.1.1 +maximum-paths 2 +maximum-paths ibgp 2 +network 101.1.2.0 mask 255.255.255.0 +template peer MUX_HNV_ACCESS +remote-as 64918 +exit +neighbor 10.10.234.16 remote-as 64818 +neighbor 10.10.234.16 default-originate +neighbor 10.10.234.16 timers 2 5 +neighbor 2001:4898:5808:ffa2::1 remote-as 64818 +neighbor 2001:4898:5808:ffa2::1 default-originate +neighbor 2001:4898:5808:ffa2::1 timers 2 4 +address-family ipv6 +network 2001:4898:5808:ffa0::/126 +redistribute connected +exit +exit +enable password c4f25f005187e9a85ad6480d3507a541 encrypted +openflow +exit +eula-consent support-assist reject +exit diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname new file mode 100644 index 00000000..2015aaf9 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname @@ -0,0 +1,3 @@ +hostname "os6_sw1" + + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version new file mode 100644 index 00000000..37c58e8b --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version @@ -0,0 +1,17 @@ +Machine Description............... Dell Networking Switch +System Model ID................... N4032 +Machine Type...................... 
Dell Networking N4032 +Serial Number..................... CN04G4FP282984AI0097A01 +Manufacturer...................... 0xbc00 +Burned In MAC Address............. F8B1.565B.615C +System Object ID.................. 1.3.6.1.4.1.674.10895.3042 +CPU Version....................... XLP308H-B2 +SOC Version....................... BCM56842_A1 +HW Version........................ 3 +CPLD Version...................... 17 + +unit active backup current-active next-active +---- ----------- ----------- -------------- -------------- +1 6.3.3.7 6.3.2.7 6.3.3.7 6.3.3.7 + + diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py new file mode 100644 index 00000000..4f8cb8c9 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py @@ -0,0 +1,88 @@ +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import json + +from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase + + +fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') +fixture_data = {} + + +def load_fixture(name): + path = os.path.join(fixture_path, name) + + if path in fixture_data: + return fixture_data[path] + + with open(path) as f: + data = f.read() + + try: + data = json.loads(data) + except Exception: + pass + + fixture_data[path] = data + return data + + +class TestDellos6Module(ModuleTestCase): + + def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False): + + self.load_fixtures(commands) + + if failed: + result = self.failed() + self.assertTrue(result['failed'], result) + else: + result = self.changed(changed) + self.assertEqual(result['changed'], changed, result) + + if commands is not None: + if sort: + self.assertEqual(sorted(commands), sorted(result['updates']), result['updates']) + else: + self.assertEqual(commands, result['updates'], result['updates']) + + return result + + def failed(self): + with self.assertRaises(AnsibleFailJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertTrue(result['failed'], result) + return result + + def changed(self, changed=False): + with self.assertRaises(AnsibleExitJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertEqual(result['changed'], changed, result) + return result + + def load_fixtures(self, commands=None): + pass diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py new file mode 100644 index 00000000..b1f3f23f --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py @@ -0,0 +1,108 @@ +# (c) 2020 Red 
Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.compat.tests.mock import patch +from ansible_collections.dellemc.os6.plugins.modules import os6_command +from units.modules.utils import set_module_args +from .os6_module import TestDellos6Module, load_fixture + + +class TestDellos6CommandModule(TestDellos6Module): + + module = os6_command + + def setUp(self): + super(TestDellos6CommandModule, self).setUp() + + self.mock_run_commands = patch('ansible.modules.network.os6.os6_command.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos6CommandModule, self).tearDown() + self.mock_run_commands.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item['command']) + command = obj['command'] + except ValueError: + command = item['command'] + filename = str(command).replace(' ', '_') + output.append(load_fixture(filename)) + return output + + self.run_commands.side_effect = load_from_file + + def test_os6_command_simple(self): + set_module_args(dict(commands=['show version'])) + result = self.execute_module() + 
self.assertEqual(len(result['stdout']), 1) + self.assertTrue(result['stdout'][0].startswith('Machine Description')) + + def test_os6_command_multiple(self): + set_module_args(dict(commands=['show version', 'show version'])) + result = self.execute_module() + self.assertEqual(len(result['stdout']), 2) + self.assertTrue(result['stdout'][0].startswith('Machine Description')) + + def test_os6_command_wait_for(self): + wait_for = 'result[0] contains "Machine Description"' + set_module_args(dict(commands=['show version'], wait_for=wait_for)) + self.execute_module() + + def test_os6_command_wait_for_fails(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show version'], wait_for=wait_for)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 10) + + def test_os6_command_retries(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 2) + + def test_os6_command_match_any(self): + wait_for = ['result[0] contains "Machine Description"', + 'result[0] contains "test string"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any')) + self.execute_module() + + def test_os6_command_match_all(self): + wait_for = ['result[0] contains "Machine Description"', + 'result[0] contains "Dell Networking"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all')) + self.execute_module() + + def test_os6_command_match_all_failure(self): + wait_for = ['result[0] contains "Machine Description"', + 'result[0] contains "test string"'] + commands = ['show version', 'show version'] + set_module_args(dict(commands=commands, wait_for=wait_for, match='all')) + self.execute_module(failed=True) diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py 
b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py new file mode 100644 index 00000000..1d2f60eb --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py @@ -0,0 +1,146 @@ +# +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests.mock import patch +from ansible_collections.dellemc.os6.plugins.modules import os6_config +from units.modules.utils import set_module_args +from .os6_module import TestDellos6Module, load_fixture + + +class TestDellos6ConfigModule(TestDellos6Module): + + module = os6_config + + def setUp(self): + super(TestDellos6ConfigModule, self).setUp() + + self.mock_get_config = patch('ansible.modules.network.os6.os6_config.get_config') + self.get_config = self.mock_get_config.start() + + self.mock_load_config = patch('ansible.modules.network.os6.os6_config.load_config') + self.load_config = self.mock_load_config.start() + + self.mock_run_commands = patch('ansible.modules.network.os6.os6_config.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos6ConfigModule, self).tearDown() + self.mock_get_config.stop() + self.mock_load_config.stop() + self.mock_run_commands.stop() + + 
def load_fixtures(self, commands=None): + config_file = 'os6_config_config.cfg' + self.get_config.return_value = load_fixture(config_file) + self.load_config.return_value = None + + def test_os6_config_unchanged(self): + src = load_fixture('os6_config_config.cfg') + set_module_args(dict(src=src)) + self.execute_module() + + def test_os6_config_src(self): + src = load_fixture('os6_config_src.cfg') + set_module_args(dict(src=src)) + commands = ['hostname foo', 'exit', 'interface Te1/0/2', 'shutdown', 'exit'] + self.execute_module(changed=True, commands=commands) + + def test_os6_config_backup(self): + set_module_args(dict(backup=True)) + result = self.execute_module() + self.assertIn('__backup__', result) + + def test_os6_config_save(self): + set_module_args(dict(save=True)) + self.execute_module(changed=True) + self.assertEqual(self.run_commands.call_count, 1) + self.assertEqual(self.get_config.call_count, 0) + self.assertEqual(self.load_config.call_count, 0) + args = self.run_commands.call_args[0][1] + self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0]) +# self.assertIn('copy running-config startup-config\r', args) + + def test_os6_config_lines_wo_parents(self): + set_module_args(dict(lines=['hostname foo'])) + commands = ['hostname foo'] + self.execute_module(changed=True, commands=commands) + + def test_os6_config_lines_w_parents(self): + set_module_args(dict(lines=['description "teest"', 'exit'], parents=['interface Te1/0/2'])) + commands = ['interface Te1/0/2', 'description "teest"', 'exit'] + self.execute_module(changed=True, commands=commands) + + def test_os6_config_before(self): + set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar'])) + commands = ['snmp-server contact bar', 'hostname foo'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os6_config_after(self): + set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar'])) + commands = 
['hostname foo', 'snmp-server contact bar'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os6_config_before_after_no_change(self): + set_module_args(dict(lines=['hostname router'], + before=['snmp-server contact bar'], + after=['snmp-server location chennai'])) + self.execute_module() + + def test_os6_config_config(self): + config = 'hostname localhost' + set_module_args(dict(lines=['hostname router'], config=config)) + commands = ['hostname router'] + self.execute_module(changed=True, commands=commands) + + def test_os6_config_replace_block(self): + lines = ['description test string', 'shutdown'] + parents = ['interface Te1/0/2'] + set_module_args(dict(lines=lines, replace='block', parents=parents)) + commands = parents + lines + self.execute_module(changed=True, commands=commands) + + def test_os6_config_match_none(self): + lines = ['hostname router'] + set_module_args(dict(lines=lines, match='none')) + self.execute_module(changed=True, commands=lines) + + def test_os6_config_match_none(self): + lines = ['description test string', 'shutdown'] + parents = ['interface Te1/0/2'] + set_module_args(dict(lines=lines, parents=parents, match='none')) + commands = parents + lines + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os6_config_match_strict(self): + lines = ['description "test_string"', + 'shutdown'] + parents = ['interface Te1/0/1'] + set_module_args(dict(lines=lines, parents=parents, match='strict')) + commands = parents + ['shutdown'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os6_config_match_exact(self): + lines = ['description test_string', 'shutdown'] + parents = ['interface Te1/0/1'] + set_module_args(dict(lines=lines, parents=parents, match='exact')) + commands = parents + lines + self.execute_module(changed=True, commands=commands, sort=False) diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py 
b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py new file mode 100644 index 00000000..ace3a8a8 --- /dev/null +++ b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py @@ -0,0 +1,105 @@ +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.compat.tests.mock import patch +from units.modules.utils import set_module_args +from .os6_module import TestDellos6Module, load_fixture +from ansible_collections.dellemc.os6.plugins.modules import os6_facts + + +class TestDellos6Facts(TestDellos6Module): + + module = os6_facts + + def setUp(self): + super(TestDellos6Facts, self).setUp() + + self.mock_run_command = patch( + 'ansible.modules.network.os6.os6_facts.run_commands') + self.run_command = self.mock_run_command.start() + + def tearDown(self): + super(TestDellos6Facts, self).tearDown() + + self.mock_run_command.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item) + command = obj['command'] + except ValueError: + command = item + if '|' in command: + command = str(command).replace('|', '') + filename = str(command).replace(' 
', '_') + filename = filename.replace('/', '7') + output.append(load_fixture(filename)) + return output + + self.run_command.side_effect = load_from_file + + def test_os6_facts_gather_subset_default(self): + set_module_args(dict()) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('"os6_sw1"', ansible_facts['ansible_net_hostname']) + self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(1682, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(623, ansible_facts['ansible_net_memfree_mb']) + + def test_os6_facts_gather_subset_config(self): + set_module_args({'gather_subset': 'config'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('config', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('"os6_sw1"', ansible_facts['ansible_net_hostname']) + self.assertIn('ansible_net_config', ansible_facts) + + def test_os6_facts_gather_subset_hardware(self): + set_module_args({'gather_subset': 'hardware'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertEquals(1682, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(623, ansible_facts['ansible_net_memfree_mb']) + + def test_os6_facts_gather_subset_interfaces(self): + set_module_args({'gather_subset': 'interfaces'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', 
ansible_facts['ansible_net_gather_subset']) + self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(['Te1/0/5', 'Te1/0/6'], ansible_facts['ansible_net_neighbors'].keys()) + self.assertIn('ansible_net_interfaces', ansible_facts) diff --git a/ansible_collections/dellemc/os9/.ansible-lint b/ansible_collections/dellemc/os9/.ansible-lint new file mode 100644 index 00000000..d8c4900d --- /dev/null +++ b/ansible_collections/dellemc/os9/.ansible-lint @@ -0,0 +1,2 @@ +skip_list: + - '208' diff --git a/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml new file mode 100644 index 00000000..6834c54a --- /dev/null +++ b/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml @@ -0,0 +1,33 @@ +name: CI +on: +- pull_request + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - stable-2.10 + - devel + runs-on: ubuntu-latest + steps: + + - name: Check out code + uses: actions/checkout@v1 + with: + path: ansible_collections/dellemc/os9 + + - name: Set up Python 3.6 + uses: actions/setup-python@v1 + with: + python-version: 3.6 + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install ansible_collections.ansible.netcommon + run: ansible-galaxy collection install ansible.netcommon -p ../../ + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color --python 3.6 diff --git a/ansible_collections/dellemc/os9/.gitignore b/ansible_collections/dellemc/os9/.gitignore new file mode 100644 index 00000000..c6fc14ad --- /dev/null +++ b/ansible_collections/dellemc/os9/.gitignore @@ -0,0 +1,387 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at 
https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined. 
Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/dellemc/os9/COPYING b/ansible_collections/dellemc/os9/COPYING new file mode 100644 index 00000000..10926e87 --- /dev/null +++ b/ansible_collections/dellemc/os9/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/ansible_collections/dellemc/os9/FILES.json b/ansible_collections/dellemc/os9/FILES.json new file mode 100644 index 00000000..de5777d5 --- /dev/null +++ b/ansible_collections/dellemc/os9/FILES.json @@ -0,0 +1,3953 @@ +{ + "files": [ + { + "format": 1, + "ftype": "dir", + "chksum_sha256": null, + "name": ".", + "chksum_type": null + }, + { + "ftype": "file", + "chksum_sha256": "0c29a1ae51505d7a5d1e7f80c5abac708f68c44c5bd96fc94f0afff2408daeca", + "name": ".ansible-lint", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/sanity", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "47b57717c6b01630d3628ebfd0288cb961d6c1ae43d050656ff40cca0c136831", + "name": "tests/sanity/ignore-2.9.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014", + "name": "tests/sanity/requirements.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "aa689a5caa0c4c0d15e13cc42590037dd2a70c8663d961b7d890b345cc175a99", + "name": "tests/sanity/ignore-2.10.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "aa689a5caa0c4c0d15e13cc42590037dd2a70c8663d961b7d890b345cc175a99", + "name": "tests/sanity/ignore-2.11.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "tests/integration/targets/os9_command", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_command/os9_command", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_command/os9_command/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_command/os9_command/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7fec66f82d7fc43d56da0eea8b53394eba52f26bf8f7059f12ea9703503b562f", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a1082ade2b6b3b60448649536e311691e159519934fad93cd473b334e07a01f9", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/contains", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bafdebd96db17a954899808696fa2e3d38ba09b03114638ada75f49a96acb588", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/output", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "29d2545c7c7de45bbc802b34e167797f5a9ff85cd8456ed30c2d4fe00cf80cb7", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/timeout", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cedd8e1102bc85d0a77a8e752253d2dd42276dc672b0d5eb8e51ce5011dc15a0", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/invalid", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "431660d4177c8289f53aa0487bd9195b0de7e1ed944bee9e09c665bd532cc8bb", + "name": "tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml", + "chksum_type": 
"sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_command/os9_command/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c67b69d54f5ebc1de087ed737f6a0e4119d6f045229f64b7cbd1971e4d5eb14f", + "name": "tests/integration/targets/os9_command/os9_command/defaults/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_command/os9_command/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1", + "name": "tests/integration/targets/os9_command/os9_command/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "81ae4136ca3d879f645bc323268dd5af5a89467b0d776010965374f56ef07eb0", + "name": "tests/integration/targets/os9_command/os9_command/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "tests/integration/targets/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts/os9_facts", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts/os9_facts/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts/os9_facts/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2a1b00378f0b59a8b92d07b174826617ce1dde761f2f98cb0627737ca2895171", + "name": 
"tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts/os9_facts/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "name": "tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_facts/os9_facts/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47", + "name": "tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2267f2038f66e2be89d7a6c63ffdb80801f457c22193408b10bae86c3144670e", + "name": "tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config/os9_config", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config/os9_config/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config/os9_config/tests/cli", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "71ff9b108a1e14b50a2cb5e71de55580c9a2a345cf53385391e95a359310508d", + "name": "tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": 
"file", + "chksum_sha256": "3dcd20cbd7aa3ff27003ccf2feed4bdf6d5bb86f11438772fd73c89c9a1955f3", + "name": "tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "608a1218cafc5851413641f44f2109add4c0977d42b9e4a1b795bf89906a7155", + "name": "tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config/os9_config/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032", + "name": "tests/integration/targets/os9_config/os9_config/defaults/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/integration/targets/os9_config/os9_config/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47", + "name": "tests/integration/targets/os9_config/os9_config/tasks/main.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2267f2038f66e2be89d7a6c63ffdb80801f457c22193408b10bae86c3144670e", + "name": "tests/integration/targets/os9_config/os9_config/tasks/cli.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600", + "name": "tests/.gitignore", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network/os9", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d44b86de2cf6f1bc939fc743cf76dbec128e7b78ba872c2c8b6a9399c0acf3b5", + "name": "tests/unit/modules/network/os9/test_os9_config.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "tests/unit/modules/network/os9/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3987235f717a4104ac08ba54efdaf0917d4f70566176728717ca382e3de74856", + "name": "tests/unit/modules/network/os9/os9_module.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f776673004a00a68326154ede112ae99897541044cd83dc0162783c81019f050", + "name": "tests/unit/modules/network/os9/test_os9_command.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "tests/unit/modules/network/os9/fixtures", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e6feab0f1d65c9a751171208547a21763d80b4f3589893bf3c9a175d7b31e483", + "name": "tests/unit/modules/network/os9/fixtures/show_ipv6_interface", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "125417e5d4d2deca2272f11e4c5124579741cff5e35fdb749b63696e87e87d0b", + "name": "tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5fb0e1ad78714da4423b9c9a7ebd2938b0050febc7b10d32246a9afe1981ade7", + "name": "tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5a252359d7d8aaf0c632dcdfdce095984764c593871f6acb005f94dbcfa16aff", 
+ "name": "tests/unit/modules/network/os9/fixtures/show_inventory", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d09c902762898fca26cae465dfa5d091d8cb255892c07e75b0462d7046cb5544", + "name": "tests/unit/modules/network/os9/fixtures/show_interfaces", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "182cb0ed4d2624e815ba3aca306283100e279b7b01305a771ac3dc9962839514", + "name": "tests/unit/modules/network/os9/fixtures/os9_config_src.cfg", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "baf72eb01ee429ea7599874957dd1398b25da53212c61f667b214b3bf2615fc9", + "name": "tests/unit/modules/network/os9/fixtures/show_version", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d895db603a8e35ece016395993b860fef4d8c04d3f5e316083203858592d338a", + "name": "tests/unit/modules/network/os9/fixtures/show_file-systems", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8e44f65026c6a56109e9e3ba30e6b9bfb866dedd3b475beb37841479d5e010f7", + "name": "tests/unit/modules/network/os9/fixtures/show_running-config", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "885553e6317b9e9da78f7230ba38e410b1ec7fe548d852d3d19c044bf29bfaa5", + "name": "tests/unit/modules/network/os9/fixtures/os9_config_config.cfg", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "315f7a32efeddc96e388092d8af26a4e6bd220a29e8f78dcfaf8ffed471c7861", + "name": "tests/unit/modules/network/os9/fixtures/show_memory__except_Processor", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2401872bfd39a36e786461d82e8672887258ba81a4e477c44ebd223ddaa8ba2d", + "name": "tests/unit/modules/network/os9/test_os9_facts.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "meta", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "21e22101c2afb46bb6a916f2bf3df69eb5023903506152bbf6f0669e831a422c", + "name": "meta/runtime.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": ".github", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": ".github/workflows", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a72707b6d3f3810741fa9784222d3c56f48472b77aba15222ae780c652262eac", + "name": ".github/workflows/ansible-test.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/module_utils", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/module_utils/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/module_utils/network", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/module_utils/network/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "78158444072c124ca00b4cfee2471cc5ac35d0ac23f55063665afad9c700831d", + "name": "plugins/module_utils/network/os9.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/action", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/action/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"2252271fdb4a68288f44c2a8d31c3693bca0141960ba8c5a1c01ab7a12147ba1", + "name": "plugins/action/os9.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/terminal", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/terminal/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cd7498d518117883d4ace3f39e8045f0023fe9a2c62bcc8277f35d35a0a87ad7", + "name": "plugins/terminal/os9.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/cliconf", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/cliconf/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "452eb0a83fa4a5adf85ca49e0c7b9f51e298d3b2bcb3a25009fc670fa4b3ecd7", + "name": "plugins/cliconf/os9.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/doc_fragments", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/doc_fragments/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d3bc65aabc3b22af8323623fc19dbb48fd65252b0505fa20bf7ac7e9b8171f33", + "name": "plugins/doc_fragments/os9.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "plugins/modules", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "88e781b42ea96e44fa81d08347c70f57139dfbb46c741879280f2b904638b29c", + "name": "plugins/modules/os9_facts.py", + "chksum_type": "sha256", + "format": 
1 + }, + { + "ftype": "file", + "chksum_sha256": "49b93b6c6e7ddbd76a1b1c89b8775062ec9bea4e67209cef7238585794c6cbbf", + "name": "plugins/modules/os9_config.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "name": "plugins/modules/__init__.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d1297f0656ee49abe886cf9ab77871a79b1f7196d51bc3d0a53aee675a0c8652", + "name": "plugins/modules/os9_command.py", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", + "name": "COPYING", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720", + "name": ".gitignore", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "changelogs", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "70a7fbafc6e1175acdf70896d266f6e7c3cdaf7b6d786a6ccfd6bc6d84e46bae", + "name": "changelogs/config.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e05e09bd169818f68582946673a13a370158c2539a52a6a4b16e5ddf68e32668", + "name": "changelogs/changelog.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "349741eaeee3c184691fa268cae80843cdb9e7a8a4400ef1869d05d6f795bb87", + "name": "changelogs/CHANGELOG.rst", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "docs", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e81d64f1de9c3270f1f56d5ce253a45dd18e3fd85917cd72607ca597127d1f71", + "name": "docs/os9_dns.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"19771f0031148029ab0b196b35580c440fd07884c3abe6b8b86b27aefed11732", + "name": "docs/os9_aaa.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b4a5d8c724dfaa01e7b4a2a17ca38d3dd5f1bca9ce695666fcd829d0330f296b", + "name": "docs/os9_ntp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9aa3a01ae4c5a30649015623999722c1ee2d7dc555177cc80b676608afe6c4d0", + "name": "docs/roles.rst", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2ff6ab9e62f00e60c6c321476e3de23db595faf1f57b4b4d03df4ca27702b2b9", + "name": "docs/os9_xstp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7c1e34c1ae9189e3c0e9092a75f867ea5b4ae0c19e273f57d30a84d59418d1bb", + "name": "docs/os9_vlan.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d4ef7e561975060c0f8480f7ec0e7ca042ae250930eb4aaa2fe89ff1d5c935a6", + "name": "docs/os9_acl.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d1d39e1bd43a44fd8ea455be74fdfcf42732e5775916716399599402075523bf", + "name": "docs/os9_vrrp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "087ff2922aec2a9911d67440527e81e0ad1ea1fc079776f95e84badba9039843", + "name": "docs/os9_vrf.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "dd43845c1a91be5e8323914ab1aee26275dc2362fbf4520f8c9d31952e628be3", + "name": "docs/os9_interface.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b19735d8dc20ee8d5b79ec6f25244d5bf513ae93c0ec72cb130018a991e9d409", + "name": "docs/os9_prefix_list.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "858d5c07058ee5d90bdf5022439e788a9bc1e2daad0bfb7ce522edff282598a6", + "name": "docs/os9_lag.md", + "chksum_type": "sha256", + 
"format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b72d204f8dedd27d2988ecf0d53589a6547205774be69f6b298561f743b9b252", + "name": "docs/os9_ecmp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "19c514de5c3e29a0f885abb6da52fff413eb9078b9ef1fe87253705e2f919ad8", + "name": "docs/os9_lldp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a770aa97b66a8c49a948bf4ebd6dbec672b77b6c4c9e80d05973580b1ff13b12", + "name": "docs/os9_system.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3aac3568b9d528d747b0b59acec0b71f473c8d7c5254452b4eb44c5f8131dc8d", + "name": "docs/os9_dcb.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "440f5dfc57184ae41e73988b49e0391e678c1eabb534eb1a527fcbadc6ee1b76", + "name": "docs/os9_sflow.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "33fd4184f2648fcc0e69c524b3797a5013afabd703eb4a3ec40ee3abc1436939", + "name": "docs/os9_bgp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d000c615ec8d3366f408fc731d001055d85dd22912f064440e1331e0a86a94fb", + "name": "docs/os9_copy_config.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7b5fa6361cabee40c802df10c63dea98b38ff1221c0a9846f5823908d9689190", + "name": "docs/os9_snmp.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "88842853681202a3e8b4cc0712310dad87eba8c8f5b24f76fff101e93e7cd714", + "name": "docs/os9_vlt.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c1bf779e16b26773779b3cff23111b14c76913556729e019af7f1091d8fbb235", + "name": "docs/os9_users.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "fc49c2852e7bf0c11fa863341dd38dae4de133630a8de2af96c4348f52284c34", + 
"name": "docs/os9_logging.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "23e9d4c2fd8d62be40f9ddbfdac92465f4682886c2b1dd073f37097197f705d9", + "name": "README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks/clos_fabric_ebgp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks/clos_fabric_ebgp/group_vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8dfc56fa05d702dbc40efa71a43a4148cb1fabde2d61c27fdc5b652f96b47e7c", + "name": "playbooks/clos_fabric_ebgp/group_vars/all", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bc5ab983e078c9a5a8cd262b0379d890817589e29e324cf4ded2e2d87e157da6", + "name": "playbooks/clos_fabric_ebgp/group_vars/spine.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "playbooks/clos_fabric_ebgp/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "855a81733f763088f5326ac9cb210284edfab2e21d0a0886764c007c6878194d", + "name": "playbooks/clos_fabric_ebgp/datacenter.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ba9189063a69b9774a2aded0a7d4c2d7c36e9270bc5354bd21e62d146df6d881", + "name": "playbooks/clos_fabric_ebgp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "playbooks/clos_fabric_ebgp/host_vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0686aa905934de49d4403b0d8fbb27fb68dfac45043db2a509ce827e58c321db", + "name": 
"playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6ad88e8d68755ee79d86f572586bb3b2bea037e8699d52acd699fe1584abc488", + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d1f8d83e73880a72c0206133da575b0fd416c12919d2a85b628534de88ce5009", + "name": "playbooks/clos_fabric_ebgp/host_vars/spine2.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "776939597c809d002ee820c5ed7f776df294a9bfe6be72b37e5aa4ee53512360", + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9c189687de2087d635359151f3c3d4f07bea2caed813e889ab55b462c9067326", + "name": "playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "18e827e2380304c8065de68d8db4b7808eb351b86601611610ca3340e6a42844", + "name": "playbooks/clos_fabric_ebgp/host_vars/spine1.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "328e38eb6243f1b7180e79b06a29b292fe36b3bfd9e47c3393f84e37393a79ad", + "name": "roles/os9_vrf/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_vrf/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": 
"roles/os9_vrf/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_vrf/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "82c2dacec1c7e99ae63ebf20fafe0c16105959699e02239c5d579c963cc695d2", + "name": "roles/os9_vrf/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9d09f75a83662192bce545b4d03c876f1db00e79f9867e7e2875f765fd648cc5", + "name": "roles/os9_vrf/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a9d07572f5aaa73ea15a375fdbb317a443d10a524b63ab28fe4edb67f8d273dd", + "name": "roles/os9_vrf/templates/os9_vrf.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4791fe8f67d0b722e1af57aea187cde857443730901432f6f15da82d285474fc", + "name": "roles/os9_vrf/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e223f46d113fa7925d4c5bd9218810c1f241fe944302f2effc0e8728e3ef4f80", + "name": "roles/os9_vrf/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "af5620af058efa0916111f46705ab43205edef3bef05542d6da325ae47f2c120", + "name": "roles/os9_vrf/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"0e4a9b4572288830796b6833d2053b416d6243cdfd0969b7f4b54f2d9e8622c5", + "name": "roles/os9_vrf/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrf/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "83170116a0504aba73704308942df782d7c6e342c0828ef9387342f4b0b3d079", + "name": "roles/os9_vrf/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a9f076780aa43672910f9aede19c73abd0c7b8e017167ad2c483b0d4d1c58636", + "name": "roles/os9_sflow/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_sflow/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_sflow/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a5fe3e746e1bc1b090970dea0271d25b6fadb8054efcd6250c1a8b49a59f697a", + "name": "roles/os9_sflow/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "62be668d4bddb7e6708bcdd68b931ab71a3976867c66eca68965425326096669", + "name": "roles/os9_sflow/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": 
"file", + "chksum_sha256": "82704f4f253f0d64ec1e43a38b38802c549256b3acd65417e7dfed1e98c4ae0e", + "name": "roles/os9_sflow/templates/os9_sflow.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "47cceedfd12b18197b1eb1d691e0e2e46f3a33b8d4ddda5c180720dd781fc29b", + "name": "roles/os9_sflow/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f0d4a18cc0ead67b6d26be2af0b2c7bffa0fcb82bf57b357f2823af5b237a5ec", + "name": "roles/os9_sflow/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c63b06d76b54b76a2b8221e04a2514b8a691679e799813345ee17a5cf7453341", + "name": "roles/os9_sflow/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b42edf22664bd7d9f44f6309b13faa376c197d95ced795193dda8ef8710de8fa", + "name": "roles/os9_sflow/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_sflow/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9be297ba0a1e316c69f0738d824bbfdce133f2edec183a22b750c25824f63879", + "name": "roles/os9_sflow/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"e8ce2a6e7602821f89d3e8394dc3bfc8828df82be2512c63cbb442717b271a1b", + "name": "roles/os9_lag/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_lag/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_lag/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c551da3ea192239ad7cc7ed48f7e05cece38c80c9e1388fc673c349efd57acd4", + "name": "roles/os9_lag/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "47bfa1c48ee4cbeda5082df0d5d2ddaf3c0d5e2c8b6f55bb842cf07f425de331", + "name": "roles/os9_lag/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ee73098cbce745d8c9ef24794e73123c6c334a3a7b4eb8a8640e842a970e38ba", + "name": "roles/os9_lag/templates/os9_lag.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "01259b7234aa137d0d3873560d93fff1a2863472575dfb3c5386f9c2b5b6d395", + "name": "roles/os9_lag/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4757240d44c3b63d143c289627b337375f6b012d3df063bdfe3f3b75193e99c0", + "name": "roles/os9_lag/vars/main.yml", + 
"chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ac08af08a2a8a3674ec86d5372661cfcd2cb2b59d22a92940f9193c5eef37897", + "name": "roles/os9_lag/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "459917ce56ac381180085455a66a6404ccc38b88ea59d5437fadf7884d177cc6", + "name": "roles/os9_lag/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lag/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e5e65e1af8aef78cd7ae46197210ef94d1800b2a7a0df70aa88efe338d6554b0", + "name": "roles/os9_lag/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4399f312689744d712656ba35d0e6380b22ec594527097aed5b5c542fb9959df", + "name": "roles/os9_bgp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_bgp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_bgp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6d8c0035b353b80ca85fe611ba5b38216a74a00176619fadfa3a1ad9f31c647f", + "name": "roles/os9_bgp/tests/main.os9.yaml", 
+ "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cc84666798d17dae32dc929df414e0c170005b751c6f73c55cf1082d223c2e1a", + "name": "roles/os9_bgp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "402b54dfddd140ce7a39f22e52de134f82d10ee941beadfa0f1dd8b759fdbff1", + "name": "roles/os9_bgp/templates/os9_bgp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ddff5a7d8ae16b2005878b50e0c58be34a1adab7ef9549aff528bbd8914d7ff9", + "name": "roles/os9_bgp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e3bf13b2ab4c0bbc6dbf1c8317167024b9b242749a1fa358c8ffe28e58d95a64", + "name": "roles/os9_bgp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "794a1ca7042ebadf2e8dbd7eb696fec5cbc982453536654022f8348d9dd63ad4", + "name": "roles/os9_bgp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4b8eecc34bbb888dd110c345c6c469afb5f4ffcc5f0dedb8539b905c3df6e1e0", + "name": "roles/os9_bgp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_bgp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1c4af2285f60dcab13f5fdb4b2012dc924ad19df54fd56c8a3c5dc5c681a2af4", + "name": 
"roles/os9_bgp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5a6bec4eb07c0b5b105cf47a4e1d6a300a54a3c3705a01512755c86e11e8a6d0", + "name": "roles/os9_lldp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_lldp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_lldp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bacbc3d73ed0149d3539ff2a367127f439e4a746d419b14d6827ae8aa71ed1bb", + "name": "roles/os9_lldp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2cccaad903053c4c7fb5306f2efc55c380add6c53af6219a533fd10fd26f42be", + "name": "roles/os9_lldp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "581c3f712c0f682b8e794768040cf2b87623f871984f618ebbc8c168e409329d", + "name": "roles/os9_lldp/templates/os9_lldp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"a3c75160f8505b7530dd5f0a75ffb020ee40bd42b907cfb53f9a77d9e471d08a", + "name": "roles/os9_lldp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1f37d26dc0302e65ab7e6c64ead0fbde3a9af90300b41c50833d8fdd4afcbc11", + "name": "roles/os9_lldp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c07e9a3a9fa8e7ff8da33649e1e9dd70fb2946b56e1361c58f9f9183a006fcc5", + "name": "roles/os9_lldp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "62abbb6384e7e70179a32796857731fe713246323b7b3b09cb26bb8bdf217f3f", + "name": "roles/os9_lldp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_lldp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0e73bad322e68692969dd78a67a40a04e46405aa6101b7340ff3975dac554a1a", + "name": "roles/os9_lldp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "41505d1144bcec0a736a30bb7a675387edefcd3f43786c11642facd88debc46a", + "name": "roles/os9_aaa/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_aaa/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, 
+ "name": "roles/os9_aaa/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_aaa/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "46e62e6fdc007a49b8cd1bbb496047742c2d3634756addf6dee3222ada757f72", + "name": "roles/os9_aaa/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "67a167f69545c2e18aefed05f7278dd991363af0a58b655e760212a72c5bf2ce", + "name": "roles/os9_aaa/tests/main.os6.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d88597d2c9fef832b4c003757838c420f71fcb54c060deacbd753af87d46a333", + "name": "roles/os9_aaa/templates/os9_aaa.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a31f22a0a47898e35b56b9c5aa4f6b9fd7e36829809aebace774b9ecdc31f39b", + "name": "roles/os9_aaa/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ee4fc0e75c9bd541ce4202e29bb73201b79ae69c899abc65a802dd7769fdc5b4", + "name": "roles/os9_aaa/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "526df525101ddda741aea64cffc40cdd740376739703639e71e45073311c6274", + "name": "roles/os9_aaa/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + 
"chksum_sha256": "3f916612c2b5d2833a97330e40f2075e952c62a0c8e24b023fa2c2415d09785b", + "name": "roles/os9_aaa/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_aaa/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d4f4ed469715df92037e7560cf7fb49044c246a4824b5a378a7747b16d1d5fcc", + "name": "roles/os9_aaa/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8b3be39873d380cac2259251db29c1ee0a27896283bc8d73e1b8fc9c6fa845d7", + "name": "roles/os9_system/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_system/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_system/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6781ea8f4437f79de909fb4035678cda7a57661f6e2de823148fd6031ee5b354", + "name": "roles/os9_system/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "80de0d1be31759897d13f274a29d667cc91900c51c3ea8461b0a13d6b53ec7e2", + "name": "roles/os9_system/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/templates", + "chksum_type": null, + 
"format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8ba71352c4c8293f02164dad4c1d5e3b84667ba877d826bb7519522437e38b6f", + "name": "roles/os9_system/templates/os9_system.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5af0a107d425ac7e7a6eabfb6244e036828c3e1e7fab4c7ebfc0019a80351c6e", + "name": "roles/os9_system/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2ccff3e3162348757f00101ccb0890cf12bd823b19cdb9a24ecd9ee6aa1cfc4a", + "name": "roles/os9_system/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9d32045c65e6b80d9cf45d4f17537caef70d61ee59ccc5005262889e3a40fd59", + "name": "roles/os9_system/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5c0a32bf0ea6008ed276ee38ead2c7d5e5354f669244f9129024c28339214ae5", + "name": "roles/os9_system/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_system/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "36fdd262d9e3162ff3da7d48de1dbe3267e8a1b5ee8bd6ddd29199e00c885e49", + "name": "roles/os9_system/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/handlers", + "chksum_type": null, + "format": 1 + }, + { + 
"ftype": "file", + "chksum_sha256": "55e506ce13e2f09e9e92329a0c636f1828a4aa20bb55a3f8f03d3405a02d2527", + "name": "roles/os9_prefix_list/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_prefix_list/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_prefix_list/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a977ff9d2a922d16bec06e5f95a81ef6fcaf996db42697d8102ce705f1e9d2ef", + "name": "roles/os9_prefix_list/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5a0ba2151e2353f32777b08b3e87d7747ba486e3582909e4942fc09b24444ff3", + "name": "roles/os9_prefix_list/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a1151c7962365507462e93f65755b778aed85ad023f606d7bfa1324c2fdb1416", + "name": "roles/os9_prefix_list/templates/os9_prefix_list.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "462f14295d91855188b42ea296e20728f58000fd97ac57cb3e98d0d93fc8342f", + "name": "roles/os9_prefix_list/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": 
"file", + "chksum_sha256": "9aecebe122a66572a519557baca9944627a8c5ae508fc79846d530cc536985c5", + "name": "roles/os9_prefix_list/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "271f9da4247e0b36a3566d5c344e3c5c64b528c60dfe0d6077ed008fc22ee1c7", + "name": "roles/os9_prefix_list/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "992f92fcc74006e970557f814cd0146bbe6d2cefe1403791e2ebb7bb6eb51ad5", + "name": "roles/os9_prefix_list/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_prefix_list/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "212f31f556f2327e0479895aaeecb66bb21eb804e96a3c0d01c82476fdbca0a7", + "name": "roles/os9_prefix_list/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f63e7892ea088a676c9cf647f8be96b6163ee798d7a5241a8cc9851f28007ddc", + "name": "roles/os9_snmp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_snmp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_snmp/tests/inventory.yaml", + 
"chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e470344c03bee986ef193b921efaf0af420e7d1b4d0d921ef9961f4142ec189f", + "name": "roles/os9_snmp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "92fab363287fae218e2c8c9df27430c9024c0053a5d604777459089d40a33545", + "name": "roles/os9_snmp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "964ea7367c2996d49f9630e066c501d55fd429f60fc69c6546a4d3dc8212b622", + "name": "roles/os9_snmp/templates/os9_snmp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bb161a86e7babdd18253b1038616a326f03636a06536b475addb506c118281a4", + "name": "roles/os9_snmp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "be39ed1562711ed3b2437dea16f758d8bf517ab8a791446a4635e75b22bfbe21", + "name": "roles/os9_snmp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3ffe2fe6323ea23a7b1b036865798e9f56616752411fb44c1c48a90e74f6ed62", + "name": "roles/os9_snmp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_snmp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7389af4080c0b463f08c67954423343121aa7c8ce9b99dacd944d88a590dc63a", + "name": "roles/os9_snmp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": 
"roles/os9_snmp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "35f6eb0c04481e0ccfc6a0eb73eba0a790d441d56c5a5d79b0c2b246c4e14e8b", + "name": "roles/os9_snmp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2b38e87edc4d3f68932d5bf099ed5fd8ea70d93d704306d3ed049d40e37f0ca9", + "name": "roles/os9_vlt/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_vlt/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_vlt/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "132284efd21362e11003eef8bd1d7c459cb7b6784b33c32032d0a47114c6317f", + "name": "roles/os9_vlt/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "344709014b6edcfe10171bffac2bcbba099ac82c8370bd26d8baf4147f4b8ee7", + "name": "roles/os9_vlt/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "26098b1dfe90103b9b5e94d4777a2597c22b857dfcb54bf482c57c2432524f3e", + "name": "roles/os9_vlt/templates/os9_vlt.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": 
null, + "name": "roles/os9_vlt/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8ee5cb1918cf36de31fe6c7c7fa937fb7b96f9e09e8a7cc21ee785143e8d2db3", + "name": "roles/os9_vlt/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b69245df09bbc3acf580b4bde39e5beda7de8427640510d66f7d15ddcc35fbb5", + "name": "roles/os9_vlt/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "affc8b0508a1d8811daec7893c9b0cb674a30820b09f317da0b0141abb1156c2", + "name": "roles/os9_vlt/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0f9dbde4e5e8fd21bbe18d8166c97829e32ce4672256901aebea6a55966865e1", + "name": "roles/os9_vlt/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlt/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "07578fbd5595dce89ce08b8b53a777fbf5533be6e08d8e8db05a362e4b1b3b48", + "name": "roles/os9_vlt/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "01ee1a797920557b814cf3554b45e74495243698d99d028df1800b325ae50483", + "name": "roles/os9_copy_config/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_copy_config/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_copy_config/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", + "name": "roles/os9_copy_config/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1c90e7372af3606aff65055fd7a2f3be9b5c4dc4b38c17596d36beca0e164066", + "name": "roles/os9_copy_config/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5b9620be68039a5077ed58d7e1e044114af95a5c10d58b37f21efd1479d6ed55", + "name": "roles/os9_copy_config/templates/os9_copy_config.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1dd056b18de3a4536562c829ed620d26b9656b967f2d3f721a3db296ed492739", + "name": "roles/os9_copy_config/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d9b355a31983ad5f1acdeed96b95782584d363020a4f143b3118fd22c6c99125", + "name": "roles/os9_copy_config/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"77723ba59cb770095cd7c17ae3b98e14236b5175a0b93a43b7b1aaa2a16971ce", + "name": "roles/os9_copy_config/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d4f41f5def27074e2fe886235868e8a3fcd6fdf629f7e0ae9d0b4671b4bf64a4", + "name": "roles/os9_copy_config/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_copy_config/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "84527ffd86c46f3b873c6eb41c4ab3ec683a0705c4b283f1fe93921c60b1c685", + "name": "roles/os9_copy_config/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "789d2d2eaf9f1dea06ec0d24120f98cdc8efd8df706d217b8eef9394c9af4df6", + "name": "roles/os9_ntp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_ntp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_ntp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e15445ca6c63e463b3d16958cafd1b5487250d972a96b81499a8a638b1f54515", + "name": "roles/os9_ntp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, 
+ { + "ftype": "file", + "chksum_sha256": "17efc759f3a2873ac54a378e6193a9bdbc27625fadc9f6648ac9cb8375c76379", + "name": "roles/os9_ntp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ba6f6614f85e9de9725547367940e861552b47d37617be4b97dd78545314cd81", + "name": "roles/os9_ntp/templates/os9_ntp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1147c842c5f03689beb4084aa81ab416f8a8623c361e32a2f0033e0876ab7af4", + "name": "roles/os9_ntp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cc74d3f42e5a4f026b4e7abdc6816a024e30704cf83436258091e82677f71a28", + "name": "roles/os9_ntp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5d4b91ee81601f3ad72554958ae19c16ff85f748e756415eaa713da36fae664f", + "name": "roles/os9_ntp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "53b60c8aec93aca48d04eb336f7d6356933245223674d3121a47a2146b21c93f", + "name": "roles/os9_ntp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ntp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b242fdd811c59913a23a4e78fcfe8e83f3b263843c7dee460158630f542fed1b", + "name": "roles/os9_ntp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 
+ }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a718ec989264c1e6e7baf0aee663e100f3af2fe558ec05ea5715329c3b5b5d9a", + "name": "roles/os9_logging/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_logging/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_logging/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "697e597bf75d342ba4f3e60eb499e15bf4d092ad701b6684f9c5babd16da4dfb", + "name": "roles/os9_logging/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5af7cd9b0d509543a80873d15bc44225648be398d2c8be6353e4dcd0bdcf7ba2", + "name": "roles/os9_logging/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "23247f4a92c1f63e5bd036b930112a0d24e5cd06318ab78ae84c2238b24a30ce", + "name": "roles/os9_logging/templates/os9_logging.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ba0c438ab0fb041c432db690eb4d391f67ea7760763be98a203fbc4a56bc5173", + "name": 
"roles/os9_logging/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6fa31d2a1e3412f56aa2390f96d1b8876fb433764ffbf2f0dd4930ddc1f67646", + "name": "roles/os9_logging/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "df022a69119f4daf35333b84e4977bc3917504ae1ad258ee88a581f2d1b8fa71", + "name": "roles/os9_logging/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a0db5e2a60ed9d1f8ad1f73d05ba11f6f4e8be95aea985c152a8f94ed3969bc3", + "name": "roles/os9_logging/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_logging/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "177872b6c9f2d6a1c9bdf150b3b4a77072c4af8ab76c8c304305fdd5d4fdb0c9", + "name": "roles/os9_logging/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0b4f9c65d055ecdedd62f295d6582d792ee3023ef75203e131379180ca595b5f", + "name": "roles/os9_ecmp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_ecmp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/tests", + 
"chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_ecmp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2887f736ae9837d946286a7d3f37f2e344af45cbb6642d8f67bc250475628873", + "name": "roles/os9_ecmp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0131fa28520315b169b620453dcc86c2fe369ae1843e605ca3ef160aac191192", + "name": "roles/os9_ecmp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b50beb983ed6d26e3a55e5f1226dae4bbe32fd8993191e2c5be7444b27d56590", + "name": "roles/os9_ecmp/templates/os9_ecmp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3403ca7b7b2dddecd069d176ccdc743262e0d9d4d1b71cbf55d08e3b9bd412f5", + "name": "roles/os9_ecmp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3ab88739f9e5506e3f03af1983e5889d465d6a8ec6fd4822ae1dd65abd58f718", + "name": "roles/os9_ecmp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ed8f4d75b1440fd46a57314230bf6b4e7940715fd6f06550e2eb348897a70d58", + "name": "roles/os9_ecmp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"af442ee7c7c9e2a752861ff664e25a342b0f1e798b22b0a3361dc4d91a1f81c6", + "name": "roles/os9_ecmp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_ecmp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2e33cb744140789803c982297776a734828e9b1b0a369a3f64687c67ba1a2b4e", + "name": "roles/os9_ecmp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b6dee9f01529a3945a1ef44678bd3ce2063c7b57359e72fac7526a4941ca61af", + "name": "roles/os9_dcb/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_dcb/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_dcb/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "1879ecba3e800b09be3ccb02e2f506a5255fa7d05e6f4147ab821aa13d4d309a", + "name": "roles/os9_dcb/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2fd0382a4e49d7b34e4cdc1646bbae302aa4c71edadd5001bf7ea8ab3a4d4863", + "name": "roles/os9_dcb/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + 
"chksum_sha256": "9132818aba64211491e368f6ce466d20daf8f41188425bae13bd2afe8c14fc45", + "name": "roles/os9_dcb/templates/os9_dcb.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "93d73f08eb7f5cddf5e3fde83039d0af6a7e7b1dc20da90bfc5bb79d68599829", + "name": "roles/os9_dcb/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8c65780c8a45b662cd6da234e12087283032cace0000763f245997a40ddf4cb1", + "name": "roles/os9_dcb/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d3fbc607d9d5654176b138ad3a3b8860bdd02efaee80857cc2c340fc47e012a3", + "name": "roles/os9_dcb/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f2a1463ec0a20576475fccaea3587b7c0021d64f9db67cb57bfd6bff1d97472d", + "name": "roles/os9_dcb/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dcb/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8c219c522f0f0e34e311993513c54c403a7f14fd5e7a46f796bde3211bcad04e", + "name": "roles/os9_dcb/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"e14262571f041338ffb6ed5287842619eefc504e9a365ab0c5a2706733b59d97", + "name": "roles/os9_vrrp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_vrrp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_vrrp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "06c7945a53e3756ed9904678f7948c988a134b2e31367fd2ee061675ded0c28f", + "name": "roles/os9_vrrp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "43c0e775030d8eceabbe51458243c9a6d4b8ac31ce327e790adb3537ac1a6dcb", + "name": "roles/os9_vrrp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "da5f553bf203e8bf1261a6c39f54474e6383f7fb198ce4525e27bb245f56f4d0", + "name": "roles/os9_vrrp/templates/os9_vrrp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "de4793fd6e3d27ceddcbc42a52f03ab2bfbfaadf09ca9f64384f1d4466b7e739", + "name": "roles/os9_vrrp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "8ad50a040d16c979338d20bc027c505d74973999fce4309cfb5effadbb48f2b5", + "name": 
"roles/os9_vrrp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6af7dbc1efceae78d4e59fbcc1a3dda1a2042742e41f15f446c5c14541f14307", + "name": "roles/os9_vrrp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d8bf1cbccecd2adca7d94456a1c682629720860e4ac71e8c53cc41b2f3d7265b", + "name": "roles/os9_vrrp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vrrp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c6c091fd261666fcb155aa3f53d435c8bce4f7afcc176124b7b8e3414d5357ea", + "name": "roles/os9_vrrp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "564947fa6f512be5164882ea2e712e74644f07952416745a3bec43d003222d09", + "name": "roles/os9_users/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_users/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_users/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"2ef47d1df27ce87233757f2ac53dd95216fab5c1fba3a192c4840f81de19c91a", + "name": "roles/os9_users/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d6abe4a2dfa4ee140d104db11dfffcc20292e3dd7f946711bda115f922d3ac94", + "name": "roles/os9_users/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5acd0a378bc82e6c64d5e7491df5d0f6267e3d43fceedad591eddcb36acb5dac", + "name": "roles/os9_users/templates/os9_users.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b0979140519185b665b020d642c2bb243524e1a9a22ad8dd3d73d653ae96f951", + "name": "roles/os9_users/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f342917d465c3eb791bb41253bf90355047d6362f20d198b110f8a419d9e49a3", + "name": "roles/os9_users/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "61b015f4b27447519bcb59a438f438657bdba3089f542b1e663421875f21e210", + "name": "roles/os9_users/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "867fecb1d2f46cbef060883395f455fe9945458e38b16f7da343749ba2a66414", + "name": "roles/os9_users/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_users/tasks", + "chksum_type": null, + "format": 1 + }, + { + 
"ftype": "file", + "chksum_sha256": "1eada97cb8000ff2dc0d71168b591225e75b5643ca1d21d6ee4e5ba092b7b424", + "name": "roles/os9_users/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "06cb55093c7919c9b833928237314e617b3454c2e1f883d1fbd1042c08ba3b8c", + "name": "roles/os9_interface/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_interface/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_interface/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e9cad0fc65504bd7c7833ed3e9851563cd8de34e546ca88a4b628e54c84b3687", + "name": "roles/os9_interface/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bd827c9440da10af0908feb9fc80e9aee4050a858cba6f5d91b9d5506d4a4b44", + "name": "roles/os9_interface/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9f7b85676c476ea75a38bf9bd637592856684c5b791d2121777ba6bb0ef35aa0", + "name": "roles/os9_interface/templates/os9_interface.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + 
"name": "roles/os9_interface/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "15ad26d58ac0d83592e74a9c7ed2ca686f3e6941ff3b8b7bf674fa4a74b90ad8", + "name": "roles/os9_interface/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f6669df8a0566976a72f43f5250f73d97c797362855aa6471aad87f2a2669fd5", + "name": "roles/os9_interface/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c1974d2c55eef43d63c133aca494e7272abdf73233bf1d5da6933bdab6078386", + "name": "roles/os9_interface/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "c6e2d02450c180c4b6046f3c8c1dd6182596cfd6e7c5b2ec8bc55ffebe02410d", + "name": "roles/os9_interface/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_interface/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4b256fa702eac321dd120c22381ebaf74fef91ad8cd1c846566afbb8e82a3fcf", + "name": "roles/os9_interface/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7f06b39a0d22144098a7436d20589fe60993b26fd57c3b30fb8b995351a026ec", + "name": "roles/os9_xstp/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": 
"5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_xstp/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_xstp/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "20f351bb4c6447e74fd6f694724b9c9880d5820f257b9cf017aab1b9357a22b3", + "name": "roles/os9_xstp/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "60a16999f83102fc0a60b1dc62c3d8da3d82cd925231be9a84dd582a05367961", + "name": "roles/os9_xstp/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "518f9666f56eee0374a345c8d66e95f89a6b54b8d437aaa4647e01b7f86317c4", + "name": "roles/os9_xstp/templates/os9_xstp.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ce35411b14512701202f5951dcfbec195650af9fddc89690594f08ba3a0889fe", + "name": "roles/os9_xstp/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b9c4e648b0f71e05a139a0a60e31834db399a4271420c8d015195d27d788eb92", + "name": "roles/os9_xstp/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e4acb30fac87a104e4acd23307b9d56243561919b56f13263734e4935aa724ef", + "name": 
"roles/os9_xstp/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "601f927b7dd50f6e6d4172e0b5bd895b16285bb81e3eec25571aa218cea59958", + "name": "roles/os9_xstp/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_xstp/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "0457d3e39544589ca342c73424ef9af18f01ea64ea7ee3bddf7f70d0c06c3148", + "name": "roles/os9_xstp/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d563505ac5f9e86cd31c8858f8d1f3280d0120df01d4b7cdbc294e32040c8963", + "name": "roles/os9_dns/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_dns/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_dns/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "91fb7feb04a43a6e745068a54b5399ab757cefdaf59fa6fd1e58bf046ae72997", + "name": "roles/os9_dns/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "17decf3eec309eb00c6836a4beedc8072a61befd67eec9c5972bca2a99ebc941", + 
"name": "roles/os9_dns/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ec177955fd27a81e864b82af4914c04e29ad1f5459cfb1fff6fcb213afb45f17", + "name": "roles/os9_dns/templates/os9_dns.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "6590574613a0d22d15672f3156f7aefb865b2543f7e7e3b2273e30f5a416b77a", + "name": "roles/os9_dns/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ed942b5afe9a98c6c297aa61685e7a2e02f14591bf97e0b88baec7d4bedba10e", + "name": "roles/os9_dns/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2e032670738e7258a61d77e685beb840702a2eacfdbef6ec3af3aca5fc945386", + "name": "roles/os9_dns/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "7f760f65685d6c512fbceeacabae0a50e3628efe3120973750c1addd1c4ff1d3", + "name": "roles/os9_dns/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_dns/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f3ad5e14b372d852332ad5869bf7872dec83160a5fb227a84d63d8d9f06708b3", + "name": "roles/os9_dns/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan", + "chksum_type": null, + 
"format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "cbbbd29017700d1dd2c88752c27a8cd1c49ec80a58ce65552bd220cf6130ae75", + "name": "roles/os9_vlan/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_vlan/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": "roles/os9_vlan/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "9f9ef9b08b8e36ed3ba0179873751eecefabae0ffd664f351d22227d00fa9e0c", + "name": "roles/os9_vlan/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4018361707fab5667b43805963b21dfa1f8752ceb9a3dbecd52f65066811dce1", + "name": "roles/os9_vlan/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e8291586b745585ed2bc488e9aae24d989b95b9cc1fcd6cf450059ce63b82cb6", + "name": "roles/os9_vlan/templates/os9_vlan.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "d82b31e71f2000b209fc6a91dff8ee36448138c7a38524c5c5424b285a3604d9", + "name": "roles/os9_vlan/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": 
"roles/os9_vlan/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "a5a44d45dc61f9efabc3f6fae37c33cb55bcab7f62a460c75f4f3fff42598ca2", + "name": "roles/os9_vlan/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "24b34bd5b0f9c8738728e8a3b937c89e462ea902c5288ac764a5da0d3a96f457", + "name": "roles/os9_vlan/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "3abddd14991d068ad364bd2e46437fff7398f71d0d7cf92b9a10b3dca5cbd9ff", + "name": "roles/os9_vlan/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_vlan/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "bfb597d511246cac6886b4639f4fa5858df32b4a49235b07cb12e6bb965b8684", + "name": "roles/os9_vlan/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/handlers", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "41505d1144bcec0a736a30bb7a675387edefcd3f43786c11642facd88debc46a", + "name": "roles/os9_acl/handlers/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3", + "name": "roles/os9_acl/LICENSE", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/tests", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770", + "name": 
"roles/os9_acl/tests/inventory.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "b66014feff883af307b5b3284a1da62a647dfb3bf47dba57c109e85ee247e456", + "name": "roles/os9_acl/tests/main.os9.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "2c34b288af25aa54fde46bae5baa21fedeb5cf7f0644a7e7ebd35b2900b14452", + "name": "roles/os9_acl/tests/test.yaml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/templates", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ebad1f9043572155dc792e70fc54da5a9de28edd93de8bb16aa6a419b403f168", + "name": "roles/os9_acl/templates/os9_acl.j2", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/meta", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "e06f7c300f06675328ca857b16b2d9752a508680b384975fba89792ca1575eaa", + "name": "roles/os9_acl/meta/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/vars", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "ef238bf54c409cb4adce143ce09d457e00c60f6c59b6d7e1aee588ed6151cc7f", + "name": "roles/os9_acl/vars/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "40edb7024f466d237d8d32f1f5085e48359a3e178c035bc0fd0430b58e84990b", + "name": "roles/os9_acl/README.md", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + "chksum_sha256": null, + "name": "roles/os9_acl/defaults", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "4206de44d73c0016a24aa118927f90e4de3099612b386424eed651b33a28ad50", + "name": "roles/os9_acl/defaults/main.yml", + "chksum_type": "sha256", + "format": 1 + }, + { + "ftype": "dir", + 
"chksum_sha256": null, + "name": "roles/os9_acl/tasks", + "chksum_type": null, + "format": 1 + }, + { + "ftype": "file", + "chksum_sha256": "82b059098882a03cf7486e95d81a275c6d1cb89050e1330264864068d515a256", + "name": "roles/os9_acl/tasks/main.yml", + "chksum_type": "sha256", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/LICENSE b/ansible_collections/dellemc/os9/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/MANIFEST.json b/ansible_collections/dellemc/os9/MANIFEST.json new file mode 100644 index 00000000..546d3f17 --- /dev/null +++ b/ansible_collections/dellemc/os9/MANIFEST.json @@ -0,0 +1,37 @@ +{ + "collection_info": { + "description": "Ansible Network Collections for Dell EMC OS9", + "repository": "https://github.com/ansible-collections/dellemc.os9", + "tags": [ + "dell", + "dellemc", + "os9", + "emc", + "networking" + ], + "dependencies": { + "ansible.netcommon": ">=1.0.0" + }, + "authors": [ + "Senthil Ganesan Ganesan ", + "Komal Patil " + ], + "issues": "https://github.com/ansible-collections/dellemc.os9/issues", + "name": "os9", + "license": [], + "documentation": "https://github.com/ansible-collections/dellemc.os9/tree/master/docs", + "namespace": "dellemc", + "version": "1.0.4", + "readme": "README.md", + "license_file": "LICENSE", + "homepage": "https://github.com/ansible-collections/dellemc.os9" + }, + "file_manifest_file": { + "format": 1, + "ftype": "file", + "chksum_sha256": "7666ea3d71e26d56fcb8d90a06ea28c4e634ea9a3618f96e7ab99cd276ea67fb", + "name": "FILES.json", + "chksum_type": "sha256" + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/README.md b/ansible_collections/dellemc/os9/README.md new file mode 100644 index 00000000..0ed96744 --- /dev/null +++ b/ansible_collections/dellemc/os9/README.md @@ -0,0 +1,96 @@ +# Ansible Network Collection for Dell EMC OS9 + +## Collection contents + +This collection includes the Ansible modules, plugins and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC OS9. Sample playbooks and documentation are also included to show how the collection can be used. 
+ +### Collection core modules + +- **os9_command.py** — Run commands on devices running OS9 + +- **os9_config.py** — Manage configuration sections on devices running OS9 + +- **os9_facts.py** — Collect facts from devices running OS9 + +### Collection roles + +These roles facilitate provisioning and administration of devices running Dell EMC OS9. There are over 22 roles available that provide a comprehensive coverage of most OS9 resources, including os9_aaa, os9_bgp and os9_ecmp. The documentation for each role is at [OS9 roles](https://github.com/ansible-collections/dellemc.os9/blob/master/docs/roles.rst). + +### Sample use case playbooks + +This collection includes the following sample playbooks that illustrate end to end use cases: + +- [CLOS Fabric](https://github.com/ansible-collections/dellemc.os9/blob/master/playbooks/clos_fabric_ebgp/README.md) — Example playbook to build a Layer 3 Leaf-Spine fabric + +## Installation + +Use this command to install the latest version of the OS9 collection from Ansible Galaxy: + +``` + ansible-galaxy collection install dellemc.os9 + +``` + +To install a specific version, a version range identifier must be specified. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0: + +``` + ansible-galaxy collection install 'dellemc.os9:>=1.0.0,<2.0.0' + +``` + +## Version compatibility + +* Ansible version 2.10 or higher +* Python 2.7 or higher and Python 3.5 or higher + +> **NOTE**: For Ansible versions lower than 2.10, use the legacy [dellos9 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os9-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html). 
+ + +## Sample playbook + +**playbook.yaml** + +``` +- hosts: os9_switches + connection: network_cli + collections: + - dellemc.os9 + roles: + - os9_vlan +``` + +**host_vars/os9_sw1.yaml** + +``` +hostname: os9_sw1 +# Parameters for connection type network_cli +ansible_ssh_user: xxxx +ansible_ssh_pass: xxxx +ansible_network_os: dellemc.os9.os9 + +# Create vlan100 and delete vlan888 +os9_vlan: + vlan 100: + description: "Blue" + state: present + vlan 888: + state: absent + +``` + +**inventory.yaml** + +``` +[os9_sw1] +os9_sw1 ansible_host=100.104.28.119 + +[os9_sw2] +os9_sw2 ansible_host=100.104.28.118 + +[os9_switches:children] +os9_sw1 +os9_sw2 + +``` + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst new file mode 100644 index 00000000..e91c0ed1 --- /dev/null +++ b/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst @@ -0,0 +1,76 @@ +====================================================================== +Ansible Network Collection for Dell EMC OS9 Release Notes +====================================================================== + +.. contents:: Topics + +v1.0.4 +====== + +Release Summary +--------------- + +- Fixed sanity error found during the sanity test of automation hub upload +- Fix issue in using list of strings for commands argument for os9_command module (https://github.com/ansible-collections/dellemc.os9/issues/15) + +v1.0.3 +====== + +Release Summary +--------------- + +Added bug fixes for bugs found during System Test. + +v1.0.2 +====== + +Release Summary +--------------- + +Added changelogs. + +v1.0.1 +====== + +Release Summary +--------------- + +Updated documentation review comments. + +v1.0.0 +====== + +New Modules +----------- + +- os9_command - Run commands on devices running Dell EMC os9. +- os9_config - Manage configuration on devices running os9. 
+- os9_facts - Collect facts from devices running os9. + +New Roles +--------- + +- os9_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server. +- os9_acl - Facilitates the configuration of Access Control lists. +- os9_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes. +- os9_copy_config - This role pushes the backup running configuration into a os9 device. +- os9_dcb - Facilitates the configuration of data center bridging (DCB). +- os9_dns - Facilitates the configuration of domain name service (DNS). +- os9_ecmp - Facilitates the configuration of equal cost multi-path (ECMP) for IPv4. +- os9_interface - Facilitates the configuration of interface attributes. +- os9_lag - Facilitates the configuration of link aggregation group (LAG) attributes. +- os9_lldp - Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. +- os9_logging - Facilitates the configuration of global logging attributes and logging servers. +- os9_ntp - Facilitates the configuration of network time protocol (NTP) attributes. +- os9_prefix_list - Facilitates the configuration of IP prefix-list. +- os9_sflow - Facilitates the configuration of global and interface level sFlow attributes. +- os9_snmp - Facilitates the configuration of global SNMP attributes. +- os9_system - Facilitates the configuration of hostname and hashing algorithm. +- os9_users - Facilitates the configuration of global system user attributes. +- os9_vlan - Facilitates the configuration of virtual LAN (VLAN) attributes. +- os9_vlt - Facilitates the configuration of virtual link trunking (VLT). +- os9_vrf - Facilitates the configuration of virtual routing and forwarding (VRF). +- os9_vrrp - Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes. +- os9_xstp - Facilitates the configuration of xSTP attributes. + +\(c) 2020 Dell Inc. or its subsidiaries. 
All Rights Reserved. diff --git a/ansible_collections/dellemc/os9/changelogs/changelog.yaml b/ansible_collections/dellemc/os9/changelogs/changelog.yaml new file mode 100644 index 00000000..82ea60c2 --- /dev/null +++ b/ansible_collections/dellemc/os9/changelogs/changelog.yaml @@ -0,0 +1,107 @@ +ancestor: null +releases: + 1.0.0: + modules: + - description: Run commands on devices running Dell EMC os9. + name: os9_command + namespace: '' + - description: Manage configuration on devices running os9. + name: os9_config + namespace: '' + - description: Collect facts from devices running os9. + name: os9_facts + namespace: '' + roles: + - description: Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server. + name: os9_aaa + namespace: '' + - description: Facilitates the configuration of Access Control lists. + name: os9_acl + namespace: '' + - description: Facilitates the configuration of border gateway protocol (BGP) attributes. + name: os9_bgp + namespace: '' + - description: This role pushes the backup running configuration into a OS9 device. + name: os9_copy_config + namespace: '' + - description: Facilitates the configuration of data center bridging (DCB). + name: os9_dcb + namespace: '' + - description: Facilitates the configuration of domain name service (DNS). + name: os9_dns + namespace: '' + - description: Facilitates the configuration of equal cost multi-path (ECMP) for IPv4. + name: os9_ecmp + namespace: '' + - description: Facilitates the configuration of interface attributes. + name: os9_interface + namespace: '' + - description: Facilitates the configuration of link aggregation group (LAG) attributes. + name: os9_lag + namespace: '' + - description: Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. + name: os9_lldp + namespace: '' + - description: Facilitates the configuration of global logging attributes and logging servers. 
+ name: os9_logging + namespace: '' + - description: Facilitates the configuration of network time protocol (NTP) attributes. + name: os9_ntp + namespace: '' + - description: Facilitates the configuration of IP prefix-list. + name: os9_prefix_list + namespace: '' + - description: Facilitates the configuration of global and interface level sFlow attributes. + name: os9_sflow + namespace: '' + - description: Facilitates the configuration of global SNMP attributes. + name: os9_snmp + namespace: '' + - description: Facilitates the configuration of hostname and hashing algorithm. + name: os9_system + namespace: '' + - description: Facilitates the configuration of global system user attributes. + name: os9_users + namespace: '' + - description: Facilitates the configuration of virtual LAN (VLAN) attributes. + name: os9_vlan + namespace: '' + - description: Facilitates the configuration of virtual link trunking (VLT). + name: os9_vlt + namespace: '' + - description: Facilitates the configuration of virtual routing and forwarding (VRF). + name: os9_vrf + namespace: '' + - description: Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes. + name: os9_vrrp + namespace: '' + - description: Facilitates the configuration of xSTP attributes. + name: os9_xstp + namespace: '' + release_date: '2020-07-31' + 1.0.1: + changes: + release_summary: Updated documentation review comments + fragments: + - 1.0.1.yaml + release_date: '2020-08-04' + 1.0.2: + changes: + release_summary: Added changelogs. + fragments: + - 1.0.2.yaml + release_date: '2020-08-18' + 1.0.3: + changes: + release_summary: Added bug fixes for bugs found during System Test. 
+ fragments: + - 1.0.3.yaml + release_date: '2020-10-09' + 1.0.4: + changes: + bugfixes: + - Fixed sanity error found during the sanity test of automation hub upload + - Fix issue in using list of strings for commands argument for os9_command module (https://github.com/ansible-collections/dellemc.os9/issues/15) + fragments: + - 1.0.4.yaml + release_date: '2021-02-15' \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/changelogs/config.yaml b/ansible_collections/dellemc/os9/changelogs/config.yaml new file mode 100644 index 00000000..d536811c --- /dev/null +++ b/ansible_collections/dellemc/os9/changelogs/config.yaml @@ -0,0 +1,30 @@ +changelog_filename_template: CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +flatmap: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Ansible Network Collection for Dell EMC OS9 +trivial_section_name: trivial diff --git a/ansible_collections/dellemc/os9/docs/os9_aaa.md b/ansible_collections/dellemc/os9/docs/os9_aaa.md new file mode 100644 index 00000000..276bb766 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_aaa.md @@ -0,0 +1 @@ +../roles/os9_aaa/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_acl.md b/ansible_collections/dellemc/os9/docs/os9_acl.md new file mode 100644 index 00000000..e2a0fe66 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_acl.md @@ -0,0 +1 @@ 
+../roles/os9_acl/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_bgp.md b/ansible_collections/dellemc/os9/docs/os9_bgp.md new file mode 100644 index 00000000..dc6b754c --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_bgp.md @@ -0,0 +1 @@ +../roles/os9_bgp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_copy_config.md b/ansible_collections/dellemc/os9/docs/os9_copy_config.md new file mode 100644 index 00000000..d9342bc2 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_copy_config.md @@ -0,0 +1 @@ +../roles/os9_copy_config/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_dcb.md b/ansible_collections/dellemc/os9/docs/os9_dcb.md new file mode 100644 index 00000000..7199f1f6 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_dcb.md @@ -0,0 +1 @@ +../roles/os9_dcb/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_dns.md b/ansible_collections/dellemc/os9/docs/os9_dns.md new file mode 100644 index 00000000..01147082 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_dns.md @@ -0,0 +1 @@ +../roles/os9_dns/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_ecmp.md b/ansible_collections/dellemc/os9/docs/os9_ecmp.md new file mode 100644 index 00000000..b3281d80 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_ecmp.md @@ -0,0 +1 @@ +../roles/os9_ecmp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_interface.md b/ansible_collections/dellemc/os9/docs/os9_interface.md new file mode 100644 index 00000000..1fc7f06e --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_interface.md @@ -0,0 +1 @@ +../roles/os9_interface/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_lag.md b/ansible_collections/dellemc/os9/docs/os9_lag.md 
new file mode 100644 index 00000000..3621b7a1 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_lag.md @@ -0,0 +1 @@ +../roles/os9_lag/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_lldp.md b/ansible_collections/dellemc/os9/docs/os9_lldp.md new file mode 100644 index 00000000..619667ac --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_lldp.md @@ -0,0 +1 @@ +../roles/os9_lldp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_logging.md b/ansible_collections/dellemc/os9/docs/os9_logging.md new file mode 100644 index 00000000..eb996e01 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_logging.md @@ -0,0 +1 @@ +../roles/os9_logging/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_ntp.md b/ansible_collections/dellemc/os9/docs/os9_ntp.md new file mode 100644 index 00000000..6e6800f0 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_ntp.md @@ -0,0 +1 @@ +../roles/os9_ntp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_prefix_list.md b/ansible_collections/dellemc/os9/docs/os9_prefix_list.md new file mode 100644 index 00000000..53760cec --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_prefix_list.md @@ -0,0 +1 @@ +../roles/os9_prefix_list/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_sflow.md b/ansible_collections/dellemc/os9/docs/os9_sflow.md new file mode 100644 index 00000000..29343446 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_sflow.md @@ -0,0 +1 @@ +../roles/os9_sflow/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_snmp.md b/ansible_collections/dellemc/os9/docs/os9_snmp.md new file mode 100644 index 00000000..c698c4a7 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_snmp.md @@ -0,0 +1 @@ +../roles/os9_snmp/README.md \ No 
newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_system.md b/ansible_collections/dellemc/os9/docs/os9_system.md new file mode 100644 index 00000000..350df9ae --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_system.md @@ -0,0 +1 @@ +../roles/os9_system/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_users.md b/ansible_collections/dellemc/os9/docs/os9_users.md new file mode 100644 index 00000000..893584bb --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_users.md @@ -0,0 +1 @@ +../roles/os9_users/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_vlan.md b/ansible_collections/dellemc/os9/docs/os9_vlan.md new file mode 100644 index 00000000..62b37f32 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_vlan.md @@ -0,0 +1 @@ +../roles/os9_vlan/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_vlt.md b/ansible_collections/dellemc/os9/docs/os9_vlt.md new file mode 100644 index 00000000..e492ad7a --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_vlt.md @@ -0,0 +1 @@ +../roles/os9_vlt/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_vrf.md b/ansible_collections/dellemc/os9/docs/os9_vrf.md new file mode 100644 index 00000000..3c1c4488 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_vrf.md @@ -0,0 +1 @@ +../roles/os9_vrf/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_vrrp.md b/ansible_collections/dellemc/os9/docs/os9_vrrp.md new file mode 100644 index 00000000..3ba26515 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/os9_vrrp.md @@ -0,0 +1 @@ +../roles/os9_vrrp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/os9_xstp.md b/ansible_collections/dellemc/os9/docs/os9_xstp.md new file mode 100644 index 00000000..85137fd4 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/docs/os9_xstp.md @@ -0,0 +1 @@ +../roles/os9_xstp/README.md \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/docs/roles.rst b/ansible_collections/dellemc/os9/docs/roles.rst new file mode 100644 index 00000000..b87901d9 --- /dev/null +++ b/ansible_collections/dellemc/os9/docs/roles.rst @@ -0,0 +1,136 @@ +############################################################## +Ansible Network Collection Roles for Dell EMC OS9 +############################################################## + +The roles facilitate provisioning of devices running Dell EMC OS9. This document describes each of the roles. + +AAA role +-------- + +The `os9_aaa `_ role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of TACACS and RADIUS server and AAA. + + +ACL role +-------- + +The `os9_acl `_ role facilitates the configuration of an Access Control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals. + + +BGP role +-------- + +The `os9_bgp `_ role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. + + +Copy configuration role +----------------------- + +The `os9_copy_config `_ role pushes the backup running configuration into a device. This role merges the configuration in the template file with the running configuration of the Dell EMC Networking OS9 device. + + +DCB role +-------- + +The `os9_dcb `_ role facilitates the configuration of data center bridging (DCB). It supports the configuration of the DCB map and the DCB buffer, and assigns them to interfaces. + + +DNS role +-------- + +The `os9_dns `_ role facilitates the configuration of domain name service (DNS). 
+ + +ECMP role +--------- + +The `os9_ecmp `_ role facilitates the configuration of equal cost multi-path (ECMP). It supports the configuration of ECMP for IPv4. + + +Interface role +-------------- + +The `os9_interface `_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, suppress_ra and port mode. + + +LAG role +-------- + +The `os9_lag `_ role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type (static/dynamic) and minimum required link. + + +LLDP role +--------- + +The `os9_lldp `_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. This role supports the configuration of hello, mode, multiplier, advertise tlvs, management interface, fcoe, iscsi at global and interface levels. + + +Logging role +------------ + +The `os9_logging `_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers. + + +NTP role +-------- + +The `os9_ntp `_ role facilitates the configuration of network time protocol attributes. + + +Prefix-list role +---------------- + +The `os9_prefix_list `_ role facilitates the configuration of a prefix-list, supports the configuration of IP prefix-list, and assigns the prefix-list to line terminals. + + +sFlow role +---------- + +The `os9_sflow `_ role facilitates the configuration of global and interface level sFlow attributes. It supports the configuration of sFlow collectors at the global level, enable/disable, and specification of sFlow polling-interval, sample-rate, max-datagram size, and so on are supported at the interface and global level. + + +SNMP role +--------- + +The `os9_snmp `_ role facilitates the configuration of global snmp attributes. 
It supports the configuration of SNMP server attributes like users, group, community, location, and traps. + + +System role +----------- + +The `os9_system `_ role facilitates the configuration of global system attributes. This role specifically enables configuration of hostname and enable password for os9. It also supports the configuration of management route, hash algorithm, clock, line terminal, banner, and reload type. + + +Users role +---------- + +The `os9_users `_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users. + + +VLAN role +--------- + +The `os9_vlan `_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports. + + +VLT role +-------- + +The `os9_vlt `_ role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. + + +VRF role +-------- + +The `os9_vrf `_ role facilitates the configuration of basic virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers. + + +VRRP role +--------- + +The `os9_vrrp `_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes. + + +xSTP role +--------- + +The `os9_xstp `_ role facilitates the configuration of xSTP attributes. This role supports multiple versions of spanning-tree protocol (STP), rapid spanning-tree (RSTP) protocol, multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. 
diff --git a/ansible_collections/dellemc/os9/meta/runtime.yml b/ansible_collections/dellemc/os9/meta/runtime.yml new file mode 100644 index 00000000..ad156258 --- /dev/null +++ b/ansible_collections/dellemc/os9/meta/runtime.yml @@ -0,0 +1,8 @@ +plugin_routing: + action: + os9_config: + redirect: dellemc.os9.os9 + os9_command: + redirect: dellemc.os9.os9 + os9_facts: + redirect: dellemc.os9.os9 diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md new file mode 100644 index 00000000..410147db --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md @@ -0,0 +1,35 @@ +# Provision CLOS fabric using the Ansible collection for Dell EMC OS9 + +This example describes how to use Ansible to build a CLOS fabric with a Dell EMC PowerSwitch platform running Dell EMC OS9 device. The sample topology is a two-tier CLOS fabric with two spines and four leaves connected as mesh. eBGP is running between the two tiers. All switches in spine have the same AS number, and each leaf switch has a unique AS number. All AS numbers used are private. + +For application load-balancing purposes, the same prefix is advertised from multiple leaf switches and uses _BGP multipath relax_ feature. + +![CLOS FABRIC Topology](https://ansible-dellos-docs.readthedocs.io/en/latest/_images/topo.png) + +## Create a simple Ansible playbook + +**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address. + +**2**. Create a group variable file called `group_vars/all`, then define credentials and SNMP variables. + +**3**. Create a group variable file called `group_vars/spine.yaml`, then define credentials, hostname, and BGP neighbors of spine group. + +**4**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport. + +**5**. 
Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport. + +**6**. Create a host variable file called `host_vars/leaf1.yaml`, then define the host, credentials, and transport. + +**7**. Create a host variable file called `host_vars/leaf2.yaml`, then define the host, credentials, and transport. + +**8**. Create a host variable file called `host_vars/leaf3.yaml`, then define the host, credentials, and transport. + +**9**. Create a host variable file called `host_vars/leaf4.yaml`, then define the host, credentials, and transport. + +**10**. Create a playbook called `datacenter.yaml`. + +**11**. Run the playbook. + + ansible-playbook -i inventory.yaml datacenter.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml new file mode 100644 index 00000000..f17ebd14 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml @@ -0,0 +1,11 @@ +--- +- hosts: datacenter + gather_facts: no + connection: network_cli + collections: + - dellemc.os9 + roles: + - os9_interface + - os9_bgp + - os9_snmp + - os9_system diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all new file mode 100644 index 00000000..c3e4398b --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all @@ -0,0 +1,10 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 +build_dir: ../tmp/tmp_os9 + +os9_snmp: + snmp_community: + - name: public + access_mode: ro + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml 
b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml new file mode 100644 index 00000000..17568725 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml @@ -0,0 +1,64 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 + +os9_system: + hostname: "{{ spine_hostname }}" + +os9_bgp: + asn: 64901 + router_id: "{{ bgp_router_id }}" + best_path: + as_path: ignore + as_path_state: present + med: + - attribute: confed + state: present + neighbor: + - type: ipv4 + remote_asn: "{{ bgp_neigh1_remote_asn }}" + ip: "{{ bgp_neigh1_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh2_remote_asn }}" + ip: "{{ bgp_neigh2_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh3_remote_asn }}" + ip: "{{ bgp_neigh3_ip }}" + admin: up + state: present + - type: ipv4 + remote_asn: "{{ bgp_neigh4_remote_asn }}" + ip: "{{ bgp_neigh4_ip }}" + admin: up + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh5_remote_asn }}" + ip: "{{ bgp_neigh5_ip }}" + admin: up + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh6_remote_asn }}" + ip: "{{ bgp_neigh6_ip }}" + admin: up + address_family: + - type: ipv4 + activate: false + state: present + - type: ipv6 + activate: true + state: present + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh7_remote_asn }}" + ip: "{{ bgp_neigh7_ip }}" + admin: up + state: present + - type: ipv6 + remote_asn: "{{ bgp_neigh8_remote_asn }}" + ip: "{{ bgp_neigh8_ip }}" + admin: up + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml new file mode 100644 index 00000000..2244418e --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml @@ -0,0 +1,61 @@ +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx 
+ansible_network_os: dellemc.os9.os9 +leaf_hostname: "leaf-1" +os9_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: xor1 + state: present +os9_interface: + TenGigabitEthernet 0/0: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.1.2/24 + ipv6_and_mask: 2001:100:1:1::2/64 + state_ipv6: present + TenGigabitEthernet 0/1: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.1.2/24 + ipv6_and_mask: 2001:100:2:1::2/64 + state_ipv6: present +os9_bgp: + asn: 64801 + router_id: 100.0.2.1 + best_path: + as_path: ignore + as_path_state: present + med: + - attribute: confed + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.1.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.1.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:1::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:1::1 + admin: up + state: present + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml new file mode 100644 index 00000000..2e5cc580 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml @@ -0,0 +1,65 @@ +hostname: leaf2 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 +leaf_hostname: "leaf-2" +os9_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: xor1 + state: present +os9_interface: + TenGigabitEthernet 0/0: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.17.2/24 + ipv6_and_mask: 2001:100:1:11::2/64 + state_ipv6: present + TenGigabitEthernet 0/1: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + 
switchport: False + ip_and_mask: 100.2.17.2/24 + ipv6_and_mask: 2001:100:2:11::2/64 +os9_bgp: + asn: 64802 + router_id: 100.0.2.2 + best_path: + as_path: ignore + as_path_state: present + med: + - attribute: confed + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.18.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.1.17.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.17.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:11::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:11::1 + admin: up + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml new file mode 100644 index 00000000..f14f44e0 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml @@ -0,0 +1,65 @@ +hostname: leaf3 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 +leaf_hostname: "leaf-3" +os9_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: xor1 + state: present +os9_interface: + TenGigabitEthernet 0/0: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.33.2/24 + ipv6_and_mask: 2001:100:1:21::2/64 + state_ipv6: present + TenGigabitEthernet 0/1: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.33.2/24 + ipv6_and_mask: 2001:100:2:21::2/64 +os9_bgp: + asn: 64803 + router_id: 100.0.2.3 + best_path: + as_path: ignore + as_path_state: present + med: + - attribute: confed + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.33.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.33.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 
2001:100:1:21::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:22::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:21::1 + admin: up + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml new file mode 100644 index 00000000..9fc8ca87 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml @@ -0,0 +1,61 @@ +hostname: leaf4 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 +leaf_hostname: "leaf-4" +os9_system: + hostname: "{{ leaf_hostname }}" + hash_algo: + algo: + - name: ecmp + mode: xor1 + state: present +os9_interface: + TenGigabitEthernet 0/0: + desc: "Connected to Spine 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.49.2/24 + ipv6_and_mask: 2001:100:1:31::2/64 + state_ipv6: present + TenGigabitEthernet 0/1: + desc: "Connected to Spine 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.49.2/24 + ipv6_and_mask: 2001:100:2:31::2/64 + state_ipv6: present +os9_bgp: + asn: 64804 + router_id: 100.0.2.4 + best_path: + as_path: ignore + as_path_state: present + med: + - attribute: confed + state: present + neighbor: + - type: ipv4 + remote_asn: 64901 + ip: 100.1.49.1 + admin: up + state: present + - type: ipv4 + remote_asn: 64901 + ip: 100.2.49.1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:1:31::1 + admin: up + state: present + - type: ipv6 + remote_asn: 64901 + ip: 2001:100:2:31::1 + admin: up + state: present diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml new file mode 100644 index 00000000..9967d338 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml @@ -0,0 +1,61 @@ +hostname: spine1 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx +ansible_network_os: dellemc.os9.os9 +spine_hostname: "spine-1" + +os9_interface: + TenGigabitEthernet 0/2: + desc: "Connected to leaf 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.1.1/24 + ipv6_and_mask: 2001:100:1:1::1/64 + state_ipv6: present + TenGigabitEthernet 0/3: + desc: "Connected to leaf 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.33.1/24 + ipv6_and_mask: 2001:100:1:21::1/64 + state_ipv6: present + TenGigabitEthernet 0/4: + desc: "Connected to leaf 3" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.17.1/24 + ipv6_and_mask: 2001:100:1:11::1/64 + state_ipv6: present + TenGigabitEthernet 0/5: + desc: "Connected to leaf 4" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.1.49.1/24 + ipv6_and_mask: 2001:100:1:31::1/64 + state_ipv6: present + +bgp_router_id: "100.0.1.1" +bgp_neigh1_remote_asn: 64801 +bgp_neigh1_ip: "100.1.1.2" +bgp_neigh2_remote_asn: 64803 +bgp_neigh2_ip: "100.1.33.2" +bgp_neigh3_remote_asn: 64802 +bgp_neigh3_ip: "100.1.17.2" +bgp_neigh4_remote_asn: 64804 +bgp_neigh4_ip: "100.1.49.2" +bgp_neigh5_remote_asn: 64801 +bgp_neigh5_ip: "2001:100:1:1::2" +bgp_neigh6_remote_asn: 64802 +bgp_neigh6_ip: "2001:100:1:11::2" +bgp_neigh7_remote_asn: 64803 +bgp_neigh7_ip: "2001:100:1:21::2" +bgp_neigh8_remote_asn: 64804 +bgp_neigh8_ip: "2001:100:1:31::2" diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml new file mode 100644 index 00000000..218d6478 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml @@ -0,0 +1,60 @@ +hostname: spine2 +ansible_ssh_user: xxxxx +ansible_ssh_pass: xxxxx 
+ansible_network_os: dellemc.os9.os9 +spine_hostname: "spine-2" +os9_interface: + TenGigabitEthernet 0/6: + desc: "Connected to leaf 1" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.1.1/24 + ipv6_and_mask: 2001:100:2:1::1/64 + state_ipv6: present + TenGigabitEthernet 0/7: + desc: "Connected to leaf 2" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.17.1/24 + ipv6_and_mask: 2001:100:2:11::1/64 + state_ipv6: present + TenGigabitEthernet 0/8: + desc: "Connected to leaf 3" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.33.1/24 + ipv6_and_mask: 2001:100:2:21::1/64 + state_ipv6: present + TenGigabitEthernet 0/9: + desc: "Connected to leaf 4" + mtu: 9216 + portmode: + admin: up + switchport: False + ip_and_mask: 100.2.49.1/24 + ipv6_and_mask: 2001:100:2:31::1/64 + state_ipv6: present + +bgp_router_id: "100.0.1.2" +bgp_neigh1_remote_asn: 64801 +bgp_neigh1_ip: "100.2.1.2" +bgp_neigh2_remote_asn: 64802 +bgp_neigh2_ip: "100.2.33.2" +bgp_neigh3_remote_asn: 64803 +bgp_neigh3_ip: "100.2.17.2" +bgp_neigh4_remote_asn: 64804 +bgp_neigh4_ip: "100.2.49.2" +bgp_neigh5_remote_asn: 64801 +bgp_neigh5_ip: "2001:100:2:1::2" +bgp_neigh6_remote_asn: 64802 +bgp_neigh6_ip: "2001:100:2:11::2" +bgp_neigh7_remote_asn: 64803 +bgp_neigh7_ip: "2001:100:2:21::2" +bgp_neigh8_remote_asn: 64804 +bgp_neigh8_ip: "2001:100:2:31::2" diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + 
+[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/plugins/action/__init__.py b/ansible_collections/dellemc/os9/plugins/action/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/action/os9.py b/ansible_collections/dellemc/os9/plugins/action/os9.py new file mode 100644 index 00000000..0cbfa910 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/action/os9.py @@ -0,0 +1,95 @@ +# +# (c) 2020 Red Hat Inc. +# +# Copyright (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import copy + +from ansible import constants as C +from ansible.module_utils._text import to_text +from ansible.module_utils.connection import Connection +from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_provider_spec +from ansible.utils.display import Display + +display = Display() + + +class ActionModule(ActionNetworkModule): + + def run(self, tmp=None, task_vars=None): + del tmp # tmp no longer has any effect + + module_name = self._task.action.split('.')[-1] + self._config_module = True if module_name == 'os9_config' else False + socket_path = None + persistent_connection = self._play_context.connection.split('.')[-1] + + if persistent_connection == 'network_cli': + provider = self._task.args.get('provider', {}) + if any(provider.values()): + display.warning('provider is unnecessary when using network_cli and will be ignored') + del self._task.args['provider'] + elif self._play_context.connection == 'local': + provider = load_provider(os9_provider_spec, self._task.args) + pc = copy.deepcopy(self._play_context) + pc.connection = 'network_cli' + pc.network_os = 'dellemc.os9.os9' + pc.remote_addr = provider['host'] or self._play_context.remote_addr + pc.port = int(provider['port'] or self._play_context.port or 22) + pc.remote_user = provider['username'] or self._play_context.connection_user + pc.password = provider['password'] or self._play_context.password + pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file + command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) + pc.become = provider['authorize'] or False + if pc.become: + pc.become_method = 'enable' + 
pc.become_pass = provider['auth_pass'] + + display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr) + connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) + connection.set_options(direct={'persistent_command_timeout': command_timeout}) + + socket_path = connection.run() + display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) + if not socket_path: + return {'failed': True, + 'msg': 'unable to open shell. Please see: ' + + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} + + task_vars['ansible_socket'] = socket_path + + # make sure we are in the right cli context which should be + # enable mode and not config module + if socket_path is None: + socket_path = self._connection.socket_path + + conn = Connection(socket_path) + out = conn.get_prompt() + while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'): + display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) + conn.send_command('exit') + out = conn.get_prompt() + + result = super(ActionModule, self).run(task_vars=task_vars) + return result diff --git a/ansible_collections/dellemc/os9/plugins/cliconf/__init__.py b/ansible_collections/dellemc/os9/plugins/cliconf/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/cliconf/os9.py b/ansible_collections/dellemc/os9/plugins/cliconf/os9.py new file mode 100644 index 00000000..95334bfd --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/cliconf/os9.py @@ -0,0 +1,88 @@ +# +# (c) 2020 Red Hat Inc. +# +# (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +cliconf: os9 +short_description: Use os9 cliconf to run command on Dell OS9 platform +description: + - This os9 plugin provides low level abstraction apis for + sending and receiving CLI commands from Dell OS9 network devices. +""" + +import re +import json + +from itertools import chain + +from ansible.module_utils._text import to_bytes, to_text +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list +from ansible.plugins.cliconf import CliconfBase, enable_mode + + +class Cliconf(CliconfBase): + + def get_device_info(self): + device_info = {} + + device_info['network_os'] = 'dellemc.os9.os9' + reply = self.get('show version') + data = to_text(reply, errors='surrogate_or_strict').strip() + + match = re.search(r'Software Version (\S+)', data) + if match: + device_info['network_os_version'] = match.group(1) + + match = re.search(r'System Type (\S+)', data, re.M) + if match: + device_info['network_os_model'] = match.group(1) + + reply = self.get('show running-config | grep hostname') + data = to_text(reply, errors='surrogate_or_strict').strip() + match = re.search(r'^hostname (.+)', data, re.M) + if match: + device_info['network_os_hostname'] = match.group(1) + + return device_info + + @enable_mode + def get_config(self, source='running', format='text', flags=None): + if source not in ('running', 'startup'): + return self.invalid_params("fetching configuration from %s is not supported" % source) +# if source == 'running': +# cmd = 'show running-config all' + 
else: + cmd = 'show startup-config' + return self.send_command(cmd) + + @enable_mode + def edit_config(self, command): + for cmd in chain(['configure terminal'], to_list(command), ['end']): + self.send_command(cmd) + + def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False): + return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) + + def get_capabilities(self): + result = super(Cliconf, self).get_capabilities() + return json.dumps(result) diff --git a/ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py b/ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py b/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py new file mode 100644 index 00000000..35ec6725 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = r''' +options: + provider: + description: + - A dict object containing connection details. + type: dict + suboptions: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + type: str + port: + description: + - Specifies the port to use when building the connection to the remote + device. + type: int + username: + description: + - User to authenticate the SSH session to the remote device. 
If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_USERNAME) will be used instead. + type: str + password: + description: + - Password to authenticate the SSH session to the remote device. If the + value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_PASSWORD) will be used instead. + type: str + ssh_keyfile: + description: + - Path to an ssh key used to authenticate the SSH session to the remote + device. If the value is not specified in the task, the value of + environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + type: path + timeout: + description: + - Specifies idle timeout (in seconds) for the connection. Useful if the + console freezes before continuing. For example when saving + configurations. + type: int + authorize: + description: + - Instructs the module to enter privileged mode on the remote device before + sending any commands. If not specified, the device will attempt to execute + all commands in non-privileged mode. If the value is not specified in the + task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be + used instead. + type: bool + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode on the + remote device. If I(authorize) is false, then this argument does nothing. + If the value is not specified in the task, the value of environment variable + C(ANSIBLE_NET_AUTH_PASS) will be used instead. + type: str +notes: + - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking). 
+''' diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/__init__.py b/ansible_collections/dellemc/os9/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py b/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py new file mode 100644 index 00000000..14c77773 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py @@ -0,0 +1,146 @@ +# +# (c) 2020 Peter Sprygada, +# (c) 2020 Red Hat, Inc +# +# Copyright (c) 2020 Dell Inc. +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import re + +from ansible.module_utils._text import to_text +from ansible.module_utils.basic import env_fallback +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList +from ansible.module_utils.connection import exec_command +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine + +_DEVICE_CONFIGS = {} + +WARNING_PROMPTS_RE = [ + r"[\r\n]?\[confirm yes/no\]:\s?$", + r"[\r\n]?\[y/n\]:\s?$", + r"[\r\n]?\[yes/no\]:\s?$" +] + +os9_provider_spec = { + 'host': dict(), + 'port': dict(type='int'), + 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), + 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), + 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), + 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'), + 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True), + 'timeout': dict(type='int'), +} +os9_argument_spec = { + 'provider': dict(type='dict', options=os9_provider_spec), +} + + +def check_args(module, warnings): + pass + + +def get_config(module, flags=None): + flags = [] if flags is None else flags + + cmd = 'show running-config ' + cmd += ' '.join(flags) + cmd = cmd.strip() + + 
try: + return _DEVICE_CONFIGS[cmd] + except KeyError: + rc, out, err = exec_command(module, cmd) + if rc != 0: + module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict')) + cfg = to_text(out, errors='surrogate_or_strict').strip() + _DEVICE_CONFIGS[cmd] = cfg + return cfg + + +def to_commands(module, commands): + spec = { + 'command': dict(key=True), + 'prompt': dict(), + 'answer': dict() + } + transform = ComplexList(spec, module) + return transform(commands) + + +def run_commands(module, commands, check_rc=True): + responses = list() + commands = to_commands(module, to_list(commands)) + for cmd in commands: + cmd = module.jsonify(cmd) + rc, out, err = exec_command(module, cmd) + if check_rc and rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc) + responses.append(to_text(out, errors='surrogate_or_strict')) + return responses + + +def load_config(module, commands): + rc, out, err = exec_command(module, 'configure terminal') + if rc != 0: + module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict')) + + for command in to_list(commands): + if command == 'end': + continue + rc, out, err = exec_command(module, command) + if rc != 0: + module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc) + + exec_command(module, 'end') + + +def get_sublevel_config(running_config, module): + contents = list() + current_config_contents = list() + running_config = NetworkConfig(contents=running_config, indent=1) + obj = running_config.get_object(module.params['parents']) + if obj: + contents = obj.children + contents[:0] = module.params['parents'] + + indent = 0 + for c in contents: + if isinstance(c, str): + current_config_contents.append(c.rjust(len(c) + indent, ' ')) + if isinstance(c, ConfigLine): + current_config_contents.append(c.raw) + indent = 1 + sublevel_config = '\n'.join(current_config_contents) + + return 
sublevel_config diff --git a/ansible_collections/dellemc/os9/plugins/modules/__init__.py b/ansible_collections/dellemc/os9/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_command.py b/ansible_collections/dellemc/os9/plugins/modules/os9_command.py new file mode 100644 index 00000000..20e3cc58 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/modules/os9_command.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Peter Sprygada +# Copyright: (c) 2020, Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: os9_command +author: "Dhivya P (@dhivyap)" +short_description: Run commands on remote devices running Dell OS9 +description: + - Sends arbitrary commands to a Dell OS9 node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(dellemc_os9_os9_config) to configure Dell OS9 devices. +extends_documentation_fragment: dellemc.os9.os9 +options: + commands: + description: + - List of commands to send to the remote os9 device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + type: list + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. 
The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. + type: list + elements: str + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + type: str + default: all + choices: [ all, any ] + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + type: int + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + type: int + default: 1 +notes: + - This module requires Dell OS9 version 9.10.0.1P13 or above. + - This module requires to increase the ssh connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can be done via M(os9_config) module + as well. 
+""" + +EXAMPLES = """ +tasks: + - name: run show version on remote devices + os9_command: + commands: show version + - name: run show version and check to see if output contains OS9 + os9_command: + commands: show version + wait_for: result[0] contains OS9 + - name: run multiple commands on remote nodes + os9_command: + commands: + - show version + - show interfaces + - name: run multiple commands and evaluate the output + os9_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains OS9 + - result[1] contains Loopback +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always apart from low level errors (such as action plugin) + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always apart from low level errors (such as action plugin) + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import run_commands +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional +from ansible.module_utils.six import string_types + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + + +def parse_commands(module, warnings): + command = ComplexList(dict( + 
command=dict(key=True), + prompt=dict(), + answer=dict() + ), module) + commands = command(module.params['commands']) + for index, item in enumerate(commands): + if module.check_mode and not item['command'].startswith('show'): + warnings.append( + 'only show commands are supported when using check mode, not ' + 'executing `%s`' % item['command'] + ) + elif item['command'].startswith('conf'): + module.fail_json( + msg='os9_command does not support running config mode ' + 'commands. Please use os9_config instead' + ) + return commands + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', elements='str'), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + argument_spec.update(os9_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = {'changed': False} + + warnings = list() + check_args(module, warnings) + commands = parse_commands(module, warnings) + result['warnings'] = warnings + + wait_for = module.params['wait_for'] or list() + conditionals = [Conditional(c) for c in wait_for] + + retries = module.params['retries'] + interval = module.params['interval'] + match = module.params['match'] + + while retries > 0: + responses = run_commands(module, commands) + + for item in list(conditionals): + if item(responses): + if match == 'any': + conditionals = list() + break + conditionals.remove(item) + + if not conditionals: + break + + time.sleep(interval) + retries -= 1 + + if conditionals: + failed_conditions = [item.raw for item in conditionals] + msg = 'One or more conditional statements have not been satisfied' + module.fail_json(msg=msg, failed_conditions=failed_conditions) + + result.update({ + 'changed': False, + 'stdout': responses, + 'stdout_lines': 
list(to_lines(responses)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_config.py b/ansible_collections/dellemc/os9/plugins/modules/os9_config.py new file mode 100644 index 00000000..a6d20ed0 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/modules/os9_config.py @@ -0,0 +1,350 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os9_config +author: "Dhivya P (@dhivyap)" +short_description: Manage Dell EMC Networking OS9 configuration sections +description: + - OS9 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with OS9 configuration sections in + a deterministic way. +extends_documentation_fragment: dellemc.os9.os9 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + type: list + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section or hierarchy + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. 
+ type: list + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is + mutually exclusive with I(lines). + type: path + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + type: list + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + type: list + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + type: str + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. 
+ type: str + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When you set this argument to + I(merge), the configuration changes merge with the current + device running configuration. When you set this argument to I(check) + the configuration updates are determined but not actually configured + on the remote device. + type: str + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + type: bool + default: no + config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + type: str + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. If the C(backup_options) value is not given, + the backup file is written to the C(backup) folder in the playbook + root directory. If the directory does not exist, it is created. + type: bool + default: 'no' + backup_options: + description: + - This is a dict object containing configurable options related to backup file path. + The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set + to I(no) this option will be silently ignored. 
+ suboptions: + filename: + description: + - The filename to be used to store the backup configuration. If the filename + is not given it will be generated based on the hostname, current time and date + in format defined by _config.@ + type: str + dir_path: + description: + - This option provides the path ending with directory name in which the backup + configuration file will be stored. If the directory does not exist it will be first + created and the filename is either the value of C(filename) or default filename + as described in C(filename) options description. If the path value is not given + in that case a I(backup) directory will be created in the current working directory + and backup configuration will be copied in C(filename) within I(backup) directory. + type: path + type: dict +notes: + - This module requires Dell OS9 version 9.10.0.1P13 or above. + - This module requires to increase the ssh connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can also be done with the + M(os9_config) module. 
+""" + +EXAMPLES = """ +- os9_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" +- os9_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + - 50 permit ip host 5.5.5.5 any log + parents: ['ip access-list extended test'] + before: ['no ip access-list extended test'] + match: exact +- os9_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + parents: ['ip access-list extended test'] + before: ['no ip access-list extended test'] + replace: block +- os9_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + backup: yes + backup_options: + filename: backup.cfg + dir_path: /home/user +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device. + returned: always + type: list + sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1'] +commands: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1'] +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. + returned: When not check_mode. 
+ type: bool + sample: True +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: str + sample: /playbooks/ansible/backup/os9_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import get_config, get_sublevel_config +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import load_config, run_commands +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import WARNING_PROMPTS_RE +from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + commands = module.params['lines'][0] + if (isinstance(commands, dict)) and (isinstance(commands['command'], list)): + candidate.add(commands['command'], parents=parents) + elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)): + candidate.add([commands['command']], parents=parents) + else: + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def get_running_config(module): + contents = module.params['config'] + if not contents: + contents = get_config(module) + return contents + + +def main(): + + backup_spec = dict( + filename=dict(), + dir_path=dict(type='path') + ) + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + update=dict(choices=['merge', 
'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False), + backup_options=dict(type='dict', options=backup_spec) + ) + + argument_spec.update(os9_argument_spec) + + mutually_exclusive = [('lines', 'src'), + ('parents', 'src')] + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + + warnings = list() + check_args(module, warnings) + + result = dict(changed=False, saved=False, warnings=warnings) + + candidate = get_candidate(module) + + if module.params['backup']: + if not module.check_mode: + result['__backup__'] = get_config(module) + commands = list() + + if any((module.params['lines'], module.params['src'])): + if match != 'none': + config = get_running_config(module) + if parents: + contents = get_sublevel_config(config, module) + config = NetworkConfig(contents=contents, indent=1) + else: + config = NetworkConfig(contents=config, indent=1) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands') + if ((isinstance(module.params['lines'], list)) and + (isinstance(module.params['lines'][0], dict)) and + set(['prompt', 'answer']).issubset(module.params['lines'][0])): + + cmd = {'command': commands, + 'prompt': module.params['lines'][0]['prompt'], + 'answer': module.params['lines'][0]['answer']} + commands = [module.jsonify(cmd)] + else: + commands = commands.split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + load_config(module, commands) + + result['changed'] = True + result['commands'] = commands + 
result['updates'] = commands + + if module.params['save']: + result['changed'] = True + if not module.check_mode: + cmd = {'command': 'copy running-config startup-config', + 'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'} + run_commands(module, [cmd]) + result['saved'] = True + else: + module.warn('Skipping command `copy running-config startup-config`' + 'due to check_mode. Configuration not copied to ' + 'non-volatile storage') + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py b/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py new file mode 100644 index 00000000..fe04afc0 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py @@ -0,0 +1,578 @@ +#!/usr/bin/python +# +# (c) 2020 Peter Sprygada, +# Copyright (c) 2020 Dell Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: os9_facts +author: "Dhivya P (@dhivyap)" +short_description: Collect facts from remote devices running Dell EMC Networking OS9 +description: + - Collects a base set of device facts from a remote device that + is running OS9. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: dellemc.os9.os9 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. 
Values can also be used + with an initial C(!) to specify that a specific subset should + not be collected. + type: list + default: [ '!config' ] +notes: + - This module requires OS9 version 9.10.0.1P13 or above. + - This module requires an increase of the SSH connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can also be done with the M(os9_config) module. +""" + +EXAMPLES = """ +# Collect all facts from the device +- os9_facts: + gather_subset: all +# Collect only the config and default facts +- os9_facts: + gather_subset: + - config +# Do not collect hardware facts +- os9_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_servicetags: + description: The servicetags from remote device + returned: always + type: list +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str +ansible_net_image: + description: The image file the device is running + returned: always + type: str +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int +# config +ansible_net_config: + description: The 
current active config from the device + returned: when config is configured + type: str +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re +try: + from itertools import izip +except ImportError: + izip = zip + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import run_commands +from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + COMMANDS = list() + + def __init__(self, module): + self.module = module + self.facts = dict() + self.responses = None + + def populate(self): + self.responses = run_commands(self.module, self.COMMANDS, check_rc=False) + + def run(self, cmd): + return run_commands(self.module, cmd, check_rc=False) + + +class Default(FactsBase): + + COMMANDS = [ + 'show version', + 'show inventory', + 'show running-config | grep hostname' + ] + + def populate(self): + super(Default, self).populate() + data = self.responses[0] + self.facts['version'] = self.parse_version(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + + data = self.responses[1] + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['servicetags'] = self.parse_servicetags(data) + + data = self.responses[2] + self.facts['hostname'] = self.parse_hostname(data) + + def 
parse_version(self, data): + match = re.search(r'Software Version:\s*(.+)', data) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'^hostname (.+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'^System Type:\s*(.+)', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'image file is "(.+)"', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + for line in data.split('\n'): + if line.startswith('*'): + match = re.search( + r'\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M) + if match: + return match.group(3) + + def parse_servicetags(self, data): + tags = [] + for line in data.split('\n'): + match = re.match(r'\**\s+[0-9]+\s+.*(\b[A-Z0-9]{7}\b)', line) + if match: + tags.append(match.group(1)) + return tags + + +class Hardware(FactsBase): + + COMMANDS = [ + 'show file-systems', + 'show memory | except Processor' + ] + + def populate(self): + super(Hardware, self).populate() + data = self.responses[0] + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.responses[1] + match = re.findall(r'\s(\d+)\s', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) // 1024 + self.facts['memfree_mb'] = int(match[2]) // 1024 + + def parse_filesystems(self, data): + return re.findall(r'\s(\S+):$', data, re.M) + + +class Config(FactsBase): + + COMMANDS = ['show running-config'] + + def populate(self): + super(Config, self).populate() + self.facts['config'] = self.responses[0] + + +class Interfaces(FactsBase): + + COMMANDS = [ + 'show interfaces', + 'show ipv6 interface', + 'show lldp neighbors detail', + 'show inventory' + ] + + def populate(self): + super(Interfaces, self).populate() + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.responses[0] + interfaces = self.parse_interfaces(data) + + for key in 
list(interfaces.keys()): + if "ManagementEthernet" in key: + temp_parsed = interfaces[key] + del interfaces[key] + interfaces.update(self.parse_mgmt_interfaces(temp_parsed)) + + for key in list(interfaces.keys()): + if "Vlan" in key: + temp_parsed = interfaces[key] + del interfaces[key] + interfaces.update(self.parse_vlan_interfaces(temp_parsed)) + + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.responses[1] + if len(data) > 0: + data = self.parse_ipv6_interfaces(data) + self.populate_ipv6_interfaces(data) + + data = self.responses[3] + if 'LLDP' in self.get_protocol_list(data): + neighbors = self.responses[2] + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def get_protocol_list(self, data): + start = False + protocol_list = list() + for line in data.split('\n'): + match = re.search(r'Software Protocol Configured\s*', line) + if match: + start = True + continue + if start: + line = line.strip() + if line.isalnum(): + protocol_list.append(line) + return protocol_list + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in interfaces.items(): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['mediatype'] = self.parse_mediatype(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in data.items(): + if key in self.facts['interfaces']: + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s+(.+), subnet', value, re.M) + subnets = 
re.findall(r', subnet is (\S+)', value, re.M) + for addr, subnet in izip(addresses, subnets): + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + + for entry in neighbors.split( + '========================================================================'): + if entry == '': + continue + + intf = self.parse_lldp_intf(entry) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(entry) + fact['port'] = self.parse_lldp_port(entry) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + newline_count = 0 + interface_start = True + + for line in data.split('\n'): + if interface_start: + newline_count = 0 + if len(line) == 0: + newline_count += 1 + if newline_count == 2: + interface_start = True + else: + match = re.match(r'^(\S+) (\S+)', line) + if match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_mgmt_interfaces(self, data): + parsed = dict() + interface_start = True + for line in data.split('\n'): + match = re.match(r'^(\S+) (\S+)', line) + if "Time since" in line: + interface_start = True + parsed[key] += '\n%s' % line + elif match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_vlan_interfaces(self, data): + parsed = dict() + interface_start = True + line_before_end = False + for line in data.split('\n'): + match = re.match(r'^(\S+) (\S+)', line) + match_endline = re.match(r'^\s*\d+ packets, \d+ bytes$', line) + + 
if "Output Statistics" in line: + line_before_end = True + parsed[key] += '\n%s' % line + elif match_endline and line_before_end: + line_before_end = False + interface_start = True + parsed[key] += '\n%s' % line + elif match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_ipv6_interfaces(self, data): + parsed = dict() + for line in data.split('\n'): + if len(line) == 0: + continue + if line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+) (\S+)', line) + if match: + key = match.group(0) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + if match.group(1) != "not": + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)', data) + if match: + if match.group(1) != "not": + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'LineSpeed (\d+)', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'(\w+) duplex', data, re.M) + if match: + return match.group(1) + + def parse_mediatype(self, data): + media = re.search(r'(.+) media present, (.+)', data, re.M) + if media: + match = re.search(r'type is (.+)$', media.group(0), re.M) + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M) + if match: + return match.group(1) + 
+ def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^\sLocal Interface (\S+\s\S+)', data, re.M) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'Remote System Name: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'Remote Port ID: (.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + """main entry point for module execution + """ + argument_spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + argument_spec.update(os9_argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + for inst in instances: + inst.populate() + facts.update(inst.facts) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + 
ansible_facts[key] = value + + warnings = list() + check_args(module, warnings) + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/os9/plugins/terminal/__init__.py b/ansible_collections/dellemc/os9/plugins/terminal/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/plugins/terminal/os9.py b/ansible_collections/dellemc/os9/plugins/terminal/os9.py new file mode 100644 index 00000000..e0052fc4 --- /dev/null +++ b/ansible_collections/dellemc/os9/plugins/terminal/os9.py @@ -0,0 +1,83 @@ +# +# (c) 2020 Red Hat Inc. +# +# Copyright (c) 2020 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import json + +from ansible.module_utils._text import to_text, to_bytes +from ansible.plugins.terminal import TerminalBase +from ansible.errors import AnsibleConnectionFailure + + +class TerminalModule(TerminalBase): + + terminal_stdout_re = [ + re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") + ] + + terminal_stderr_re = [ + re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*\n"), + re.compile(br"% ?Bad secret"), + re.compile(br"invalid input", re.I), + re.compile(br"(?:incomplete|ambiguous) command", re.I), + re.compile(br"connection timed out", re.I), + re.compile(br"'[^']' +returned error code: ?\d+"), + ] + + terminal_initial_prompt = br"\[y/n\]:" + + terminal_initial_answer = b"y" + + def on_open_shell(self): + try: + self._exec_cli_command(b'terminal length 0') + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to set terminal parameters') + + def on_become(self, passwd=None): + if self._get_prompt().endswith(b'#'): + return + + cmd = {u'command': u'enable'} + if passwd: + cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict') + cmd[u'answer'] = passwd + + try: + self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict')) + except AnsibleConnectionFailure: + raise AnsibleConnectionFailure('unable to elevate privilege to enable mode') + + def on_unbecome(self): + prompt = self._get_prompt() + if prompt is None: + # if prompt is None most likely the terminal is hung up at a prompt + return + + if prompt.strip().endswith(b')#'): + self._exec_cli_command(b'end') + self._exec_cli_command(b'disable') + + elif prompt.endswith(b'#'): + self._exec_cli_command(b'disable') diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE 
b/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. 
You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/README.md b/ansible_collections/dellemc/os9/roles/os9_aaa/README.md new file mode 100644 index 00000000..84995090 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/README.md @@ -0,0 +1,331 @@ +AAA role +======== + +This role facilitates the configuration of authentication, authorization, and acccounting (AAA), and supports the configuration of RADIUS and TACACS servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The AAA role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_aaa keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os9 | +| ``radius_server.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os9 | +| ``radius_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os9 | +| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os9 | +| ``radius_server.timeout`` | integer | Configures the timeout for 
retransmissions | os9 | +| ``radius_server.deadtime`` | integer | Configures the server dead time | os9 | +| ``radius_server.group`` | dictionary | Configures the RADIUS servers group (see ``group.*``) | os9 | +| ``group.name`` | string (required) | Configures the group name of the RADIUS servers | os9 | +| ``group.host`` | dictionary | Configures the RADIUS server host in the group (see ``host.*``) | os9 | +| ``host.ip`` | string | Configures the RADIUS server host address in the group | os9 | +| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os9 | +| ``host.key_string`` | string: 7,0 | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 | +| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 | +| ``host.timeout`` | integer | Configures the timeout for retransmissions | os9 | +| ``host.state`` | string: present,absent | Removes the host from group of RADIUS server if set to absent | os9 | +| ``group.vrf`` | dictionary | Configures the VRF for RADIUS servers in the group (see ``vrf.*``) | os9 | +| ``vrf.vrf_name`` | string (required) | Configures the name of VRF for the RADIUS server group | os9 | +| ``vrf.source_intf`` | integer | Configures the source interface for outgoing packets from servers in the group | os9 | +| ``vrf.state`` | string: present,absent | Removes the VRF from group of RADIUS servers if set to absent | os9 | +| ``group.state`` | string: present,absent | Removes the RADIUS server group if set to absent | os9 | +| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os9 | +| ``host.ip`` | string | Configures the RADIUS server host address | os9 | +| ``host.key`` | string (required); 0,7,LINE | Configures 
the authentication key | os9 | +| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 | +| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 | +| ``host.timeout`` | integer | Configures timeout for retransmissions | os9 | +| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os9 | +| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os9 | +| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``)| os9 | +| ``tacacs_server.key`` | string (required): 0,7,LINE | Configures the authentication key for TACACS server | os9 | +| ``tacacs_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *tacacs_server.key* is 7 or 0 | os9 | +| ``tacacs_server.group`` | dictionary | Configures the group of TACACS servers (see ``group.*``) | os9 | +| ``group.name`` | string (required) | Configures the group name of the TACACS servers | os9 | +| ``group.host`` | dictionary | Configures the TACACS server host in the group (see ``host.*``) | os9 | +| ``host.ip`` | string | Configures the TACACS server host address in the group | os9 | +| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key of the TACACS server host | os9 | +| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only *host.key* is 7 or 0 | os9 | +| ``host.retransmit`` | integer | 
Configures the number of retransmissions | os9 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 | +| ``host.timeout`` | integer | Configures timeout for retransmissions | os9 | +| ``host.state`` | string: present,absent | Removes the host from group of TACACS server if set to absent | os9 | +| ``group.vrf`` | dictionary | Configures VRF for TACACS servers in the group (see ``vrf.*``) | os9 | +| ``vrf.vrf_name`` | string (required) | Configures the name of VRF for TACACS server group | os9 | +| ``vrf.source_intf`` | integer | Configures source interface for outgoing packets from servers in the group | os9 | +| ``vrf.state`` | string: present,absent | Removes the VRF from group of TACACS server if set to absent | os9 | +| ``group.state`` | string: present,absent | Removes the TACACS server group if set to absent | os9 | +| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os9 | +| ``host.ip`` | string | Configures the TACACS sever host address | os9 | +| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os9 | +| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 | +| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 | +| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 | +| ``host.timeout`` | integer | Configures the timeout for retransmissions | os9 | +| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os9 | +| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os9 | +| ``aaa_accounting.commands`` | list | Configures accounting for EXEC (shell) and config commands (see ``commands.*``) | os9 | +| 
``commands.enable_level`` | integer | Configures enable level for accounting of commands | os9 | +| ``commands.role_name`` | string | Configures user role for accounting of commands; variable is mutually exclusive with ``enable_level`` | os9 | +| ``commands.accounting_list_name`` | integer | Configures named accounting list for commands | os9 | +| ``commands.no_accounting`` | boolean | Configures no accounting of commands | os9 | +| ``commands.record_option`` | string: start-stop,stop-only,wait-start | Configures options to record data | os9 | +| ``commands.state`` | string: present,absent | Removes the named accounting list for the commands if set to absent | os9 | +| ``aaa_accounting.exec`` | list | Configures accounting for EXEC (shell) commands (see ``exec.*``) | os9 | +| ``exec.accounting_list_name`` | string | Configures named accounting list for EXEC (shell) commands | os9 | +| ``exec.no_accounting`` | boolean | Configures no accounting of EXEC (shell) commands | os9 | +| ``exec.record_option`` | string: start-stop,stop-only,wait-start | Configures options to record data | os9 | +| ``exec.state`` | string: present,absent | Removes the named accounting list for the EXEC (shell) commands if set to absent | os9 | +| ``aaa_accounting.suppress`` | boolean | Suppresses accounting for users with NULL username | os9| +| ``aaa_accounting.dot1x`` | string: none,start-stop,stop-only,wait-start | Configures accounting for dot1x events | os9 | +| ``aaa_accounting.rest`` | string:none,start-stop,stop-only,wait-start | Configures accounting for REST interface events | os9 | +| ``aaa_authorization`` | dictionary | Configures authorization parameters (see ``aaa_authorization.*``) | os9 | +| ``aaa_authorization.commands`` | list | Configures authorization for EXEC (shell) and config commands (see ``commands.*``)| os9 | +| ``commands.enable_level`` | integer | Configures enable level for authorization of commands | os9 | +| ``commands.role_name`` | string | Configures user 
role for authorization of commands; mutually exclusive with ``enable_level`` | os9 | +| ``commands.authorization_list_name`` | string | Configures named authorization list for commands | os9 | +| ``commands.authorization_method`` | string: none | Configures no authorization of commands | os9 | +| ``commands.use_data`` | string: local,tacacs+ | Configures data used for authorization | os9 | +| ``commands.state`` | string: present,absent | Removes the named authorization list for the commands if set to absent | os9 | +| ``aaa_authorization.config_commands`` | boolean | Configures authorization for configuration mode commands | os9 | +| ``aaa_authorization.role_only`` | boolean | Configures validation of authentication mode for user role | os9 | +| ``aaa_authorization.exec`` | list | Configures authorization for EXEC (shell) commands (see ``exec.*``) | os9 | +| ``exec.authorization_list_name`` | string | Configures named authorization list for EXEC (shell) commands | os9 | +| ``exec.authorization_method`` | string: none | Configures no authorization of EXEC (shell) commands | os9 | +| ``exec.use_data`` | string: local,tacacs+ | Configures data used for authorization | os9 | +| ``exec.state`` | string: present,absent | Removes the named authorization list for the EXEC (shell) commands if set to absent | os9 | +| ``aaa_authorization.network`` | string: none,radius,ias | Configures authorization for network events | os9 | +| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os9 | +| ``aaa_radius`` | dictionary | Configures AAA for RADIUS group of servers (see ``aaa_radius.*``) | os9 | +| ``aaa_radius.group`` | string | Configures name of the RADIUS group of servers for AAA | os9 | +| ``aaa_radius.auth_method`` | string: pap,mschapv2 | Configures authentication method of RADIUS group of servers for AAA | os9 | +| ``aaa_tacacs`` | dictionary | Configures AAA for TACACS group of servers (see ``aaa_tacacs.*``) | os9 | 
+| ``aaa_tacacs.group`` | string | Configures name of the TACACS group of servers for AAA | os9 | +| ``aaa_authentication.auth_list`` | list | Configures named authentication list for hosts (see ``host.*``) | os9 | +| ``auth_list.name`` | string | Configures named authentication list | os9 | +| ``auth_list.login_or_enable`` | string: enable,login | Configures authentication list for login or enable | os9 | +| ``auth_list.server`` | string: radius,tacacs+ | Configures AAA to use this list of all server hosts | os9 | +| ``auth_list.use_password`` | string: line,local,enable,none | Configures password to use for authentication | os9 | +| ``auth_list.state`` | string: present,absent | Removes the named authentication list if set to absent | os9 | +| ``aaa_authentication.dot1x`` | string: none,radius,ias | Configures authentication for dot1x events | os9 | +| ``line_terminal`` | dictionary | Configures the terminal line (see ``line_terminal.*``) | os9 | +| ``line_terminal.`` | dictionary | Configures the primary or virtual terminal line (see ``.*``); value can be console , vty | os9 | +| ``.authorization`` | dictionary | Configures authorization parameters of line terminal (see ``authorization.*``) | os9 | +| ``authorization.commands`` | list | Configures authorization for EXEC (shell) and config commands (see ``commands.*``) | os9 | +| ``commands.enable_level`` | integer | Configures enable level for authorization of commands at line terminal | os9 | +| ``commands.role_name`` | string | Configures user role for authorization of commands at line terminal; mutually exclusive with `enable_level` | os9 | +| ``commands.authorization_list_name`` | string | Configures named authorization list for commands | os9 | +| ``commands.state`` | string: present,absent | Removes the authorization of commands from line terminal if set to absent | os9 | +| ``authorization.exec`` | list | Configures authorization for EXEC (shell) commands at line terminal (see ``exec.*``) | os9 | +| 
``exec.authorization_list_name`` | string | Configures named authorization list for EXEC (shell) commands | os9 | +| ``exec.state`` | string: present,absent | Removes the authorization of EXEC (shell) from line terminal if set to absent | os9 | +| ``.accounting`` | dictionary | Configures accounting parameters of line terminal (see ``accounting.*``) | os9 | +| ``accounting.commands`` | list | Configures accounting for EXEC (shell) and config commands (see ``commands.*``) | os9 | +| ``commands.enable_level`` | integer | Configures enable level for accounting of commands at line terminal | os9| +| ``commands.role_name`` | string | Configures user role for accounting of commands at line terminal; mutually exclusive with ``enable_level`` | os9 | +| ``commands.accounting_list_name`` | string | Configures named accounting list for commands | os9 | +| ``commands.state`` | string: present,absent | Removes the accounting of commands from line terminal if set to absent | os9| +| ``accounting.exec`` | list | Configures accounting for EXEC (shell) commands at line terminal (see ``exec.*``) | os9 | +| ``exec.accounting_list_name`` | string | Configures named accounting list for EXEC (shell) commands | os9 | +| ``exec.state`` | string: present,absent | Removes the accounting of EXEC (shell) from line terminal if set to absent | os9 | +| ``.authentication`` | dictionary | Configures authentication parameters of line terminal (see ``authentication.*``) | os9 | +| ``authentication.enable`` | string | Configures the authentication list for privilege-level password authentication | os9 | +| ``authentication.login`` | string | Configures the authentication list for password checking | os9 | +| ``client.ip`` | string | Configures the client IP for the radius server | os9 | +| ``client.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os9 | +> **NOTE**: Asterisk (*) denotes the default value if none is specified. 
+ +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
+| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false and it writes a simple playbook that only references the *os9_aaa* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_aaa: + radius_server: + key: radius + retransmit: 5 + timeout: 40 + deadtime: 2300 + group: + - name: RADIUS + host: + - ip: 2001:4898:f0:f09b::1002 + key: 0 + key_string: aaaa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: test + source_intf: fortyGigE 1/2 + state: absent + state: present + host: + - ip: 2001:4898:f0:f09b::1002 + key: xxx + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + tacacs_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa + group: + - name: TACACS + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + key_string: aaa + retransmit: 6 + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: tes + source_intf: fortyGigE 1/3 + state: present + state: present + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + key_string: aa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + aaa_accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + record_option: start-stop + state: present + - role_name: netadmin + accounting_list_name: aa + no_accounting: none + suppress: True + exec: + - accounting_list_name: aaa + no_accounting: true + state: present + dot1x: none + rest: none + aaa_authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + use_data: local + state: present + - role_name: netadmin + authorization_list_name: aa + authorization_method: none + use_data: local + config_commands: True + role_only: + exec: + - authorization_list_name: aaa + authorization_method: if-authenticated + use_data: local + state: present + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: radius + use_password: 
local + state: present + - name: console + server: tacacs+ + login_or_enable: login + use_password: local + aaa_radius: + group: RADIUS + auth_method: pap + aaa_tacacs: + group: TACACS + line_terminal: + vty 0: + authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + state: present + - role_name: netadmin + authorization_list_name: aa + state: present + exec: + - authorization_list_name: aa + state: present + accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + state: present + - role_name: netadmin + accounting_list_name: aa + state: absent + exec: + accounting_list_name: aa + state: present + authentication: + enable: + login: console + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_aaa + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml new file mode 100644 index 00000000..8fce0035 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# defaults file for dellemc.os9.os9_aaa +attribute_type: + mandatory: mandatory + on_for_login_auth: on-for-login-auth + include_in_access_req: include-in-access-req + mac: "mac format" + mac_ietf: "mac format ietf" + mac_ietf_lower_case: "mac format ietf lower-case" + mac_ietf_upper_case: "mac format ietf upper-case" + mac_legacy: "mac format legacy" + mac_legacy_lower_case: "mac format legacy lower-case" + mac_legacy_upper_case: "mac format legacy upper-case" + mac_unformatted: "mac format unformatted" + mac_unformatted_lower_case: "mac format unformatted lower-case" + mac_unformatted_upper_case: "mac format unformatted upper-case" \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml 
b/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml new file mode 100644 index 00000000..ad771c4f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_aaa \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml new file mode 100644 index 00000000..2f94f923 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml @@ -0,0 +1,19 @@ +# copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os9_aaa role facilitates the configuration of Authentication Authorization Accounting (AAA) attributes + in devices running Dell EMC Networking Operating Systems. + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml new file mode 100644 index 00000000..5ffba7b6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os9 + + - name: "Generating AAA configuration for os9" + template: + src: os9_aaa.j2 + dest: "{{ build_dir }}/aaa9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning AAA configuration for os9" + dellemc.os9.os9_config: + src: os9_aaa.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 
b/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 new file mode 100644 index 00000000..0d4aa9f4 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 @@ -0,0 +1,680 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure AAA commands for os9 Devices +os9_aaa: + tacacs_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa + group: + - name: TACACS + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + key_string: aaa + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: test + source_intf: fortyGigE 1/2 + state: present + state: present + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + key_string: aaa + auth_port: 3 + timeout: 2 + state: present + radius_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb + retransmit: 5 + timeout: 10 + deadtime: 2000 + group: + - name: Radius + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + key_string: aaa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: test + source_intf: fortyGigE 1/3 + state: present + state: present + host: + - ip: 2001:4898:f0:f09b::1001 + key: 0 + key_string: aaa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + aaa_accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + no_accounting: true + record_option: start-stop + state: present + suppress: True + exec: + - accounting_list_name: aaa + no_accounting: true + state: present + dot1x: none + rest: none + aaa_authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + use_data: local + state: present + - role_name: netadmin + authorization_list_name: aa + authorization_method: none + use_data: local + config_commands: True + role_only: True + exec: + - authorization_list_name: aaa + authorization_method: if-authenticated + use_data: local + state: present + aaa_radius: + group: RADIUS + auth_method: pap 
+ aaa_tacacs: + group: TACACS + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: tacacs+ + use_password: local + state: present + - name: console + server: radius + login_or_enable: login + use_password: local + line_terminal: + vty 0: + authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + state: present + - enable_level: 2 + authorization_list_name: aa + state: present + exec: + - authorization_list_name: aa + state: present + accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + state: present + - enable_level: 2 + accounting_list_name: aa + state: present + exec: + - accounting_list_name: aa + state: present + authentication: + enable: aa + login: console +##################################################} +{% if os9_aaa is defined and os9_aaa %} +{% for key in os9_aaa.keys() %} + {% set aaa_vars = os9_aaa[key] %} + {% if key == "tacacs_server" %} + {% set server = "tacacs-server" %} + {% endif %} + {% if key == "radius_server" %} + {% set server = "radius-server" %} + {% endif %} + {% if server is defined and server %} + {% if aaa_vars %} + {% set item = aaa_vars %} + {% if item.retransmit is defined %} + {% if item.retransmit %} +{{ server }} retransmit {{ item.retransmit }} + {% else %} +no {{ server }} retransmit + {% endif %} + {% endif %} + {% if item.timeout is defined %} + {% if item.timeout %} +{{ server }} timeout {{ item.timeout }} + {% else %} +no {{ server }} timeout + {% endif %} + {% endif %} + {% if item.deadtime is defined %} + {% if item.deadtime %} +{{ server }} deadtime {{ item.deadtime }} + {% else %} +no {{ server }} deadtime + {% endif %} + {% endif %} + {% if item.key is defined %} + {% if item.key == 0 or item.key == 7 %} + {% if item.key_string is defined and item.key_string%} +{{ server }} key {{ item.key }} {{ item.key_string }} + {% endif %} + {% elif item.key %} +{{ server }} key {{ item.key }} + {% else %} +no {{ server }} key + {% endif %} + {% 
endif %} + {% if item.host is defined and item.host %} + {% for hostlist in item.host %} + {% if hostlist.ip is defined and hostlist.ip %} + {% if hostlist.state is defined and hostlist.state == "absent" %} + {% if (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7) ) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) and 
(hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and 
hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) %} +no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} +no {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7) )%} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) %} +no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} + {% else %} +no {{ server }} host {{ hostlist.ip }} + {% endif %} + {% else %} + {% if (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key== 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and 
(hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %} +{{ server }} host {{ 
hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) %} +{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} +{{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server"%} +{{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7))%} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined 
and hostlist.key) %} +{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} + {% else %} +{{ server }} host {{ hostlist.ip }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if item.group is defined and item.group %} + {% for groupitem in item.group %} + {% if groupitem.name is defined and groupitem.name %} + {% if groupitem.state is defined and groupitem.state == "absent" %} +no {{ server }} group {{ groupitem.name }} + {% else %} +{{ server }} group {{ groupitem.name }} + {% if groupitem.host is defined and groupitem.host %} + {% for hostlist in groupitem.host %} + {% if hostlist.ip is defined and hostlist.ip %} + {% if hostlist.state is defined and hostlist.state == "absent" %} + {% if (hostlist.key is defined and (hostlist.key or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and (hostlist.key or hostlist.key == 7)) and (hostlist.key_string is defined 
and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} + {% elif (hostlist.key is defined and (hostlist.key or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ 
hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) %} + no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + no {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) %} + no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} + {% else %} + no {{ server }} host {{ hostlist.ip }} + {% endif %} + {% else %} + {% if (hostlist.key is defined and (hostlist.key== 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is 
defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} + {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.key is 
defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %} + {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %} + {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %} + {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }} + {% elif (hostlist.timeout is defined and hostlist.timeout) %} + {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} + {% elif (hostlist.auth_port is defined and hostlist.auth_port) %} + {% if server == "radius-server" %}{%set 
port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
+ {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }}
+ {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server"%}
+ {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }}
+ {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) %}
+ {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
+ {% elif (hostlist.key is defined and hostlist.key) %}
+ {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }}
+ {% else %}
+ {{ server }} host {{ hostlist.ip }}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% if groupitem.vrf is defined and groupitem.vrf %}
+ {% if groupitem.vrf.vrf_name is defined and groupitem.vrf.vrf_name %}
+ {% if groupitem.vrf.state is defined and groupitem.vrf.state == "absent" %}
+ no {{ server }} vrf {{ groupitem.vrf.vrf_name }}
+ {% else %}
+ {% if groupitem.vrf.source_intf is defined and groupitem.vrf.source_intf %}
+ {{ server }} vrf {{ groupitem.vrf.vrf_name }} source-interface {{ groupitem.vrf.source_intf }}
+ {% else %}
+ {{ server }} vrf {{ groupitem.vrf.vrf_name }}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+{% endfor %}
+
+ {% if os9_aaa.aaa_accounting is defined and os9_aaa.aaa_accounting %}
+ {% set aaa_accounting = os9_aaa.aaa_accounting %}
+ {% if aaa_accounting.suppress is defined %}
+ {% if aaa_accounting.suppress %}
+aaa accounting suppress null-username
+ {% else %}
+no aaa accounting suppress null-username
+ {% endif %}
+ {% endif %}
+ {% if aaa_accounting.dot1x is defined %}
+ {% if aaa_accounting.dot1x == "none" %}
+aaa accounting dot1x default none
+ {% elif aaa_accounting.dot1x %}
+aaa accounting dot1x default {{ aaa_accounting.dot1x }} tacacs+
+ {% else %}
+no aaa accounting dot1x default
+ {% endif %}
+ {% endif
%} + {% if aaa_accounting.rest is defined %} + {% if aaa_accounting.rest == "none" %} +aaa accounting rest default none + {% elif aaa_accounting.rest %} +aaa accounting rest default {{ aaa_accounting.rest }} tacacs+ + {% else %} +no aaa accounting rest default + {% endif %} + {% endif %} + {% if aaa_accounting.exec is defined and aaa_accounting.exec %} + {% for command in aaa_accounting.exec %} + {% if command.accounting_list_name is defined and command.accounting_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa accounting exec {{ command.accounting_list_name }} + {% else %} + {% if command.record_option is defined and command.record_option %} +aaa accounting exec {{ command.accounting_list_name }} {{ command.record_option }} tacacs+ + {% elif command.no_accounting is defined and command.no_accounting %} +aaa accounting exec {{ command.accounting_list_name }} none + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if aaa_accounting.commands is defined and aaa_accounting.commands %} + {% for command in aaa_accounting.commands %} + {% if command.enable_level is defined and command.enable_level %} + {% if command.accounting_list_name is defined and command.accounting_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }} + {% else %} + {% if command.record_option is defined and command.record_option %} +aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }} {{ command.record_option }} tacacs+ + {% elif command.no_accounting is defined and command.no_accounting %} +aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }} none + {% endif %} + {% endif %} + {% endif %} + {% elif command.role_name is defined and command.role_name %} + {% if command.accounting_list_name is defined and command.accounting_list_name %} + {% if command.state is 
defined and command.state == "absent" %} +no aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }} + {% else %} + {% if command.record_option is defined and command.record_option %} +aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }} {{ command.record_option }} tacacs+ + {% elif command.no_accounting is defined and command.no_accounting %} +aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }} none + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if os9_aaa.aaa_authorization is defined and os9_aaa.aaa_authorization %} + {% set aaa_authorization = os9_aaa.aaa_authorization %} + {% if aaa_authorization.config_commands is defined %} + {% if aaa_authorization.config_commands %} +aaa authorization config-commands + {% else %} +no aaa authorization config-commands + {% endif %} + {% endif %} + {% if aaa_authorization.role_only is defined %} + {% if aaa_authorization.role_only %} +aaa authorization role-only + {% else %} +no aaa authorization role-only + {% endif %} + {% endif %} + {% if aaa_authorization.exec is defined and aaa_authorization.exec %} + {% for command in aaa_authorization.exec %} + {% if command.authorization_list_name is defined and command.authorization_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa authorization exec {{ command.authorization_list_name }} + {% else %} + {% if command.use_data is defined and command.use_data %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization exec {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }} + {% else %} +aaa authorization exec {{ command.authorization_list_name }} {{ command.use_data }} + {% endif %} + {% else %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization exec {{ 
command.authorization_list_name }} {{ command.authorization_method }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if aaa_authorization.commands is defined and aaa_authorization.commands %} + {% for command in aaa_authorization.commands %} + {% if command.enable_level is defined and command.enable_level %} + {% if command.authorization_list_name is defined and command.authorization_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} + {% else %} + {% if command.use_data is defined and command.use_data %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }} + {% else %} +aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.use_data }} + {% endif %} + {% else %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.authorization_method }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% elif command.role_name is defined and command.role_name %} + {% if command.authorization_list_name is defined and command.authorization_list_name %} + {% if command.state is defined and command.state == "absent" %} +no aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} + {% else %} + {% if command.use_data is defined and command.use_data %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }} + {% else %} +aaa authorization 
commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.use_data }} + {% endif %} + {% else %} + {% if command.authorization_method is defined and command.authorization_method %} +aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.authorization_method }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if os9_aaa.aaa_radius is defined and os9_aaa.aaa_radius %} + {% if os9_aaa.aaa_radius.group is defined %} + {% if os9_aaa.aaa_radius.group %} +aaa radius group {{ os9_aaa.aaa_radius.group }} + {% else %} +no aaa radius group + {% endif %} + {% endif %} + {% if os9_aaa.aaa_radius.auth_method is defined %} + {% if os9_aaa.aaa_radius.auth_method %} +aaa radius auth-method {{ os9_aaa.aaa_radius.auth_method }} + {% else %} +no aaa radius auth-method + {% endif %} + {% endif %} + {% endif %} + {% if os9_aaa.aaa_tacacs is defined and os9_aaa.aaa_tacacs %} + {% if os9_aaa.aaa_tacacs.group is defined %} + {% if os9_aaa.aaa_tacacs.group %} +aaa tacacs group {{ os9_aaa.aaa_tacacs.group }} + {% else %} +no aaa tacacs group + {% endif %} + {% endif %} + {% endif %} + + {% if os9_aaa.aaa_authentication is defined and os9_aaa.aaa_authentication %} + {% if os9_aaa.aaa_authentication.auth_list is defined and os9_aaa.aaa_authentication.auth_list %} + {% for auth_list in os9_aaa.aaa_authentication.auth_list %} + {% if auth_list.login_or_enable is defined and auth_list.login_or_enable %} + {% if auth_list.name is defined and auth_list.name %} + {% if auth_list.state is defined and auth_list.state == "absent" %} +no aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} + {% else %} + {% if auth_list.server is defined and auth_list.server %} + {% if auth_list.use_password is defined and auth_list.use_password %} +aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.server }} {{ 
auth_list.use_password }} + {% else %} +aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.server }} + {% endif %} + {% else %} + {% if auth_list.use_password is defined and auth_list.use_password %} +aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.use_password }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if os9_aaa.line_terminal is defined and os9_aaa.line_terminal %} + {% for terminal in os9_aaa.line_terminal.keys() %} + {% set terminal_vars = os9_aaa.line_terminal[terminal] %} +line {{ terminal }} + {% if terminal_vars.authorization is defined and terminal_vars.authorization %} + {% if terminal_vars.authorization.commands is defined and terminal_vars.authorization.commands %} + {% for commands in terminal_vars.authorization.commands %} + {% if commands.enable_level is defined and commands.enable_level %} + {% if commands.state is defined and commands.state == "absent" %} + no authorization commands {{ commands.enable_level }} + {% else %} + {% if commands.authorization_list_name is defined and commands.authorization_list_name %} + authorization commands {{ commands.enable_level }} {{ commands.authorization_list_name }} + {% endif %} + {% endif %} + {% elif commands.role_name is defined and commands.role_name %} + {% if commands.state is defined and commands.state == "absent" %} + no authorization commands role {{ commands.role_name }} + {% else %} + {% if commands.authorization_list_name is defined and commands.authorization_list_name %} + authorization commands role {{ commands.role_name }} {{ commands.authorization_list_name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if terminal_vars.authorization.exec is defined and terminal_vars.authorization.exec %} + {% set exec = terminal_vars.authorization.exec %} + {% if exec.state is defined and exec.state == "absent" %} + no 
authorization exec + {% else %} + {% if exec.authorization_list_name is defined and exec.authorization_list_name %} + authorization exec {{ exec.authorization_list_name }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if terminal_vars.accounting is defined and terminal_vars.accounting %} + {% if terminal_vars.accounting.commands is defined and terminal_vars.accounting.commands %} + {% for commands in terminal_vars.accounting.commands %} + {% if commands.enable_level is defined and commands.enable_level %} + {% if commands.state is defined and commands.state == "absent" %} + no accounting commands {{ commands.enable_level }} + {% else %} + {% if commands.accounting_list_name is defined and commands.accounting_list_name %} + accounting commands {{ commands.enable_level }} {{ commands.accounting_list_name }} + {% endif %} + {% endif %} + {% elif commands.role_name is defined and commands.role_name %} + {% if commands.state is defined and commands.state == "absent" %} + no accounting commands role {{ commands.role_name }} + {% else %} + {% if commands.accounting_list_name is defined and commands.accounting_list_name %} + accounting commands role {{ commands.role_name }} {{ commands.accounting_list_name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if terminal_vars.accounting.exec is defined and terminal_vars.accounting.exec %} + {% set exec = terminal_vars.accounting.exec %} + {% if exec.state is defined and exec.state == "absent" %} + no accounting exec + {% else %} + {% if exec.accounting_list_name is defined and exec.accounting_list_name %} + accounting exec {{ exec.accounting_list_name }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if terminal_vars.authentication is defined and terminal_vars.authentication %} + {% if terminal_vars.authentication.enable is defined %} + {% if terminal_vars.authentication.enable %} + enable authentication {{ terminal_vars.authentication.enable }} + {% else %} + no 
enable authentication + {% endif %} + {% endif %} + {% if terminal_vars.authentication.login is defined %} + {% if terminal_vars.authentication.login %} + login authentication {{ terminal_vars.authentication.login }} + {% else %} + no login authentication + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml new file mode 100644 index 00000000..b4e871b8 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml @@ -0,0 +1,133 @@ +--- +# vars file for dellemc.os9.os9_aaa, +# below gives a sample configuration +# Sample variables for OS9 device +os9_aaa: + radius_server: + key: radius + retransmit: 5 + timeout: 40 + deadtime: 2300 + group: + - name: RADIUS + host: + - ip: 2001:4898:f0:f09b::1002 + key: 0 + key_string: aaaa + retransmit: 5 + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: test + source_intf: fortyGigE 1/2 + state: absent + state: present + host: + - ip: 10.1.1.1 + key: 0 + key_string: aaa + retransmit: 6 + auth_port: 3 + timeout: 2 + state: present + tacacs_server: + key: 7 + key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa + group: + - name: TACACS + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + 
key_string: aaa + auth_port: 3 + timeout: 2 + state: present + vrf: + vrf_name: tes + source_intf: fortyGigE 1/3 + state: present + state: present + host: + - ip: 2001:4898:f0:f09b::1000 + key: 0 + key_string: aaa + auth_port: 3 + timeout: 2 + state: present + aaa_accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + record_option: start-stop + state: present + - role_name: netadmin + accounting_list_name: aa + no_accounting: none + suppress: True + exec: + - accounting_list_name: aaa + no_accounting: true + state: present + dot1x: none + rest: none + aaa_authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + use_data: local + state: present + - role_name: netadmin + authorization_list_name: aa + authorization_method: none + use_data: local + config_commands: True + role_only: + exec: + - authorization_list_name: aaa + authorization_method: if-authenticated + use_data: local + state: present + line_terminal: + vty 0: + authorization: + commands: + - enable_level: 2 + authorization_list_name: aa + state: present + - role_name: netadmin + authorization_list_name: aa + state: present + exec: + - authorization_list_name: aa + state: present + accounting: + commands: + - enable_level: 2 + accounting_list_name: aa + state: present + - role_name: netadmin + accounting_list_name: aa + state: absent + exec: + accounting_list_name: aa + state: present + authentication: + enable: + login: console + aaa_radius: + group: RADIUS + auth_method: pap + aaa_tacacs: + group: TACACS + aaa_authentication: + auth_list: + - name: default + login_or_enable: login + server: tacacs+ + use_password: local + state: present + - name: console + server: radius + login_or_enable: login + use_password: local \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml new file mode 100644 index 00000000..e99880ca --- /dev/null +++ 
b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_aaa \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml new file mode 100644 index 00000000..e198e3ed --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_aaa \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE b/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/README.md b/ansible_collections/dellemc/os9/roles/os9_acl/README.md new file mode 100644 index 00000000..52ef4c55 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/README.md @@ -0,0 +1,134 @@ +ACL role +======== + +This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The ACL role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_acl keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os9 | +| ``name`` | string (required) | Configures the name of the access-control list | os9 | +| ``description`` | string | Configures the description about the access-control list | os9 | +| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os9 | +| ``remark.number`` | integer (required) | Configures the remark sequence number | os9 | +| ``remark.description`` | string | Configures the remark description | os9 | +| ``remark.state`` | 
string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os9 | +| ``extended`` | boolean: true,false | Configures an extended ACL type if set to true; configures a standard ACL if set to false | os9 | +| ``entries`` | list | Configures ACL rules (see ``seqlist.*``) | os9 | +| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os9 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os9 | +| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os9 | +| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os9 | +| ``entries.src_condition`` | string | Specifies the condition to filter packets from the source address; ignored if MAC | os9 | +| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os9 | +| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os9 | +| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os9 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os9 | +| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os9 | +| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os9 | +| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os9 | +| ``stage_ingress.seq_number`` | integer | Configure the sequence number (greater than 0) to rank precedence for this interface and direction | +| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | 
os9 | +| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os9 | +| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os9 | +| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os9 | +| ``lineterminal.line`` | string (required) | Configures access-class on the line terminal | os9 | +| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os9 | +| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os9_acl* role. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_acl: + - type: ipv4 + name: ssh-only + description: ipv4acl + extended: true + remark: + - number: 5 + description: "ipv4remark" + entries: + - number: 5 + permit: true + protocol: tcp + source: any + src_condition: ack + destination: any + dest_condition: eq 22 + other_options: count + state: present + stage_ingress: + - name: fortyGigE 1/28 + state: present + stage_egress: + - name: fortyGigE 1/28 + state: present + lineterminal: + - line: vty 1 + state: present + - line: vty 2 + state: absent + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_acl + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml new file mode 100644 index 00000000..7c196010 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_acl \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml new file mode 100644 index 00000000..ad771c4f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_aaa \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml new file mode 100644 index 00000000..dd418f87 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml new file mode 100644 index 00000000..53612e5e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml @@ -0,0 +1,16 @@ +--- +#tasks file for os9 + - name: "Generating ACL configuration for os9" + template: + src: os9_acl.j2 + dest: "{{ build_dir }}/acl9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning ACL configuration for os9" + dellemc.os9.os9_config: + src: os9_acl.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2 b/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2 new file mode 100644 index 00000000..b47a1c2c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2 @@ -0,0 +1,277 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### + +Purpose: +Configure ACL commands for os9 devices + +os9_acl: + - name: ssh-only + type: ipv4 + description: acl + extended: true + remark: + - number: 1 + description: helloworld + state: present + entries: + - number: 10 + permit: true + protocol: tcp + source: any + destination: any + src_condition: eq 22 + dest_condition: ack + other_options: count + state: present + stage_ingress: + - name: fortyGigE 1/8 + state: present + - name: fortyGigE 1/9 + state: present + stage_egress: + - name: fortyGigE 1/19 + state: present + lineterminal: + - line: vty 0 + state: present + - line: vty 1 + state: 
present + state: present + - name: ipv6-ssh-only + type: ipv6 + entries: + - number: 10 + permit: true + protocol: ipv6 + source: 2001:4898::/32 + destination: any + - number: 20 + permit: true + protocol: tcp + source: any + src_condition: ack + destination: any + - number: 40 + permit: true + protocol: tcp + source: any + destination: any + state: present + lineterminal: + - line: vty 0 + state: present + - line: vty 1 + state: present +#####################################} +{% if os9_acl is defined and os9_acl %} + {% for val in os9_acl + %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} + {% if val.type is defined and val.type == "ipv4" %} + {% if val.extended is defined and val.extended %} +no ip access-list extended {{ val.name }} + {% else %} +no ip access-list standard {{ val.name }} + {% endif %} + {% elif val.type is defined and val.type == "ipv6" %} +no ipv6 access-list {{ val.name }} + {% elif val.type is defined and val.type == "mac" %} + {% if val.extended is defined and val.extended %} +no mac access-list extended {{ val.name }} + {% else %} +no mac access-list standard {{ val.name }} + {% endif %} + {% endif %} + {% else %} + {% if val.type is defined and val.type == "ipv4" %} + {% if val.extended is defined and val.extended %} +ip access-list extended {{ val.name }} + {% else %} +ip access-list standard {{ val.name }} + {% endif %} + {% elif val.type is defined and val.type == "ipv6" %} +ipv6 access-list {{ val.name }} + {% elif val.type is defined and val.type == "mac" %} + {% if val.extended is defined and val.extended %} +mac access-list extended {{ val.name }} + {% else %} +mac access-list standard {{ val.name }} + {% endif %} + {% endif %} + {% if val.description is defined %} + {% if val.description %} + description {{ val.description }} + {% else %} + no description a + {% endif %} + {% endif %} + {% if val.remark is defined and val.remark %} + {% for remark in val.remark %} + {% if 
remark.number is defined and remark.number %} + {% if remark.state is defined and remark.state == "absent" %} + no remark {{ remark.number }} + {% else %} + {% if remark.description is defined and remark.description %} + remark {{ remark.number }} {{ remark.description }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if val.entries is defined and val.entries %} + {% for rule in val.entries %} + {% if rule.number is defined and rule.number %} + {% if rule.state is defined and rule.state == "absent" %} + no seq {{ rule.number }} + {% else %} + {% if rule.permit is defined %} + {% if rule.permit %} + {% set is_permit = "permit" %} + {% else %} + {% set is_permit = "deny" %} + {% endif %} + {% if val.type is defined and val.type == "mac" %} + {% if rule.source is defined and rule.source %} + {% if rule.destination is defined and rule.destination %} + {% if rule.other_options is defined and rule.other_options %} + {% if rule.other_options == "log" %} + {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %} + {% else %} + {% set other_options = rule.other_options %} + {% endif %} + seq {{ rule.number }} {{ is_permit }} {{ rule.source }} {{ rule.destination }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.source }} {{ rule.destination }} + {% endif %} + {% endif %} + {% endif %} + {% else %} + {% if rule.protocol is defined and rule.protocol %} + {% if rule.source is defined and rule.source %} + {% if rule.destination is defined and rule.destination %} + {% if rule.src_condition is defined and rule.src_condition %} + {% if rule.dest_condition is defined and rule.dest_condition %} + {% if rule.other_options is defined and rule.other_options %} + {% if rule.other_options == "log" %} + {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %} + {% else %} + {% set other_options = rule.other_options %} + {% endif %} + seq {{ rule.number }} {{ is_permit }} {{ 
rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} + {% endif %} + {% else %} + {% if rule.other_options is defined and rule.other_options %} + {% if rule.other_options == "log" %} + {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %} + {% else %} + {% set other_options = rule.other_options %} + {% endif %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} + {% endif %} + {% endif %} + {% else %} + {% if rule.dest_condition is defined and rule.dest_condition %} + {% if rule.other_options is defined and rule.other_options %} + {% if rule.other_options == "log" %} + {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %} + {% else %} + {% set other_options = rule.other_options %} + {% endif %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} + {% endif %} + {% else %} + {% if rule.other_options is defined and rule.other_options %} + {% if rule.other_options == "log" %} + {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %} + {% else %} + {% set other_options = rule.other_options %} + {% endif %} + seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ other_options }} + {% else %} + seq {{ rule.number }} {{ is_permit }} {{ 
rule.protocol }} {{ rule.source }} {{ rule.destination }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if val.lineterminal is defined and val.lineterminal %} + {% if val.type is defined and not val.type == "mac" %} + {% for vty in val.lineterminal %} + {% if vty.line is defined and vty.line %} +line {{ vty.line }} + {% if vty.state is defined and vty.state == "absent" %} + no access-class {{ val.name }} {{ val.type }} + {% else %} + access-class {{ val.name }} {{ val.type }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if val.stage_ingress is defined and val.stage_ingress %} + {% for intf in val.stage_ingress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + no mac access-group {{ val.name }} in + {% else %} + no ip access-group {{ val.name }} in + {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + mac access-group {{ val.name }} in + {% else %} + ip access-group {{ val.name }} in + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if val.stage_egress is defined and val.stage_egress %} + {% for intf in val.stage_egress %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + no mac access-group {{ val.name }} out + {% else %} + no ip access-group {{ val.name }} out + {% endif %} + {% endif %} + {% else %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if val.type is defined and val.type == "mac" %} + mac access-group {{ val.name }} out + {% else %} + ip 
access-group {{ val.name }} out + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml new file mode 100644 index 00000000..9f083bb9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml @@ -0,0 +1,88 @@ +--- +# vars file for dellemc.os9.os9_acl, +# below gives a sample configuration +# Sample variables for OS9 device +os9_acl: + - name: ssh-only-mac + type: mac + description: macacl + extended: true + remark: + - number: 1 + description: mac + state: present + entries: + - number: 5 + permit: true + protocol: tcp + source: any + destination: any + dest_condition: eq 2 + other_options: count + state: present + - number: 6 + permit: false + protocol: tcp + source: bb:bb:bb:bb:bb:bb ff:ff:ff:ff:ff:ff + destination: any + dest_condition: log + state: present + stage_ingress: + - name: fortyGigE 1/28 + state: present + - name: fortyGigE 1/27 + state: present + stage_egress: + - name: fortyGigE 1/28 + state: present + lineterminal: + - line: vty 1 + state: present + - line: vty 2 + state: absent + - line: vty 3 + state: present + - name: ipv6-ssh-only + type: ipv6 + description: ipv6acl + remark: + - 
number: 1 + description: ipv6 + entries: + - number: 10 + permit: true + protocol: ipv6 + source: 2001:4898::/32 + destination: any + - number: 20 + permit: true + protocol: tcp + source: any + src_condition: eq 2 + destination: 2404:f801::/32 + - number: 30 + permit: true + protocol: tcp + source: any + destination: 2a01:110::/31 + dest_condition: ack + - number: 40 + permit: true + protocol: tcp + source: any + destination: any + stage_ingress: + - name: fortyGigE 1/26 + state: present + - name: fortyGigE 1/27 + state: present + stage_egress: + - name: fortyGigE 1/26 + state: present + lineterminal: + - line: vty 0 + state: absent + - line: vty 1 + - line: vty 2 + - line: vty 3 + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml new file mode 100644 index 00000000..dbe56bcd --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_acl \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml new file mode 100644 index 00000000..95a39363 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_acl \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/README.md b/ansible_collections/dellemc/os9/roles/os9_bgp/README.md new file mode 100644 index 00000000..c8f580bc --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/README.md @@ -0,0 +1,224 @@ +BGP role +======== + +This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The BGP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_bgp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os9 | +| ``router_id`` | string | Configures the IP address of the local BGP router instance | os9 | +| ``graceful_restart`` | boolean | Configures graceful restart capability | os9 | +| ``graceful_restart.state`` | string: absent,present\* | Removes graceful restart capability if set to absent | os9 | +| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os9 | +| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP 
(1 to 64; default 1) | os9 | +| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os9 | +| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os9 | +| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os9 | +| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os9 | +| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os9 | +| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os9 | +| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os9 | +| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os9 | +| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os9 | +| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os9 | +| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os9 | +| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os9 | +| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os9 | +| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os9 | +| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os9 | +| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | | +| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os9 | +| ``neighbor.type`` | 
string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os9 | +| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os9 | +| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os9 | +| ``neighbor.timer`` | string | Configures neighbor timers (keepalive and holdtime values); for example, 5 10, where 5 is the keepalive interval and 10 is the holdtime | os9 | +| ``neighbor.default_originate`` | boolean: true, false\* | Configures default originate routes to the BGP neighbor | os9 | +| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os9 | +| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os9 | +| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os9 | +| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os9 | +| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os9 | +| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os9 | +| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os9 | +| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os9 | +| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os9 | +| ``neighbor.fall_over`` | string: absent,present | Configures the session fall-over on peer-route loss | os9 | +| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables/disables the sender-side loop detect for neighbors | os9 | +| ``neighbor.src_loopback`` | integer | Configures 
the source loopback interface for routing packets | os9 | +| ``neighbor.src_loopback_state`` | string: absent,present\* | Deletes the source for routing packets if set to absent | os9 | +| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os9 | +| ``neighbor.passive`` | boolean: true,false\* | Configures the passive BGP peer group; supported only when neighbor is a peer-group | os9 | +| ``neighbor.subnet`` | string (required) | Configures the passive BGP neighbor to this subnet; required together with the *neighbor.passive* key for os9 devices | os9 | +| ``neighbor.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4 BGP neighbor if set to absent | os9 | +| ``neighbor.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | | +| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | | +| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os9 | +| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os9 | +| ``redistribute.route_type`` | string (required): static,connected | Configures the name of the routing protocol to redistribute | os9 | +| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os9 | +| ``redistribute.route_map`` | string: absent,present\* | Deletes the route-map to redistribute if set to absent | os9 | +| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os9 | +| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os9 | +| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default 
value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
| +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_bgp* role to configure the BGP network and neighbors. The example creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os9_bgp* role. The sample host_vars given below is for os9. 
+ +**Sample hosts file** + + leaf1 ansible_host=<ip_address> + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_bgp: + asn: 11 + router_id: 192.168.3.100 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + graceful_restart: true + best_path: + as_path: ignore + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-best + state: present + ipv4_network: + - address: 102.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - ip: 192.168.10.2 + type: ipv4 + remote_asn: 12 + timer: 5 10 + adv_interval: 40 + fall_over: present + default_originate: False + peergroup: per + peergroup_state: present + sender_loop_detect: false + src_loopback: 1 + src_loopback_state: present + distribute_list: + in: aa + in_state: present + ebgp_multihop: 25 + admin: up + state: present + - ip: 2001:4898:5808:ffa2::1 + type: ipv6 + remote_asn: 14 + peergroup: per + peergroup_state: present + distribute_list: + in: aa + in_state: present + src_loopback: 0 + src_loopback_state: present + ebgp_multihop: 255 + admin: up + state: present + - name: peer1 + type: peergroup + remote_asn: 14 + distribute_list: + in: an + in_state: present + out: bb + out_state: present + passive: True + subnet: 10.128.4.192/27 + subnet_state: present + state: present + - ip: 172.20.12.1 + description: O_site2-spine1 + type: ipv4 + remote_asn: 64640 + fall_over: present + ebgp_multihop: 4 + src_loopback: 1 + adv_interval: 1 + timer: 3 9 + send_community: + - type: extended + address_family: + - type: ipv4 + activate: false + state: present + - type: l2vpn + activate: true + state: present + admin: up + state: present + redistribute: + - route_type: static + route_map_name: aa + state: present + address_type: ipv4 + - route_type: 
connected + address_type: ipv6 + state: present + state: present + +**Simple playbook to configure BGP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_bgp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml new file mode 100644 index 00000000..0063029c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml new file mode 100644 index 00000000..385a5f7a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml new file mode 100644 index 00000000..5db413c3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml new file mode 100644 index 00000000..46f84c4c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating BGP configuration for os9" + template: + src: os9_bgp.j2 + dest: "{{ build_dir }}/bgp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning BGP configuration for os9" + dellemc.os9.os9_config: + src: os9_bgp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 b/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 new file mode 100644 index 00000000..4bc67912 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 @@ -0,0 +1,351 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################## +Purpose: +Configure BGP commands for os9 Devices +os9_bgp: + asn: 12 + router_id: + maxpath_ibgp: 2 + maxpath_ebgp: 2 + best_path: + as_path: ignore + as_path_state: present + ignore_router_id: true + med: + - attribute: confed + state: present + ipv4_network: + - address: 101.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + neighbor: + - type: ipv4 + remote_asn: 11 + ip: 192.168.11.1 + admin: up + sender_loop_detect: false + src_loopback: 0 + src_loopback_state: present + ebgp_multihop: 255 + distribute_list: + in: aa + 
in_state: present + out: aa + out_state: present + state: present + - type: ipv6 + remote_asn: 14 + ip: 2001:4898:5808:ffa2::1 + sender_loop_detect: false + src_loopback: 0 + src_loopback_state: present + state: present + - type: peer_group + name: peer1 + remote_asn: 6 + subnet: 10.128.3.192/27 + subnet_state: present + admin: up + default_originate: true + sender_loop_detect: false + src_loopback: 1 + src_loopback_state: present + ebgp_multihop: 255 + state: present + redistribute: + - route_type: static + state: present + state: present +################################} +{% if os9_bgp is defined and os9_bgp%} +{% set bgp_vars = os9_bgp %} + +{% if bgp_vars.asn is defined and bgp_vars.asn %} + {% if bgp_vars.state is defined and bgp_vars.state == "absent" %} +no router bgp {{ bgp_vars.asn }} + {% else %} +{# Add Feature to the switch #} +router bgp {{ bgp_vars.asn }} + {% if bgp_vars.router_id is defined %} + {% if bgp_vars.router_id %} + bgp router-id {{ bgp_vars.router_id }} + {% else %} + no bgp router-id + {% endif %} + {% endif %} + + {% if bgp_vars.maxpath_ebgp is defined %} + {% if bgp_vars.maxpath_ebgp %} + maximum-paths ebgp {{ bgp_vars.maxpath_ebgp }} + {% else %} + no maximum-paths ebgp + {% endif %} + {% endif %} + + {% if bgp_vars.maxpath_ibgp is defined %} + {% if bgp_vars.maxpath_ibgp %} + maximum-paths ibgp {{ bgp_vars.maxpath_ibgp }} + {% else %} + no maximum-paths ibgp + {% endif %} + {% endif %} + + {% if bgp_vars.graceful_restart is defined and bgp_vars.graceful_restart %} + {% if bgp_vars.graceful_restart.state is defined and bgp_vars.graceful_restart.state == "present" %} + bgp graceful-restart + {% else %} + no bgp graceful-restart + {% endif %} + {% endif %} + + {% if bgp_vars.best_path is defined and bgp_vars.best_path %} + {% if bgp_vars.best_path.as_path is defined and bgp_vars.best_path.as_path %} + {% if bgp_vars.best_path.as_path_state is defined and bgp_vars.best_path.as_path_state == "absent" %} + no bgp bestpath as-path {{ 
bgp_vars.best_path.as_path }} + {% else %} + bgp bestpath as-path {{ bgp_vars.best_path.as_path }} + {% endif %} + {% endif %} + {% if bgp_vars.best_path.ignore_router_id is defined %} + {% if bgp_vars.best_path.ignore_router_id %} + bgp bestpath router-id ignore + {% else %} + no bgp bestpath router-id ignore + {% endif %} + {% endif %} + {% if bgp_vars.best_path.med is defined and bgp_vars.best_path.med %} + {% for med in bgp_vars.best_path.med %} + {% if med.attribute is defined and med.attribute %} + {% if med.state is defined and med.state == "absent" %} + no bgp bestpath med {{ med.attribute }} + {% else %} + bgp bestpath med {{ med.attribute }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %} + {% for net in bgp_vars.ipv4_network %} + {# remove BGP network announcement #} + {% if net.address is defined and net.address %} + {% if net.state is defined and net.state == "absent" %} + no network {{ net.address }} +{# Add BGP network announcement #} + {% else %} + network {{ net.address }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %} + address-family ipv6 unicast + {% for net in bgp_vars.ipv6_network %} + {% if net.address is defined and net.address %} + {% if net.state is defined and net.state == "absent" %} + no network {{ net.address }} + {% else %} + network {{ net.address }} + {% endif %} + {% endif %} + {% endfor %} + exit-address-family + {% endif %} + + {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %} + {% for neighbor in bgp_vars.neighbor %} + {% if neighbor.type is defined %} + {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %} + {% if neighbor.ip is defined and neighbor.ip %} + {% set tag_or_ip = neighbor.ip %} + {% if neighbor.remote_asn is defined and neighbor.remote_asn %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + no neighbor {{ 
tag_or_ip }} remote-as {{ neighbor.remote_asn }} + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} + no neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }} + {% endif %} + {% endif %} + {% if neighbor.type == "ipv6" %} + address-family ipv6 unicast + no neighbor {{ tag_or_ip }} activate + exit-address-family + {% endif %} + {% else %} + neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }} + {% if neighbor.peergroup is defined and neighbor.peergroup %} + {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %} + no neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }} + {% else %} + neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }} + {% endif %} + {% endif %} + {% if neighbor.type == "ipv6" %} + address-family ipv6 unicast + neighbor {{ tag_or_ip }} activate + exit-address-family + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% elif neighbor.type == "peergroup" %} + {% if neighbor.name is defined and neighbor.name %} + {% set tag_or_ip = neighbor.name %} + {% if neighbor.state is defined and neighbor.state == "absent" %} + no neighbor {{ tag_or_ip }} peer-group + {% else %} + {% if neighbor.passive is defined and neighbor.passive %} + neighbor {{ tag_or_ip }} peer-group passive + {% if neighbor.subnet is defined and neighbor.subnet %} + {% if neighbor.subnet_state is defined and neighbor.subnet_state == "absent" %} + no neighbor {{ tag_or_ip }} subnet {{ neighbor.subnet }} + {% else %} + neighbor {{ tag_or_ip }} subnet {{ neighbor.subnet }} + {% endif %} + {% endif %} + {% else %} + neighbor {{ tag_or_ip }} peer-group + {% endif %} + {% if neighbor.remote_asn is defined and neighbor.remote_asn %} + {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %} + no neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }} + {% else %} + neighbor {{ tag_or_ip }} 
remote-as {{ neighbor.remote_asn }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if tag_or_ip is defined and tag_or_ip %} + {% if (neighbor.state is not defined) or (neighbor.state is defined and not neighbor.state == "absent") %} + {% if neighbor.timer is defined %} + {% if neighbor.timer %} + neighbor {{ tag_or_ip }} timers {{ neighbor.timer }} + {% else %} + no neighbor {{ tag_or_ip }} timers + {% endif %} + {% endif %} + {% if neighbor.default_originate is defined %} + {% if neighbor.default_originate %} + neighbor {{ tag_or_ip }} default-originate + {% else %} + no neighbor {{ tag_or_ip }} default-originate + {% endif %} + {% endif %} + {% if neighbor.sender_loop_detect is defined %} + {% if neighbor.sender_loop_detect %} + neighbor {{ tag_or_ip }} sender-side-loop-detection + {% else %} + no neighbor {{ tag_or_ip }} sender-side-loop-detection + {% endif %} + {% endif %} + {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %} + {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %} + no neighbor {{ tag_or_ip }} update-source Loopback {{neighbor.src_loopback }} + {% else %} + neighbor {{ tag_or_ip }} update-source Loopback {{ neighbor.src_loopback }} + {% endif %} + {% endif %} + {% if neighbor.ebgp_multihop is defined %} + {% if neighbor.ebgp_multihop %} + neighbor {{ tag_or_ip }} ebgp-multihop {{ neighbor.ebgp_multihop }} + {% else %} + no neighbor {{ tag_or_ip }} ebgp-multihop + {% endif %} + {% endif %} + {% if neighbor.distribute_list is defined and neighbor.distribute_list %} + {% if neighbor.distribute_list.in is defined and neighbor.distribute_list.in %} + {% if neighbor.distribute_list.in_state is defined and neighbor.distribute_list.in_state == "absent" %} + no neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.in }} in + {% else %} + neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.in }} in + {% endif %} + {% endif %} + 
{% if neighbor.distribute_list.out is defined and neighbor.distribute_list.out %} + {% if neighbor.distribute_list.out_state is defined and neighbor.distribute_list.out_state == "absent" %} + no neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.out }} out + {% else %} + neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.out }} out + {% endif %} + {% endif %} + {% endif %} + {% if neighbor.admin is defined and (neighbor.admin == "up" or neighbor.admin == "present") %} + neighbor {{ tag_or_ip }} no shutdown + {% else %} + neighbor {{ tag_or_ip }} shutdown + {% endif %} + {% if neighbor.adv_interval is defined %} + {% if neighbor.adv_interval %} + neighbor {{ tag_or_ip }} advertisement-interval {{ neighbor.adv_interval }} + {% else %} + no neighbor {{ tag_or_ip }} advertisement-interval + {% endif %} + {% endif %} + {% if neighbor.fall_over is defined and neighbor.fall_over == "present" %} + neighbor {{ tag_or_ip }} fall-over + {% elif neighbor.fall_over is defined and neighbor.fall_over == "absent" %} + no neighbor {{ tag_or_ip }} fall-over + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %} + {% for route in bgp_vars.redistribute %} + {% if route.route_type is defined and route.route_type %} + {% if route.address_type is defined and route.address_type %} + {% if route.address_type == "ipv6" %} + address-family {{ route.address_type }} unicast + {% if route.state is defined and route.state == "absent" %} + no redistribute {{ route.route_type }} + {% else %} + {% if route.route_map is defined %} + {% if route.route_map == "present" %} + {% if route.route_map_name is defined and route.route_map_name %} + redistribute {{ route.route_type }} route-map {{ route.route_map_name }} + {% else %} + redistribute {{ route.route_type }} + {% endif %} + {% else %} + {% if route.route_map_name is defined and route.route_map_name %} + no 
redistribute {{ route.route_type }} route-map {{ route.route_map_name }} + {% endif %} + {% endif %} + {% else %} + redistribute {{ route.route_type }} + {% endif %} + {% endif %} + exit-address-family + {% else %} + {% if route.state is defined and route.state == "absent" %} + no redistribute {{ route.route_type }} + {% else %} + {% if route.route_map is defined %} + {% if route.route_map == "present" %} + {% if route.route_map_name is defined and route.route_map_name %} + redistribute {{ route.route_type }} route-map {{ route.route_map_name }} + {% else %} + redistribute {{ route.route_type }} + {% endif %} + {% else %} + {% if route.route_map_name is defined and route.route_map_name %} + no redistribute {{ route.route_type }} route-map {{ route.route_map_name }} + {% endif %} + {% endif %} + redistribute {{ route.route_type }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + +{% endif %} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml new file mode 100644 index 00000000..ed00565b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml @@ -0,0 +1,97 @@ +--- +# vars file for dellemc.os9.os9_bgp, +# below gives a sample configuration +# Sample 
variables for OS9 device + os9_bgp: + asn: 11 + router_id: 192.168.3.100 + maxpath_ibgp: 2 + maxpath_ebgp: 2 + best_path: + as_path: ignore + as_path_state: absent + ignore_router_id: true + med: + - attribute: confed + state: present + - attribute: missing-as-best + state: present + ipv4_network: + - address: 102.1.1.0/30 + state: present + ipv6_network: + - address: "2001:4898:5808:ffa0::/126" + state: present + - address: "2001:4898:5808:ffa1::/126" + state: present + neighbor: + - name: per + type: peergroup + remote_asn: 12 + remote_asn_state: absent + default_originate: False + src_loopback: 0 + src_loopback_state: present + ebgp_multihop: 255 + state: present + + - name: peer1 + type: peergroup + remote_asn: 14 + distribute_list: + in: an + in_state: present + out: bb + out_state: present + passive: True + subnet: 10.128.4.192/27 + state: present + + - ip: 192.168.10.2 + type: ipv4 + remote_asn: 12 + timer: 5 10 + default_originate: False + peergroup: per + peergroup_state: present + distribute_list: + in: aa + in_state: present + admin: up + state: present + + - ip: 192.168.13.3 + type: ipv4 + remote_asn: 13 + sender_loop_detect: false + src_loopback: 1 + src_loopback_state: present + distribute_list: + in: aa + in_state: present + out: aa + out_state: present + ebgp_multihop: 25 + state: present + + - ip: 2001:4898:5808:ffa2::1 + type: ipv6 + remote_asn: 14 + peergroup: per + peergroup_state: present + distribute_list: + in: aa + in_state: present + src_loopback: 0 + src_loopback_state: present + ebgp_multihop: 255 + admin: up + state: present + redistribute: + - route_type: static + state: present + address_type: ipv4 + - route_type: connected + address_type: ipv6 + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml new file mode 100644 index 00000000..15511b4d --- /dev/null +++ 
b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml new file mode 100644 index 00000000..3482e5cc --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_bgp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE b/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md b/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md new file mode 100644 index 00000000..c74cf1c6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md @@ -0,0 +1,131 @@ +Copy-config role +================ + +This role is used to push the backup running configuration into a Dell EMC OS9 device, and merges the configuration in the template file with the running configuration of the device. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The copy-config role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- No predefined variables are part of this role +- Use *host_vars* or *group_vars* as part of the template file +- Configuration file is host-specific +- Copy the host-specific configuration to the respective file under the template directory in *.j2* format +- Variables and values are case-sensitive + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_copy_config* role to push the configuration file into the device. It creates a *hosts* file with the switch details and corresponding variables. 
It writes a simple playbook that only references the *os9_copy_config* role. By including the role, you automatically get access to all of the tasks to push configuration file. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + + # This variable shall be applied in the below jinja template for each host by defining here + os9_bgp + asn: 64801 + +**Sample roles/os9_copy_config/templates/leaf1.j2** + + ! Leaf1 BGP profile on Dell OS9 switch + snmp-server community public ro + hash-algorithm ecmp crc + ! + interface ethernet1/1/1:1 + no switchport + ip address 100.1.1.2/24 + ipv6 address 2001:100:1:1::2/64 + mtu 9216 + no shutdown + ! + interface ethernet1/1/9:1 + no switchport + ip address 100.2.1.2/24 + ipv6 address 2001:100:2:1::2/64 + mtu 9216 + no shutdown + ! + router bgp {{ os9_bgp.asn }} + bestpath as-path multipath-relax + bestpath med missing-as-worst + router-id 100.0.2.1 + ! + address-family ipv4 unicast + ! + address-family ipv6 unicast + ! + neighbor 100.1.1.1 + remote-as 64901 + no shutdown + ! + neighbor 100.2.1.1 + remote-as 64901 + no shutdown + ! + neighbor 2001:100:1:1::1 + remote-as 64901 + no shutdown + ! + address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + neighbor 2001:100:2:1::1 + remote-as 64901 + no shutdown + ! + address-family ipv4 unicast + no activate + exit + ! + address-family ipv6 unicast + activate + exit + ! + +**Simple playbook to setup to push configuration file into device — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_copy_config + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml new file mode 100644 index 00000000..7f527946 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_copy_config \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml new file mode 100644 index 00000000..69e9baf7 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_copy_config \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml new file mode 100644 index 00000000..d27c1836 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + This role shall be used to push the backup running configuration into the device. 
+ This role shall merge the configuration in the template file with the running configuration of the device + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml new file mode 100644 index 00000000..682a6e9f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml @@ -0,0 +1,7 @@ +--- +# tasks file for dellemc.os9.os9_copy_config + - name: "Merge the config file to running configuration for OS9" + dellemc.os9.os9_config: + src: "{{ hostname }}.j2" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2 b/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2 new file mode 100644 index 00000000..bb0e16e7 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2 @@ -0,0 +1,3 @@ +! Version 10.3.0E +! Last configuration change at May 09 21:47:35 2017 +! 
\ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml new file mode 100644 index 00000000..73b314ff --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml @@ -0,0 +1 @@ +--- \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml new file mode 100644 index 00000000..e2fb514e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os9.os9_copy_config diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml new file mode 100644 index 00000000..21269beb --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_copy_config diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE b/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE 
@@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/README.md b/ansible_collections/dellemc/os9/roles/os9_dcb/README.md new file mode 100644 index 00000000..20f1c020 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/README.md @@ -0,0 +1,133 @@ +DCB role +======== + +This role facilitates the configuration of data center bridging (DCB). It supports the configuration of the DCB map and the DCB buffer, and assigns them to interfaces. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The DCB role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable and can take the `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_dcb keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``dcb_enable`` | boolean: true,false | Enables/disables DCB | os9 | +| ``dcb_map`` | list | Configures the DCB map (see ``dcb_map.*``) | os9 | +| ``dcb_map.name`` | string (required) | Configures the DCB map name | os9 | +| ``dcb_map.priority_group`` | list | Configures the priority-group for the DCB map (see ``priority_group.*``) | os9 | +| ``priority_group.pgid`` | integer (required): 0-7 | Configures the priority-group ID | os9 | +| ``priority_group.bandwidth`` | integer (required) | Configures the bandwidth percentage for the priority-group | os9 | +| ``priority_group.pfc`` | boolean: true,false (required) | Configures PFC on/off for 
the priorities in the priority-group | os9 | +| ``priority_group.state`` | string: absent,present\* | Deletes the priority-group of the DCB map if set to absent | os9 | +| ``dcb_map.priority_pgid`` |string (required) | Configures priority to priority-group mapping; value is the PGID of priority groups separated by a space (1 1 2 2 3 3 3 4) | os9 | +| ``dcb_map.intf`` | list | Configures the DCB map to the interface (see ``intf.*``) | os9 | +| ``intf.name`` | string (required) | Configures the DCB map to the interface with this interface name | os9 | +| ``intf.state`` | string: absent,present\* | Deletes the DCB map from the interface if set to absent | os9 | +| ``dcb_map.state`` | string: absent,present\* | Deletes the DCB map if set to absent | os9 | +| ``dcb_buffer`` | list | Configures the DCB buffer profile (see ``dcb_buffer.*``) | os9 | +| ``dcb_buffer.name`` | string (required) | Configures the DCB buffer profile name | os9 | +| ``dcb_buffer.description`` | string (required) | Configures a description about the DCB buffer profile | os9 | +| ``dcb_buffer.priority_params`` | list | Configures priority flow-control buffer parameters (see ``priority_params.*``)| os9 | +| ``priority_params.pgid`` | integer (required): 0-7 | Specifies the priority-group ID | os9 | +| ``priority_params.buffer_size`` | int (required) | Configures the ingress buffer size (in KB) of the DCB buffer profile | os9 | +| ``priority_params.pause`` | integer | Configures the buffer limit (in KB) for pausing | os9 | +| ``priority_params.resume`` | integer | Configures buffer offset limit (in KB) for resume | os9 | +| ``priority_params.state`` | string: absent,present\* | Deletes the priority flow parameters of the DCB buffer if set to absent | os9 | +| ``dcb_buffer.intf`` | list | Configures the DCB buffer to the interface (see ``intf.*``) | os9 | +| ``intf.name`` | string (required) | Configures the DCB buffer to the interface with this interface name | os9 | +| ``intf.state`` | string: 
absent,present\* | Deletes the DCB buffer from the interface if set to absent | os9 | +| ``dcb_buffer.state`` | string: absent,present\* | Deletes the DCB buffer profile if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. 
+ | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_dcb* role to completely configure DCB map and DCB buffer profiles and assigns them to interfaces. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os9_dcb* role. 
+ + **Sample hosts file** + + leaf1 ansible_host= <ip_address> + + **Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_dcb: + dcb_map: + - name: test + priority_pgid: 0 0 0 3 3 3 3 0 + priority_group: + - pgid: 0 + bandwidth: 20 + pfc: true + state: present + - pgid: 3 + bandwidth: 80 + pfc: true + state: present + intf: + - name: fortyGigE 1/8 + state: present + - name: fortyGigE 1/9 + state: present + state: present + dcb_buffer: + - name: buffer + description: + priority_params: + - pgid: 0 + buffer_size: 5550 + pause: 40 + resume: 40 + state: present + intf: + - name: fortyGigE 1/8 + state: present + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_dcb + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml new file mode 100644 index 00000000..58a95463 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_dcb \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml new file mode 100644 index 00000000..476aabf0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_dcb \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml new file mode 100644 index 00000000..b76457b6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os9_dcb role facilitates the configuration of Data Center Bridging (DCB) attributes in devices + running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml new file mode 100644 index 00000000..cc1b44fe --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating DCB configuration for os9" + template: + src: os9_dcb.j2 + dest: "{{ build_dir }}/dcb9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning DCB configuration for os9" + dellemc.os9.os9_config: + src: os9_dcb.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 b/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 new file mode 100644 index 00000000..be654009 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 @@ -0,0 +1,216 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure DCB commands for os9 Devices +os9_dcb: + dcb_enable: true + dcb_map: + - name: test + priority_pgid: 0 0 0 3 3 3 0 3 + priority_group: + - pgid: 0 + bandwidth: 20 + pfc: true + state: present + - pgid: 3 + bandwidth: 20 + pfc: true + state: present + intf: + - name: fortyGigE 1/8 + state: present + - name: fortyGigE 1/9 + state: present + state: present + dcb_buffer: + - name: buffer + description: + priority_params: + - pgid: 0 + buffer_size: 5550 + pause: 40 + resume: 40 + state: present + intf: + - name: fortyGigE 1/6 + state: present + state: present 
+################################} +{% if os9_dcb is defined and os9_dcb %} +{% set dcb_vars = os9_dcb %} +{% if dcb_vars.dcb_enable is defined %} + {% if dcb_vars.dcb_enable %} +dcb enable + {% else %} +no dcb enable + {% endif %} +{% endif %} +{% if dcb_vars.dcb_map is defined and dcb_vars.dcb_map %} + {% for map in dcb_vars.dcb_map %} + {% if map.name is defined and map.name %} + {% if map.state is defined and map.state == "absent" %} + {% if map.intf is defined and map.intf %} + {% for intf in map.intf %} + {% if intf.state is defined and intf.state == "absent" %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + no dcb-map {{ map.name }} + exit + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +no dcb-map {{ map.name }} + {% else %} +dcb-map {{ map.name }} + {% set pgid_set = {'value': False} %} + {% if map.priority_group is defined and map.priority_group %} + {% for group in map.priority_group %} + {% if group.pgid is defined and group.pgid >= 0 %} + {% if group.state is defined and group.state == "absent" %} + {% if not pgid_set['value'] %} + {% if map.priority_pgid is defined %} + {% if pgid_set.update({'value': True}) %} {% endif %} + {% if map.priority_pgid %} + priority-pgid {{ map.priority_pgid }} + {% else %} + no priority-pgid + {% endif %} + {% endif %} + {% endif %} + no priority-group {{ group.pgid }} + {% else %} + {% if group.bandwidth is defined and group.bandwidth %} + {% if group.pfc is defined %} + {% if group.pfc %} + priority-group {{ group.pgid }} bandwidth {{ group.bandwidth }} pfc on + {% else %} + priority-group {{ group.pgid }} bandwidth {{ group.bandwidth }} pfc off + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if not pgid_set['value'] %} + {% if map.priority_pgid is defined %} + {% if map.priority_pgid %} + priority-pgid {{ map.priority_pgid }} + {% else %} + no priority-pgid + {% endif %} + {% endif %} + {% endif %} + {% if map.intf is defined 
and map.intf %} + {% for intf in map.intf %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if intf.state is defined and intf.state == "absent" %} + no dcb-map {{ map.name }} + {% else %} + dcb-map {{ map.name }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +{% if dcb_vars.dcb_buffer is defined and dcb_vars.dcb_buffer %} + {% for buf in dcb_vars.dcb_buffer %} + {% if buf.name is defined and buf.name %} + {% if buf.state is defined and buf.state == "absent" %} +no dcb-buffer-threshold {{ buf.name }} + {% else %} +dcb-buffer-threshold {{ buf.name }} + {% if buf.description is defined and buf.description %} + description {{ buf.description }} + {% elif buf.description is defined and not buf.description %} + no description + {% endif %} + + {% if buf.priority_params is defined and buf.priority_params %} + {% for params in buf.priority_params %} + {% if params.pgid is defined and params.pgid >= 0 %} + {% if params.state is defined and params.state == "absent" %} + {% if params.buffer_size is defined and params.buffer_size %} + {% if params.pause is defined and params.pause %} + {% if params.resume is defined and params.resume %} + no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }} + {% else %} + no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} + {% endif %} + {% else %} + {% if params.resume is defined and params.resume %} + no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} resume-offset {{ params.resume }} + {% else %} + no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} + {% endif %} + {% endif %} + {% else %} + {% if params.pause is defined and params.pause %} + {% if params.resume is defined and params.resume %} + no priority {{ params.pgid }} pause-threshold {{ params.pause }} resume-offset {{ 
params.resume }} + {% else %} + no priority {{ params.pgid }} pause-threshold {{ params.pause }} + {% endif %} + {% else %} + {% if params.resume is defined and params.resume %} + no priority {{ params.pgid }} resume-offset {{ params.resume }} + {% else %} + no priority {{ params.pgid }} + {% endif %} + {% endif %} + {% endif %} + {% else %} + {% if params.buffer_size is defined and params.buffer_size %} + {% if params.pause is defined and params.pause %} + {% if params.resume is defined and params.resume %} + priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }} + {% else %} + priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} + {% endif %} + {% else %} + {% if params.resume is defined and params.resume %} + priority {{ params.pgid }} buffer-size {{ params.buffer_size }} resume-offset {{ params.resume }} + {% else %} + priority {{ params.pgid }} buffer-size {{ params.buffer_size }} + {% endif %} + {% endif %} + {% else %} + {% if params.pause is defined and params.pause %} + {% if params.resume is defined and params.resume %} + priority {{ params.pgid }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }} + {% else %} + priority {{ params.pgid }} pause-threshold {{ params.pause }} + {% endif %} + {% else %} + {% if params.resume is defined and params.resume %} + priority {{ params.pgid }} resume-offset {{ params.resume }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if buf.intf is defined and buf.intf %} + {% for intf in buf.intf %} + {% if intf.name is defined and intf.name %} +interface {{ intf.name }} + {% if intf.state is defined and intf.state == "absent" %} + no dcb-policy buffer-threshold {{ buf.name }} + {% else %} + dcb-policy buffer-threshold {{ buf.name }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} + {% endif %} + {% endfor 
%} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml new file mode 100644 index 00000000..4c19958f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml @@ -0,0 +1,38 @@ +--- +# vars file for dellemc.os9.os9_dcb, +# below gives a sample configuration +# Sample variables for OS9 device +os9_dcb: + dcb_enable: true + dcb_map: + - name: test + priority_pgid: 0 0 0 3 0 0 0 0 + priority_group: + - pgid: 0 + bandwidth: 50 + pfc: false + state: present + - pgid: 3 + bandwidth: 50 + pfc: true + state: present + intf: + - name: fortyGigE 1/8 + state: absent + - name: fortyGigE 1/9 + state: present + dcb_buffer: + - name: buffer + description: testbuffer + priority_params: + - pgid: 0 + buffer_size: 70 + pause: 40 + resume: 40 + state: present + intf: + - name: fortyGigE 1/8 + state: present + - name: fortyGigE 1/5 + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml new file mode 100644 index 00000000..ad59857a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli 
+ roles: + - dellemc.os9.os9_dcb \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml new file mode 100644 index 00000000..dec87c76 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_dcb \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE b/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/README.md b/ansible_collections/dellemc/os9/roles/os9_dns/README.md new file mode 100644 index 00000000..ad97999f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/README.md @@ -0,0 +1,94 @@ +DNS role +======== + +This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The DNS role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the dellemc.os9.os9 as a value +- If `os9_cfg_generate` set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_dns keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``name_server`` | list | Configures DNS (see ``name_server.*``) | os9 | +| ``name_server.domain_lookup`` | boolean | Enables or disables domain name lookup | os9 | +| ``name_server.ip`` | list | Configures the name server IP | os9 | +| ``name_server.vrf`` | list | Configures VRF for each IP | os9 | +| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_dns* role to completely set up the DNS server configuration. 
The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. It writes a simple playbook that only references the *os9_dns* role. By including the role, you automatically get access to all of the tasks to configure DNS. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_dns: + domain_lookup: true + name_server: + - ip: + - 1.1.1.1 + - 1.1.1.2 + vrf: + - test + - management + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + +**Simple playbook to setup DNS — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_dns + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml new file mode 100644 index 00000000..a5b36f9e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_dns \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml new file mode 100644 index 00000000..4e8ac24c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_dns \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml new file mode 100644 index 00000000..75373c4d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_dns role facilitates the configuration DNS attributes in devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml new file mode 100644 index 00000000..f9a732c3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating DNS configuration for os9" + template: + src: os9_dns.j2 + dest: "{{ build_dir }}/dns9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning DNS configuration for os9" + dellemc.os9.os9_config: + src: os9_dns.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2 b/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2 new file mode 100644 index 00000000..12f013af --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2 @@ -0,0 +1,111 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure DNS commands for os9 devices +os9_dns: + domain_lookup: true + domain_name: dns.search.name + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + - ip: + - 3.3.2.2 + state: absent + domain_list: + - vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent + - name: + - dname7 + - dname8 + vrf: + - test + - test1 +#####################################} +{% if (os9_dns is defined and os9_dns) %} + {% if os9_dns.domain_lookup is defined and 
os9_dns.domain_lookup == true %} + ip domain-lookup + {% elif os9_dns.domain_lookup is defined and os9_dns.domain_lookup == false %} + no ip domain-lookup + {% endif %} + {% if os9_dns.domain_name is defined and os9_dns.domain_name %} + ip domain-name {{ os9_dns.domain_name }} + {% elif os9_dns.domain_name is defined and os9_dns.domain_name %} + no ip domain-name {{ os9_dns.domain_name }} + {% endif %} + {% if (os9_dns.name_server is defined and os9_dns.name_server) %} + {% for name_server in os9_dns.name_server %} + {% set absent = "" %} + {% if name_server.state is defined and name_server.state == "absent" %} + {% set absent = "no " %} + {% endif %} + + {% set vrf_name_list = name_server.vrf %} + {% if (vrf_name_list is defined and vrf_name_list ) %} + {% for vrf_name in vrf_name_list %} + {% set ip_list = name_server.ip %} + {% if (ip_list is defined and ip_list ) %} + {% for ip_val in ip_list %} + {{ absent }}ip name-server vrf {{ vrf_name }} {{ ip_val }} + {% endfor %} + {% elif name_server.state is defined and name_server.state == "absent"%} + {{ absent }}ip name-server vrf {{ vrf_name }} + {% endif %} + {% endfor %} + {% else %} + {% set ip_list = name_server.ip %} + {% if (ip_list is defined and ip_list ) %} + {% for ip_val in ip_list %} + {{ absent }}ip name-server {{ ip_val }} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if (os9_dns.domain_list is defined and os9_dns.domain_list) %} + {% for domain in os9_dns.domain_list %} + {% set absent = "" %} + {% if domain.state is defined and domain.state == "absent" %} + {% set absent = "no " %} + {% endif %} + + {% set vrf_name_list = domain.vrf %} + {% if (vrf_name_list is defined and vrf_name_list ) %} + {% for vrf_name in vrf_name_list %} + {% set name_list = domain.name %} + {% if (name_list is defined and name_list ) %} + {% for name_val in name_list %} + {{ absent }}ip domain-list vrf {{ vrf_name }} {{ name_val }} + {% endfor %} + {% elif domain.state is defined and 
domain.state == "absent"%} + {{ absent }}ip domain-list vrf {{ vrf_name }} + {% endif %} + {% endfor %} + {% else %} + {% set name_list = domain.name %} + {% if (name_list is defined and name_list ) %} + {% for name_val in name_list %} + {{ absent }}ip domain-list {{ name_val }} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml new file mode 100644 index 00000000..28efa043 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml @@ -0,0 +1,40 @@ +--- +# vars file for dellemc.os9.os9_dns, +# below gives a sample configuration +# Sample variables for OS9 device +os9_dns: + domain_lookup: true + name_server: + - ip: + - 3.1.1.1 + - 3.1.1.2 + vrf: + - test + - test1 + - vrf: + - test1 + state: absent + - ip: + - 2.2.2.2 + state: absent + - ip: + - 3.3.2.2 + state: absent + domain_list: + - vrf: + - test + - test1 + state: absent + - name: + - dname3 + - dname4 + - name: + - dname5 + - dname6 + state: absent + - name: + - dname7 + - dname8 + vrf: + - test + - test1 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml new file mode 
100644 index 00000000..87942483 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_dns \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml new file mode 100644 index 00000000..e5d083a0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_dns \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md b/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md new file mode 100644 index 00000000..3c59d11d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md @@ -0,0 +1,89 @@ +ECMP role +========= + +This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The ECMP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the dellemc.os9.os9 as a value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_ecmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``weighted_ecmp`` | boolean: true,false | Configures weighted ECMP | os9 | +| ``ecmp_group_max_paths`` | integer | Configures the number of maximum-paths per ecmp-group | os9 | +| ``ecmp_group_path_fallback`` | boolean: true,false | Configures ECMP group path management | os9 | +| ``ecmp `` | dictionary | Configures ECMP group (see ``ecmp .*``) | os9 | +| ``ecmp .interface`` | list | Configures interface into an ECMP group | os9 | +| ``ecmp .link_bundle_monitor`` | boolean: true,false | Configures link-bundle monitoring | os9 | +| ``ecmp .state`` | string: present\*,absent | Deletes the ECMP group if set to absent | os9 | + +Connection variables 
+-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +Example playbook +---------------- + +This example uses the *os9_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os9_ecmp* role. The sample *host_vars* is provided for OS9 only. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_ecmp: + ecmp 1: + interface: + - fortyGigE 1/49 + - fortyGigE 1/51 + link_bundle_monitor: true + state: present + weighted_ecmp: true + ecmp_group_max_paths: 3 + ecmp_group_path_fallback: true + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_ecmp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml new file mode 100644 index 00000000..8c84dde3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_ecmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml new file mode 100644 index 00000000..99b79b66 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_ecmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml new file mode 100644 index 00000000..2f355abc --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_ecmp role facilitates the configuration of ECMP group attributes in devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml new file mode 100644 index 00000000..0ffec8d6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml @@ -0,0 +1,17 @@ +--- +# tasks file for os9 + + - name: "Generating ECMP configuration for os9" + template: + src: os9_ecmp.j2 + dest: "{{ build_dir }}/ecmp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning ECMP configuration for os9" + dellemc.os9.os9_config: + src: os9_ecmp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2 b/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2 new file mode 100644 index 00000000..05176467 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2 @@ -0,0 +1,62 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure ECMP commands for os9 devices +os9_ecmp: + weighted_ecmp: true + ecmp_group_max_paths: 3 + ecmp_group_path_fallback: true + ecmp 1: + interface: + - fortyGigE 1/49 + - fortyGigE 1/51 + link_bundle_monitor: true + state: present +#####################################} +{% if os9_ecmp is defined and os9_ecmp %} + {% if os9_ecmp.weighted_ecmp is defined %} + {% if os9_ecmp.weighted_ecmp %} +ip ecmp weighted + {% else %} +no ip ecmp weighted + {% endif %} + {% endif %} + {% if os9_ecmp.ecmp_group_max_paths is defined %} + {% if os9_ecmp.ecmp_group_max_paths %} +ip 
ecmp-group maximum-paths {{ os9_ecmp.ecmp_group_max_paths }} + {% else %} +no ip ecmp-group maximum-paths 2 + {% endif %} + {% endif %} + {% if os9_ecmp.ecmp_group_path_fallback is defined %} + {% if os9_ecmp.ecmp_group_path_fallback %} +ip ecmp-group path-fallback + {% else %} +no ip ecmp-group path-fallback + {% endif %} + {% endif %} + + {% for key in os9_ecmp.keys() %} + {% if " " in key %} + {% set ecmp_vars = os9_ecmp[key] %} + {% set group_num = key.split(" ") %} + {% if ecmp_vars.state is defined and ecmp_vars.state == "absent" %} +no ecmp-group {{ group_num[1] }} + {% else %} +ecmp-group {{ group_num[1] }} + {% if ecmp_vars.interface is defined and ecmp_vars.interface %} + {% for intf in ecmp_vars.interface %} + interface {{ intf }} + {% endfor %} + {% endif %} + {% if ecmp_vars.link_bundle_monitor is defined %} + {% if ecmp_vars.link_bundle_monitor %} + link-bundle-monitor enable + {% else %} + no link-bundle-monitor enable + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml new file mode 100644 index 00000000..00bb8af6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml @@ -0,0 +1,14 @@ +--- +# vars file for 
dellemc.os9.os9_ecmp, +# below gives a sample configuration +# Sample variables for OS9 device +os9_ecmp: + ecmp 1: + interface: + - fortyGigE 1/49 + - fortyGigE 1/51 + link_bundle_monitor: true + state: present + weighted_ecmp: true + ecmp_group_max_paths: 3 + ecmp_group_path_fallback: true \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml new file mode 100644 index 00000000..6c4fea5e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_ecmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml new file mode 100644 index 00000000..532506f0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml @@ -0,0 +1,3 @@ + +--- +# vars file for dellemc.os9.os9_ecmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE b/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/README.md b/ansible_collections/dellemc/os9/roles/os9_interface/README.md new file mode 100644 index 00000000..0597e069 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/README.md @@ -0,0 +1,173 @@ +Interface role +============== + +This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The interface role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os9_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name +- For physical interfaces, the interface name must be in *<interface_type> <slot/port>* format; for logical interfaces, the interface must be in *<logical_interface_type> <id>* format; physical interface name can be *fortyGigE 1/1* +- For interface ranges, the interface name must be in *range <interface_type> <node/slot/port-node/slot/port>* format; +- Logical interface names can be *vlan 1* or *port-channel 1* +- Variables and values are case-sensitive + +> **NOTE**: Only define supported variables for the interface type. For example, do not define the *switchport* variable for a logical interface, and do not configure port mode when *switchport* is present in OS9 devices. 
+ +**interface name keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``desc`` | string | Configures a single line interface description | os9 | +| ``portmode`` | string | Configures port-mode according to the device type | access and trunk, os9 (hybrid) | +| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os9 | +| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os9 | +| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (594 to 12000; 1280 to 65535 to set globally) | os9 | +| ``fanout`` | string:dual, single, quad (os9); string:10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x) | Configures fanout to the appropriate value | os9 | +| ``fanout_speed`` | string: 10G, 25G, 40G, 50G | Configures speed for the fanout port based on the fanout mode specified | os9 | +| ``fanout_state`` | string: present, absent* | Configures the fanout mode to a port if state is set to present | os9 | +| ``keepalive`` | boolean: true,false | Configures keepalive on the port if set to true | os9 | +| ``speed`` | string:10,100,1000,auto | Configures interface speed parameters | os9 | +| ``duplex`` | string: full,half | Configures interface duplex parameters | os9 | +| ``auto_neg`` | boolean: true,false | Configures auto-negotiation mode if set to true | os9 | +| ``cr4_auto_neg`` | boolean: true,false | Configures auto-negotiation mode on a CR4 interface type if set to true | os9 | +| ``suppress_ra`` | string; present,absent | Configures IPv6 router advertisements if set to present | os9 | +| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os9 | +| 
``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address for DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | +| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | +| ``class_vendor_identifier`` | string: present,absent,string | Configures the vendor-class identifier without a user-defined string if set to present; configures a vendor-class identifier with a user-defined string when a string is specified; ignored when *ip_type_dynamic* is set to false | os9 | +| ``option82`` | boolean: true,false\* | Configures option82 with the remote-id MAC address if *remote_id* is undefined; ignored when *ip_type_dynamic* is set to false | os9 | +| ``remote_id`` |string: hostname,mac,string | Configures option82 with the specified *remote-id*; ignored when *option82* is set to false | os9 | +| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os9 | +| ``min_ra`` | string | Configures RA minimum interval time period | os9 | +| ``max_ra`` | string | Configures RA maximum interval time period | os9 | +| ``ip_and_mask`` | string | Configures the specified IP address to the interface; configures the specified IP address to the interface VLAN on devices (192.168.11.1/24 format) | os9 | +| ``ip_and_mask_secondary`` | string | Configures the specified IP address as secondary address to the interface on os9 an devices (192.168.11.2/24 format) | os9 | +| ``ip_virtual_gateway_ip`` | string | Configures an anycast gateway IP address for a VxLAN virtual network | +| ``secondary_ip_state`` | string: absent,present\* | Deletes the secondary IP address if set to absent | os9 | +| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface; configures a specified IP address to the interface VLAN on devices (2001:4898:5808:ffa2::1/126 format) | os9 | +| ``state_ipv6`` | string: absent,present\* | 
Deletes the IPV6 address if set to absent | +| ``ipv6_reachabletime`` | integer | Configures the reachability time for IPv6 neighbor discovery (0 to 3600000) | os9 | +| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os9 | +| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os9 | +| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os9 | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the` ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs 
the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_interface* role to set up description, MTU, admin status, portmode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os9_interface* role. 
+ +**Sample hosts file** + + leaf3 ansible_host= + +**Sample host_vars/leaf3** + + hostname: "leaf3" + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_interface: + TenGigabitEthernet 1/8: + desc: "Connected to Spine1" + portmode: + switchport: False + mtu: 2500 + admin: up + auto_neg: true + speed: auto + duplex: full + keepalive: true + ipv6_and_mask: 2001:4898:5808:ffa2::5/126 + suppress_ra : present + ip_type_dynamic: true + ip_and_mask: 192.168.23.22/24 + class_vendor_identifier: present + option82: true + remote_id: hostname + fortyGigE 1/9: + desc: "Connected to Spine2" + switchport: False + mtu: 2500 + admin: up + cr4_auto_neg: true + ip_and_mask: 192.168.234.20/31 + ip_and_mask_secondary: "192.168.234.21/31" + secondary_ip_state: present + suppress_ra: absent + ip_type_dynamic: false + class_vendor_identifier: absent + option82: true + remote_id: hostname + ipv6_and_mask: 2001:4898:5808:ffa2::9/126 + flowcontrol: + mode: "receive" + enable: "on" + state: "present" + vlan 100: + mtu: 4096 + admin: down + ip_and_mask: + ipv6_and_mask: 2002:4898:5408:faaf::1/64 + suppress_ra: present + state_ipv6: absent + ip_helper: + - ip: 10.0.0.36 + state: absent + ipv6_reachabletime: 600000 + virtual-network 888: + vrf: "green" + desc: "virtual-network interface" + ip_and_mask: "172.17.17.251/24" + ip_virtual_gateway_ip: "172.17.17.1" + admin: up + vlan 20: + suppress_ra: absent + min_ra: 3 + max_ra: 4 + admin: up + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf3 + roles: + - dellemc.os9.os9_interface + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml new file mode 100644 index 00000000..7c8c24e0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml new file mode 100644 index 00000000..617eb3fc --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml new file mode 100644 index 00000000..ff7e1baa --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_interface role facilitates the configuration of interface attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml new file mode 100644 index 00000000..4301ea42 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating interface configuration for os9" + template: + src: os9_interface.j2 + dest: "{{ build_dir }}/intf9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os== "dellemc.os9.os9") and (os9_cfg_generate | default('False') | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning interface configuration for os9" + dellemc.os9.os9_config: + src: os9_interface.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2 b/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2 new file mode 100644 index 00000000..2a98c850 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2 @@ -0,0 +1,237 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################################### +Purpose: +Configure interface commands for os9 Devices. 
+os9_interface: + TenGigabitEthernet 1/36: + desc: "OS9 intf" + portmode: hybrid + mtu: 2000 + switchport: False + admin: up + auto_neg: true + keepalive: true + ip_and_mask: "192.168.13.1/24" + ip_and_mask_secondary: "192.168.14.1/24" + secondary_ip_state: present + suppress_ra: present + ip_type_dynamic: true + ipv6_and_mask: 2001:4898:5808:ffa2::9/126 + ipv6_reachabletime: 60000 + ip_helper: + - ip: 10.0.0.33 + state: present + class_vendor_identifier: present + option82: true + remote_id: hostname + speed: auto + duplex: half + fortyGigE 1/1: + fanout: single + fanout_speed: 40G + fanout_state: present + fortyGigE 0/8: + cr4_auto_neg: true +####################################################} +{% if os9_interface is defined and os9_interface %} +{% for key in os9_interface.keys() %} + {% set intf_vars = os9_interface[key] %} + {% set intf = key.split(" ") %} + {% set port = intf[1].split('/') %} + {% if intf_vars.fanout is defined %} + {% if intf_vars.fanout %} + {% if intf_vars.fanout_state is defined and intf_vars.fanout_state == "present" %} + {% if intf_vars.fanout_speed is defined and intf_vars.fanout_speed %} +stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout}} speed {{ intf_vars.fanout_speed }} no-confirm + {% else %} +stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout }} no-confirm + {% endif %} + {% else %} +no stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout }} no-confirm + {% endif %} + {% endif %} + {% endif %} +{% endfor %} +{% for key in os9_interface.keys() %} +{% set intf_vars = os9_interface[key] %} +{% set intf = key.split(" ") %} +{% set port = intf[1].split('/') %} + {% if (intf_vars.fanout is defined and not intf_vars.fanout) or (intf_vars.fanout is not defined)%} +interface {{ key }} + {% if intf_vars.desc is defined %} + {% if intf_vars.desc %} + description {{ intf_vars.desc }} + {% else %} + no description + {% endif %} + {% endif %} + + {% if intf_vars.portmode is 
defined %} + {% if intf_vars.switchport is defined and intf_vars.switchport == False %} + no switchport + {% endif %} + {% if intf_vars.portmode %} + portmode {{ intf_vars.portmode}} + {% else %} + no portmode hybrid + {% endif %} + {% endif %} + + {% if intf_vars.switchport is defined %} + {% if intf_vars.switchport == True %} + switchport + {% endif %} + {% if intf_vars.portmode is not defined %} + {% if intf_vars.switchport is defined and intf_vars.switchport == False %} + no switchport + {% endif %} + {% endif %} + {% endif %} + + {% if intf_vars.mtu is defined %} + {% if intf_vars.mtu %} + mtu {{ intf_vars.mtu }} + {% else %} + no mtu + {% endif %} + {% endif %} + + {% if intf_vars.keepalive is defined %} + {% if intf_vars.keepalive %} + keepalive + {% else %} + no keepalive + {% endif %} + {% endif %} + + {% if intf_vars.speed is defined %} + {% if intf_vars.speed %} + speed {{ intf_vars.speed }} + {% else %} + no speed + {% endif %} + {% endif %} + + {% if intf_vars.duplex is defined %} + {% if intf_vars.duplex %} + duplex {{ intf_vars.duplex }} + {% else %} + no duplex + {% endif %} + {% endif %} + + {% if intf_vars.auto_neg is defined %} + {% if intf_vars.auto_neg %} + negotiation auto + {% else %} + no negotiation auto + {% endif %} + {% endif %} + + {% if intf_vars.cr4_auto_neg is defined %} + {% if intf_vars.cr4_auto_neg %} + intf-type cr4 autoneg + {% else %} + no intf-type cr4 autoneg + {% endif %} + {% endif %} + + {% if intf_vars.suppress_ra is defined %} + {% if intf_vars.suppress_ra == "present" %} + ipv6 nd suppress-ra + {% else %} + no ipv6 nd suppress-ra + {% endif %} + {% endif %} + + {% if intf_vars.ip_type_dynamic is defined and intf_vars.ip_type_dynamic %} + {% if intf_vars.class_vendor_identifier is defined and intf_vars.class_vendor_identifier == "present" %} + {% if intf_vars.option82 is defined and intf_vars.option82 %} + {% if intf_vars.remote_id is defined and intf_vars.remote_id %} + ip address dhcp vendor-class-identifier relay 
information-option remote-id {{ intf_vars.remote_id }} + {% else %} + ip address dhcp relay information-option vendor-class-identifier + {% endif %} + {% else %} + ip address dhcp vendor-class-identifier + {% endif %} + + {% elif intf_vars.class_vendor_identifier is defined and (intf_vars.class_vendor_identifier|length >1 and not intf_vars.class_vendor_identifier == "absent") %} + {% if intf_vars.option82 is defined and intf_vars.option82 %} + {% if intf_vars.remote_id is defined and intf_vars.remote_id %} + ip address dhcp relay information-option remote-id {{ intf_vars.remote_id }} vendor-class-identifier {{ intf_vars.class_vendor_identifier }} + {% else %} + ip address dhcp relay information-option vendor-class-identifier {{ intf_vars.class_vendor_identifier }} + {% endif %} + {% else %} + ip address dhcp vendor-class-identifier {{ intf_vars.class_vendor_identifier }} + {% endif %} + + {% else %} + {% if intf_vars.option82 is defined and intf_vars.option82 %} + {% if intf_vars.remote_id is defined and intf_vars.remote_id %} + ip address dhcp relay information-option remote-id {{ intf_vars.remote_id }} + {% else %} + ip address dhcp relay information-option + {% endif %} + {% else %} + ip address dhcp + {% endif %} + {% endif %} + {% else %} + {% if intf_vars.ip_and_mask is defined %} + {% if intf_vars.ip_and_mask %} + ip address {{ intf_vars.ip_and_mask }} + {% else %} + no ip address + {% endif %} + {% endif %} + {% if intf_vars.ip_and_mask_secondary is defined and intf_vars.ip_and_mask_secondary %} + {% if intf_vars.secondary_ip_state is defined and intf_vars.secondary_ip_state == "absent" %} + no ip address {{ intf_vars.ip_and_mask_secondary }} secondary + {% else %} + ip address {{ intf_vars.ip_and_mask_secondary }} secondary + {% endif %} + {% endif %} + {% endif %} + + + {% if intf_vars.ipv6_and_mask is defined %} + {% if intf_vars.ipv6_and_mask %} + ipv6 address {{ intf_vars.ipv6_and_mask }} + {% else %} + no ipv6 address + {% endif %} + {% endif %} + + 
{% if intf_vars.ipv6_reachabletime is defined %} + {% if intf_vars.ipv6_reachabletime %} + ipv6 nd reachable-time {{ intf_vars.ipv6_reachabletime }} + {% else %} + no ipv6 nd reachable-time + {% endif %} + {% endif %} + + {% if intf_vars.ip_helper is defined and intf_vars.ip_helper %} + {% for helper in intf_vars.ip_helper %} + {% if helper.ip is defined and helper.ip %} + {% if helper.state is defined and helper.state == "absent" %} + no ip helper-address {{ helper.ip }} + {% else %} + ip helper-address {{ helper.ip }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if intf_vars.admin is defined %} + {% if intf_vars.admin == "up" %} + no shutdown + {% elif intf_vars.admin == "down" %} + shutdown + {% endif %} + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml new file mode 100644 index 00000000..79023381 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml @@ -0,0 +1,50 @@ +--- +# vars file for dellemc.os9.os9_interface +# Sample variables for OS9 device +os9_interface: + TenGigabitEthernet 1/3: + desc: "Connected to Spine1" + portmode: + switchport: False + suppress_ra: present + mtu: 2500 + admin: up + 
auto_neg: true + keepalive: true + speed: auto + duplex: full + ipv6_and_mask: 2001:4898:5808:ffa2::5/126 + ip_type_dynamic: true + ip_and_mask: 192.168.23.22/24 + class_vendor_identifier: present + option82: true + remote_id: hostname + fortyGigE 1/9: + desc: "Connected to Spine2" + switchport: False + mtu: 2500 + admin: up + ip_and_mask: 192.168.234.20/31 + ip_and_mask_secondary: "192.168.14.1/24" + secondary_ip_state: present + cr4_auto_neg: true + keepalive: false + ip_type_dynamic: false + class_vendor_identifier: absent + option82: true + remote_id: hostname + ipv6_and_mask: 2001:4898:5808:ffa2::9/126 + fortyGigE 1/12: + fanout: single + fanout_speed: 40G + fanout_state: present + Vlan 100: + mtu: 4096 + admin: down + ip_and_mask: + ipv6_and_mask: 2002:4898:5408:faaf::1/64 + state_ipv6: absent + ip_helper: + - ip: 10.0.0.36 + state: absent + ipv6_reachabletime: 600000 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml new file mode 100644 index 00000000..7663d936 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml new file mode 100644 index 00000000..35ddaae4 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_interface \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE b/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + 
Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Copyright (c) 2020, Dell Inc. All rights reserved.
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/README.md b/ansible_collections/dellemc/os9/roles/os9_lag/README.md new file mode 100644 index 00000000..19aa5f34 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/README.md @@ -0,0 +1,110 @@ +LAG role +======== + +This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG and minimum required link. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The LAG role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- Object drives the tasks in this role +- `os9_lag` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- `os9_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po ` format (1 to 4096) +- Variables and values are case-sensitive + +**port-channel ID keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os9 | +| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 64 for os9) | os9 | +| ``lacp`` | dictionary | Specifies LACP fast-switchover or long timeout options | os9 | +| 
``lacp.fast_switchover`` | boolean | Configures the fast-switchover option if set to true | os9 |
+| ``lacp.long_timeout`` | boolean | Configures the long-timeout option if set to true | os9 |
+| ``lacp_system_priority`` | integer | Configures the LACP system-priority value (1 to 65535) | os9 |
+| ``lacp_ungroup_vlt`` | boolean | Configures all VLT LACP members to be switchports if set to true | os9 |
+| ``lacp_ungroup`` | list | Specifies the list of port-channels to become switchports (see ``lacp_ungroup.*``) | os9 |
+| ``lacp_ungroup.port_channel`` | integer (required) | Specifies valid port-channel numbers | os9 |
+| ``lacp_ungroup.state`` | string: present,absent\* | Deletes the ungroup association if set to absent | os9 |
+| ``channel_members`` | list | Specifies the list of port members to be associated to the port-channel (see ``channel_members.*``) | os9 |
+| ``channel_members.port`` | string | Specifies valid os9 interface names | os9 |
+| ``channel_members.state`` | string: absent,present | Deletes the port member association if set to absent | os9 |
+| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os9 |
+
+> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
+
+Connection variables
+--------------------
+
+Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_lag* role to setup port channel ID and description, and configures hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel either in static or dynamic mode. 
You can also delete the LAG with the port-channel ID or delete the members associated to it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_lag* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_lag: + Po 127: + type: static + min_links: 3 + lacp: + long_timeout: true + fast_switchover: true + lacp_system_priority: 1 + lacp_ungroup_vlt: true + lacp_ungroup: + - port_channel: 1 + state: present + channel_members: + - port: fortyGigE 1/4 + state: present + - port: fortyGigE 1/5 + state: present + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_lag + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml new file mode 100644 index 00000000..bcfbb897 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml new file mode 100644 index 00000000..cddda15c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml new file mode 100644 index 00000000..2463cb89 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_lag role facilitates the configuration of LAG attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml new file mode 100644 index 00000000..b581b870 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating LAG configuration for os9" + template: + src: os9_lag.j2 + dest: "{{ build_dir }}/lag9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning LAG configuration for os9" + dellemc.os9.os9_config: + src: os9_lag.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2 b/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2 new file mode 100644 index 00000000..f708efc1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2 @@ -0,0 +1,114 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure LAG commands for os9 Devices. 
+os9_lag: + Po 1: + type: static + min_links: 3 + lacp: + long_timeout: true + fast_switchover: true + lacp_system_priority: 2 + lacp_ungroup: + - port_channel: 1 + state: present + lacp_ungroup_vlt: true + channel_members: + - port: fortyGigE 0/4 + state: present + state: present + ###############################} +{% if os9_lag is defined and os9_lag %} +{% for key in os9_lag.keys() %} +{% set channel_id = key.split(" ") %} +{% set lag_vars = os9_lag[key] %} + + {% if lag_vars.lacp_system_priority is defined %} + {% if lag_vars.lacp_system_priority %} +lacp system-priority {{ lag_vars.lacp_system_priority }} + {% else %} +no lacp system-priority + {% endif %} + {% endif %} + + {% if lag_vars.lacp_ungroup_vlt is defined %} + {% if lag_vars.lacp_ungroup_vlt %} +lacp ungroup member-independent vlt + {% else %} +no lacp ungroup member-independent vlt + {% endif %} + {% endif %} + + {% if lag_vars.lacp_ungroup is defined %} + {% if lag_vars.lacp_ungroup %} + {% for port in lag_vars.lacp_ungroup %} + {% if port.port_channel is defined and port.port_channel %} + {% if port.state is defined and port.state == "absent" %} +no lacp ungroup member-independent port-channel {{ port.port_channel }} + {% else %} +lacp ungroup member-independent port-channel {{ port.port_channel }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% if lag_vars.state is defined and lag_vars.state == "absent" %} +no interface Port-channel {{ channel_id[1] }} + {% else %} +interface Port-channel {{ channel_id[1] }} + + {% if lag_vars.min_links is defined %} + {% if lag_vars.min_links %} + minimum-links {{ lag_vars.min_links }} + {% else %} + no minimum-links + {% endif %} + {% endif %} + + {% if lag_vars.lacp is defined and lag_vars.lacp %} + {% if lag_vars.lacp.fast_switchover is defined %} + {% if lag_vars.lacp.fast_switchover %} + lacp fast-switchover + {% else %} + no lacp fast-switchover + {% endif %} + {% endif %} + {% if lag_vars.lacp.long_timeout is defined %} + {% 
if lag_vars.lacp.long_timeout %} + lacp long-timeout + {% else %} + no lacp long-timeout + {% endif %} + {% endif %} + {% endif %} + + {% if lag_vars.channel_members is defined %} + {% for ports in lag_vars.channel_members %} + {% if lag_vars.type is defined and lag_vars.type == "static" %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state == "absent" %} + no channel-member {{ ports.port }} + {% else %} + channel-member {{ ports.port }} + {% endif %} + {% endif %} + {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state == "absent" %} +interface {{ ports.port }} + no port-channel-protocol LACP + {% else %} +interface {{ ports.port }} + port-channel-protocol LACP + port-channel {{ channel_id[1] }} mode active + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml new file mode 100644 index 00000000..cbb19bdd --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml @@ -0,0 +1,21 @@ +--- +# vars file for dellemc.os9.os9_lag +# Sample variables for os9 device +os9_lag: + 
Po 127: + type: dynamic + lacp: + long_timeout: true + fast_switchover: true + lacp_ungroup_vlt: true + lacp_system_priority: 1 + lacp_ungroup: + - port_channel: 1 + state: present + min_links: 3 + channel_members: + - port: fortyGigE 1/4 + state: present + - port: fortyGigE 1/8 + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml new file mode 100644 index 00000000..0f672937 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml new file mode 100644 index 00000000..cada8d7e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_lag \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/README.md b/ansible_collections/dellemc/os9/roles/os9_lldp/README.md new file mode 100644 index 00000000..802adc68 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/README.md @@ -0,0 +1,245 @@ +LLDP role +========= + +This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at a global and interface level. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, iSCSI at global and interface level. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The LLDP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_lldp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``global_lldp_state`` | string: absent,present | Deletes LLDP at a global level if set to absent | os9 | +| ``enable`` | boolean | Enables or disables LLDP at a global level | os9 | +| ``hello`` | integer | Configures the global LLDP hello interval (5 to 180) | os9 | +| ``mode`` | string: rx,tx | Configures global LLDP mode configuration | os9 | +| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os9 | +| ``fcoe_priority_bits`` | integer | Configures priority bits for FCoE traffic (1 to FF) | os9 | +| 
``iscsi_priority_bits`` | integer | Configures priority bits for iSCSI traffic (1 to FF) | os9 | +| ``dcbx`` | dictionary | Configures DCBx parameters at the global level (see ``dcbx.*``) | os9 | +| ``dcbx.version`` | string | Configures the DCBx version | os9 | +| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os9 | +| ``advertise.dcbx_tlv`` | string | Configures DCBx TLVs advertisements | os9 | +| ``advertise.dcbx_tlv_state`` | string: present,absent | Deletes DCBx TLVs advertisement if set to absent | os9 | +| ``advertise.dcbx_appln_tlv`` | string | Configures DCBx application priority TLVs advertisement | os9 | +| ``advertise.dcbx_appln_tlv_state`` | string: present,absent | Deletes DCBx application priority TLVs advertisement if set to absent | os9 | +| ``advertise.dot1_tlv`` | dictionary | Configures 802.1 TLVs advertisement (see ``dot1_tlv.*``) | os9 | +| ``dot1_tlv.port_tlv`` | dictionary | Configures 802.1 TLVs advertisement (see ``port_tlv.*``) | os9 | +| ``port_tlv.protocol_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement | os9 | +| ``port_tlv.port_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement | os9 | +| ``dot1_tlv.vlan_tlv`` | dictionary | Configures 802.1 VLAN TLVs advertisement (see ``vlan_tlv.*``) | os9 | +| ``vlan_tlv.vlan_range`` | string | Configures 802.1 VLAN name TLVs advertisement | os9 | +| ``advertise.dot3_tlv`` | dictionary | Configures 802.3 TLVs advertisement (see ``dot3_tlv.*``) | os9 | +| ``dot3_tlv.max_frame_size`` | boolean | Configures 802.3 maximum frame size TLVs advertisement | os9 | +| ``advertise.port_descriptor`` | boolean | Configures global port descriptor advertisement | os9 | +| ``advertise.management_tlv`` | string | Configures global management TLVs advertisement | os9 | +| ``advertise.management_tlv_state`` | string: absent,present | Deletes global TLVs advertisement if set to absent | os9 | +| ``advertise.med`` | 
dictionary | Configures MED TLVs advertisement (see ``med_tlv.*``) | os9 | +| ``med.global_med`` | boolean | Configures global MED TLVs advertisement | os9 | +| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os9 | +| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os9 | +| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os9 | +| ``application.priority_tagged`` | boolean | Configures priority tagged for the application MED TLVs advertisement; mutually exclusive with *application.vlan_id* | os9 | +| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os9 | +| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os9 | +| ``med.location_identification`` | list | Configures MED location identification TLVs advertisement (see ``location_identification.*``) | os9 | +| ``location_identification.loc_info`` | string | Configures location information for MED TLVs advertisement | os9 | +| ``location_identification.value`` | string | Configures location information values | os9 | +| ``location_identification.state`` | string: absent,present | Deletes the location information if set to absent | os9 | +| ``management_interface`` | dictionary | Configures LLDP on the management interface (see ``management_interface.*``) | os9 | +| ``management_interface.enable`` | boolean | Enables/disables LLDP on the management interface | os9 | +| ``management_interface.hello`` | integer | Configures LLDP hello interval on the management interface (5 to 180) | os9 | +| ``management_interface.mode`` | string: rx,tx | Configures LLDP mode on the management interface | os9 | +| ``management_interface.multiplier`` | integer | Configures LLDP multiplier on the 
management interface (2 to 10) | os9 | +| ``management_interface.advertise`` | dictionary | Configures TLV advertisement on the management interface (see ``advertise.*``) | os9 | +| ``advertise.port_descriptor`` | boolean | Configures port descriptor advertisement on the management interface | os9 | +| ``advertise.management_tlv`` | string | Configures management TLVs advertisement | os9 | +| ``advertise.management_tlv_state`` | string: absent,present | Deletes management TLVs advertisement if set to absent | os9 | +| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os9 | +| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``<interface name>.*``) | os9 | +| ``<interface name>.state`` | string: absent,present | Deletes LLDP at the interface level if set to absent | os9 | +| ``<interface name>.enable`` | boolean | Enables or disables LLDP at the interface level | os9 | +| ``<interface name>.hello`` | integer | Configures LLDP hello interval at the interface level (5 to 180) | os9 | +| ``<interface name>.mode`` | string: rx,tx | Configures LLDP mode configuration at the interface level | os9 | +| ``<interface name>.multiplier`` | integer | Configures LLDP multiplier at the interface level (2 to 10) | os9 | +| ``<interface name>.dcbx`` | dictionary | Configures DCBx parameters at the interface level (see ``dcbx.*``) | os9 | +| ``dcbx.version`` | string | Configures DCBx version at the interface level | os9 | +| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os9 | +| ``advertise.dcbx_tlv`` | string | Configures DCBx TLVs advertisement at the interface level | os9 | +| ``advertise.dcbx_tlv_state`` | string: present,absent | Deletes interface level DCBx TLVs advertisement if set to absent | os9 | +| ``advertise.dcbx_appln_tlv`` | string | Configures DCBx application priority TLVs advertisement at the interface level | os9 | +| ``advertise.dcbx_appln_tlv_state`` | string: present,absent | Deletes interface level DCBx 
application priority TLVs advertisement if set to absent | os9 | +| ``advertise.dot1_tlv`` | dictionary | Configures 802.1 TLVs advertisement at the interface level (see ``dot1_tlv.*``) | os9 | +| ``dot1_tlv.port_tlv`` | dictionary | Configures 802.1 TLVs advertisement at the interface level (see ``port_tlv.*``) | os9 | +| ``port_tlv.protocol_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement at the interface level | os9 | +| ``port_tlv.port_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement at the interface level | os9 | +| ``dot1_tlv.vlan_tlv`` | dictionary | Configures 802.1 VLAN TLVs advertisement at the interface level (see ``vlan_tlv.*``) | os9 | +| ``vlan_tlv.vlan_range`` | string | Configures 802.1 VLAN name TLVs advertisement at the interface level | os9 | +| ``advertise.dot3_tlv`` | dictionary | Configures 802.3 TLVs advertisement at the interface level (see ``dot3_tlv.*``) | os9 | +| ``dot3_tlv.max_frame_size`` | boolean | Configures 802.3 maximum frame size TLVs advertisement at the interface level | os9 | +| ``advertise.port_descriptor`` | boolean | Configures port descriptor advertisement at the interface level | os9 | +| ``advertise.management_tlv`` | string | Configures TLVs advertisement at the interface level | os9 | +| ``advertise.management_tlv_state`` | string: absent,present | Deletes TLVs advertisement at the interface level if set to absent | os9 | +| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os9 | +| ``med.global_med`` | boolean | Configures MED TLVs advertisement at the interface level | os9 | +| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os9 | +| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os9 | +| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement at the 
interface level (1 to 4094) | os9 | +| ``application.priority_tagged`` | boolean | Configures priority tagged for the application MED TLVs advertisement at the interface level; mutually exclusive with *application.vlan_id* | os9 | +| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement at the interface level (0 to 7) | os9 | +| ``application.code_point_value`` | integer | Configures differentiated services code point value for MED TLVs advertisement at the interface level (0 to 63) | os9 | +| ``med.location_identification`` | list | Configures MED location identification TLVs advertisement at the interface level (see ``location_identification.*``) | os9 | +| ``location_identification.loc_info`` | string | Configures location information for MED TLVs advertisement at the interface level | os9 | +| ``location_identification.value`` | string | Configures the location information value for MED TLVs advertisement at the interface level | os9 | +| ``location_identification.state`` | string: absent,present | Deletes the interface level MED location information if set to absent | os9 | + + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_lldp* role to configure protocol lldp. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_lldp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_lldp: + global_lldp_state: present + enable: false + mode: rx + multiplier: 3 + fcoe_priority_bits: 3 + iscsi_priority_bits: 3 + hello: 6 + dcbx: + version: auto + management_interface: + hello: 7 + multiplier: 3 + mode: tx + enable: true + advertise: + port_descriptor: false + management_tlv: management-address system-capabilities + management_tlv_state: absent + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: absent + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + dot3_tlv: + max_frame_size: false + port_descriptor: false + management_tlv: management-address system-capabilities + management_tlv_state: absent + med: + global_med: true + application: + - name: "guest-voice" + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 + state: present + local_interface: + fortyGigE 1/3: + lldp_state: present + enable: false + mode: rx + multiplier: 3 + hello: 8 + dcbx: + version: auto + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: present + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: absent + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + state: 
present + dot3_tlv: + max_frame_size: true + port_descriptor: true + management_tlv: management-address system-capabilities + management_tlv_state: absent + med: + application: + - name: guest-voice + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_lldp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml new file mode 100644 index 00000000..11d293a1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml new file mode 100644 index 00000000..38e013e1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml new file mode 100644 index 00000000..4b76193d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os9_lldp role facilitates the configuration of Link Layer Discovery Protocol(LLDP) attributes in devices + running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml new file mode 100644 index 00000000..97e34914 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating LLDP configuration for os9" + template: + src: os9_lldp.j2 + dest: "{{ build_dir }}/lldp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning LLDP configuration for os9" + dellemc.os9.os9_config: + src: os9_lldp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2 b/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2 new file mode 100644 index 00000000..375fba74 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2 @@ -0,0 +1,514 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################################### +Purpose: +Configure LLDP commands for os9 Devices. 
+ +os9_lldp: + global_lldp_state: present + enable: false + mode: rx + multiplier: 3 + iscsi_priority_bits: 3 + fcoe_priority_bits: 3 + hello: 8 + dcbx: + version: auto + management_interface: + hello: 6 + multiplier: 3 + mode: tx + enable: true + advertise: + port_descriptor: false + management_tlv: management-address system-capabilities + management_tlv_state: present + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: present + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + state: present + dot3_tlv: + max_frame_size: true + port_descriptor: true + management_tlv: management-address system-capabilities + management_tlv_state: present + med: + application: + - name: guest-voice + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 + local_interface: + fortyGigE 1/3: + lldp_state: present + enable: false + mode: rx + multiplier: 3 + hello: 8 + dcbx: + version: auto + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: present + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + state: present + dot3_tlv: + max_frame_size: true + port_descriptor: true + management_tlv: management-address system-capabilities + management_tlv_state: present + med: + application: + - name: guest-voice + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 + + +####################################################} +{% if os9_lldp is defined and os9_lldp %} + {% set global_state = [] %} + {% if global_state.append(True) %}{% endif %} + + {% for key in os9_lldp.keys() %} + {% set lldp_vars = os9_lldp[key] %} + {% if 
key == "global_lldp_state" and lldp_vars == "absent" %} +no protocol lldp + {% if global_state.insert(False,0) %}{% endif %} + {% endif %} + {% endfor %} + +{% if global_state[0] %} +protocol lldp +{% endif %} + +{% for key in os9_lldp.keys() %} +{% set lldp_vars = os9_lldp[key] %} +{% if global_state[0] %} +{% if key == "management_interface" %} + management-interface + {% if lldp_vars.hello is defined and lldp_vars.hello %} + hello {{ lldp_vars.hello }} + {% else %} + no hello + {% endif %} + {% if lldp_vars.enable is defined and lldp_vars.enable %} + no disable + {% else %} + disable + {% endif %} + {% if lldp_vars.mode is defined and lldp_vars.mode %} + mode {{ lldp_vars.mode }} + {% else %} + no mode + {% endif %} + {% if lldp_vars.multiplier is defined and lldp_vars.multiplier %} + multiplier {{ lldp_vars.multiplier }} + {% else %} + no multiplier + {% endif %} + {% if lldp_vars.advertise is defined and lldp_vars.advertise %} + {% if lldp_vars.advertise.port_descriptor is defined %} + {% if lldp_vars.advertise.port_descriptor %} + advertise interface-port-desc + {% else %} + no advertise interface-port-desc + {% endif %} + {% endif %} + {% if lldp_vars.advertise.management_tlv is defined and lldp_vars.advertise.management_tlv %} + {% if lldp_vars.advertise.management_tlv_state is defined and lldp_vars.advertise.management_tlv_state == "absent" %} + no advertise management-tlv {{ lldp_vars.advertise.management_tlv }} + {% else %} + advertise management-tlv {{ lldp_vars.advertise.management_tlv }} + {% endif %} + {% endif %} + {% endif %} +{% endif %} +{% if key == "enable" %} + {% if lldp_vars %} + no disable + {% else %} + disable + {% endif %} +{% endif %} +{% if key == "fcoe_priority_bits" %} + {% if lldp_vars %} + fcoe priority-bits {{ lldp_vars }} + {% else %} + no fcoe priority-bits + {% endif %} +{% endif %} +{% if key == "hello" %} + {% if lldp_vars %} + hello {{ lldp_vars }} + {% else %} + no hello + {% endif %} +{% endif %} +{% if key == "mode" %} + 
{% if lldp_vars %} + mode {{ lldp_vars }} + {% else %} + no mode + {% endif %} +{% endif %} +{% if key == "multiplier" %} + {% if lldp_vars %} + multiplier {{ lldp_vars }} + {% else %} + no multiplier + {% endif %} +{% endif %} +{% if key == "iscsi_priority_bits" %} + {% if lldp_vars %} + iscsi priority-bits {{ lldp_vars }} + {% else %} + no iscsi priority-bits + {% endif %} +{% endif %} +{% if key == "dcbx" %} + {% if lldp_vars.version is defined and lldp_vars.version %} + dcbx version {{ lldp_vars.version }} + {% else %} + no dcbx version + {% endif %} +{% endif %} +{% if key == "advertise" %} +{% if lldp_vars.management_tlv is defined and lldp_vars.management_tlv %} + {% if lldp_vars.management_tlv_state is defined and lldp_vars.management_tlv_state == "absent" %} + no advertise management-tlv {{ lldp_vars.management_tlv }} + {% else %} + advertise management-tlv {{ lldp_vars.management_tlv }} + {% endif %} +{% endif %} + +{% if lldp_vars.port_descriptor is defined %} + {% if lldp_vars.port_descriptor %} + advertise interface-port-desc + {% else %} + no advertise interface-port-desc + {% endif %} +{% endif %} + +{% if lldp_vars.med is defined and lldp_vars.med %} + {% for med in lldp_vars.med.keys() %} + {% set med_vars = lldp_vars.med[med] %} + {% if med == "global_med" %} + {% if med_vars %} + advertise med + {% else %} + no advertise med + {% endif %} + {% endif %} + {% if med == "location_identification" %} + {% for loc in med_vars %} + {% if loc.loc_info is defined and loc.loc_info %} + {% if loc.value is defined and loc.value %} + {% if loc.state is defined and loc.state == "absent" %} + no advertise med location-identification {{ loc.loc_info }} {{ loc.value }} + {% else %} + advertise med location-identification {{ loc.loc_info }} {{ loc.value }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if med == "application" %} + {% for app in med_vars %} + {% if app.name is defined and app.name %} + {% if app.vlan_id is defined and 
app.vlan_id %} + {% set vlan_or_tag = app.vlan_id %} + {% elif app.priority_tagged is defined and app.priority_tagged %} + {% set vlan_or_tag = "priority-tagged" %} + {% endif %} + {% if vlan_or_tag is defined and vlan_or_tag %} + {% if app.l2_priority is defined and app.l2_priority %} + {% if app.code_point_value is defined and app.code_point_value %} + {% if app.state is defined and app.state == "absent" %} + no advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }} + {% else %} + advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} +{% endif %} + +{% if lldp_vars.dcbx_tlv is defined and lldp_vars.dcbx_tlv %} + {% if lldp_vars.dcbx_tlv_state is defined and lldp_vars.dcbx_tlv_state == "absent" %} + no advertise dcbx-tlv {{ lldp_vars.dcbx_tlv }} + {% else %} + advertise dcbx-tlv {{ lldp_vars.dcbx_tlv }} + {% endif %} +{% endif %} + +{% if lldp_vars.dcbx_appln_tlv is defined and lldp_vars.dcbx_appln_tlv %} + {% if lldp_vars.dcbx_appln_tlv_state is defined and lldp_vars.dcbx_appln_tlv_state == "absent" %} + no advertise dcbx-appln-tlv {{ lldp_vars.dcbx_appln_tlv }} + {% else %} + advertise dcbx-appln-tlv {{ lldp_vars.dcbx_appln_tlv }} + {% endif %} +{% endif %} + +{% if lldp_vars.dot3_tlv is defined and lldp_vars.dot3_tlv %} + {% for dot3 in lldp_vars.dot3_tlv.keys() %} + {% set dot3_vars = lldp_vars.dot3_tlv[dot3] %} + {% if dot3 == "max_frame_size" %} + {% if dot3_vars %} + advertise dot3-tlv max-frame-size + {% else %} + no advertise dot3-tlv max-frame-size + {% endif %} + {% endif %} + {% endfor %} +{% endif %} + +{% if lldp_vars.dot1_tlv is defined and lldp_vars.dot1_tlv %} + {% for dot1 in lldp_vars.dot1_tlv.keys() %} + {% set dot1_vars = lldp_vars.dot1_tlv[dot1] %} + {% if dot1 == "port_tlv" %} + {% if dot1_vars.protocol_vlan_id is defined and 
dot1_vars.protocol_vlan_id %} + {% if dot1_vars.port_vlan_id is defined %} + {% if dot1_vars.port_vlan_id %} + advertise dot1-tlv port-protocol-vlan-id port-vlan-id + {% else %} + advertise dot1-tlv port-protocol-vlan-id + no advertise dot1-tlv port-vlan-id + {% endif %} + {% else %} + advertise dot1-tlv port-protocol-vlan-id + {% endif %} + {% else %} + {% if not dot1_vars.protocol_vlan_id %} + no advertise dot1-tlv port-protocol-vlan-id + {% endif %} + {% if dot1_vars.port_vlan_id is defined %} + {% if dot1_vars.port_vlan_id %} + advertise dot1-tlv port-vlan-id + {% else %} + no advertise dot1-tlv port-vlan-id + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if dot1 == "vlan_tlv" %} + {% if dot1_vars.vlan_range is defined and dot1_vars.vlan_range %} + {% if dot1_vars.state is defined and dot1_vars.state == "absent" %} + no advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }} + {% else %} + advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% if os9_lldp is defined and os9_lldp %} +{% for key in os9_lldp.keys() %} +{% set lldp_vars = os9_lldp[key] %} +{% if key == "local_interface" %} + {% for intf in lldp_vars.keys() %} + {% set intf_vars = lldp_vars[intf] %} +interface {{ intf }} + {% if intf_vars.lldp_state is defined and intf_vars.lldp_state == "absent" %} + no protocol lldp + {% else %} + protocol lldp + {% if intf_vars.hello is defined and intf_vars.hello %} + hello {{ intf_vars.hello }} + {% else %} + no hello + {% endif %} + {% if intf_vars.enable is defined and intf_vars.enable %} + no disable + {% else %} + disable + {% endif %} + {% if intf_vars.mode is defined and intf_vars.mode %} + mode {{ intf_vars.mode }} + {% else %} + no mode + {% endif %} + {% if intf_vars.multiplier is defined and intf_vars.multiplier %} + multiplier {{ intf_vars.multiplier }} + {% else %} + no multiplier + {% endif %} 
+ {% if intf_vars.dcbx is defined and intf_vars.dcbx %} + {% if intf_vars.dcbx.version is defined and intf_vars.dcbx.version %} + dcbx version {{ intf_vars.dcbx.version }} + {% else %} + no dcbx version + {% endif %} + {% endif %} + {% if intf_vars.advertise is defined and intf_vars.advertise %} + {% if intf_vars.advertise.port_descriptor is defined %} + {% if intf_vars.advertise.port_descriptor %} + advertise interface-port-desc + {% else %} + no advertise interface-port-desc + {% endif %} + {% endif %} + {% if intf_vars.advertise.management_tlv is defined and intf_vars.advertise.management_tlv %} + {% if intf_vars.advertise.management_tlv_state is defined and intf_vars.advertise.management_tlv_state == "absent" %} + no advertise management-tlv {{ intf_vars.advertise.management_tlv }} + {% else %} + advertise management-tlv {{ intf_vars.advertise.management_tlv }} + {% endif %} + {% endif %} + {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %} + {% for med in intf_vars.advertise.med.keys() %} + {% set med_vars = intf_vars.advertise.med[med] %} + {% if med == "global_med" %} + {% if med_vars %} + advertise med + {% else %} + no advertise med + {% endif %} + {% endif %} + {% if med == "location_identification" %} + {% for loc in med_vars %} + {% if loc.loc_info is defined and loc.loc_info %} + {% if loc.value is defined and loc.value %} + {% if loc.state is defined and loc.state == "absent" %} + no advertise med location-identification {{ loc.loc_info }} {{ loc.value }} + {% else %} + advertise med location-identification {{ loc.loc_info }} {{ loc.value }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if med == "application" %} + {% for app in med_vars %} + {% if app.name is defined and app.name %} + {% if app.vlan_id is defined and app.vlan_id %} + {% set vlan_or_tag = app.vlan_id %} + {% elif app.priority_tagged is defined and app.priority_tagged %} + {% set vlan_or_tag = "priority-tagged" %} + {% endif %} + {% if 
vlan_or_tag is defined and vlan_or_tag %} + {% if app.l2_priority is defined and app.l2_priority %} + {% if app.code_point_value is defined and app.code_point_value %} + {% if app.state is defined and app.state == "absent" %} + no advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }} + {% else %} + advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endfor %} + {% endif %} + + {% if intf_vars.advertise.dcbx_tlv is defined and intf_vars.advertise.dcbx_tlv %} + {% if intf_vars.advertise.dcbx_tlv_state is defined and intf_vars.advertise.dcbx_tlv_state == "absent" %} + no advertise dcbx-tlv {{ intf_vars.advertise.dcbx_tlv }} + {% else %} + advertise dcbx-tlv {{ intf_vars.advertise.dcbx_tlv }} + {% endif %} + {% endif %} + + {% if intf_vars.advertise.dcbx_appln_tlv is defined and intf_vars.advertise.dcbx_appln_tlv %} + {% if intf_vars.advertise.dcbx_appln_tlv_state is defined and intf_vars.advertise.dcbx_appln_tlv_state == "absent" %} + no advertise dcbx-appln-tlv {{ intf_vars.advertise.dcbx_appln_tlv }} + {% else %} + advertise dcbx-appln-tlv {{ intf_vars.advertise.dcbx_appln_tlv }} + {% endif %} + {% endif %} + + {% if intf_vars.advertise.dot3_tlv is defined and intf_vars.advertise.dot3_tlv %} + {% for dot3 in intf_vars.advertise.dot3_tlv.keys() %} + {% set dot3_vars = intf_vars.advertise.dot3_tlv[dot3] %} + {% if dot3 == "max_frame_size" %} + {% if dot3_vars %} + advertise dot3-tlv max-frame-size + {% else %} + no advertise dot3-tlv max-frame-size + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if intf_vars.advertise.dot1_tlv is defined and intf_vars.advertise.dot1_tlv %} + {% for dot1 in intf_vars.advertise.dot1_tlv.keys() %} + {% set dot1_vars = intf_vars.advertise.dot1_tlv[dot1] %} + {% if dot1 == "port_tlv" %} + {% if dot1_vars.protocol_vlan_id is defined and 
dot1_vars.protocol_vlan_id %} + {% if dot1_vars.port_vlan_id is defined %} + {% if dot1_vars.port_vlan_id %} + advertise dot1-tlv port-protocol-vlan-id port-vlan-id + {% else %} + advertise dot1-tlv port-protocol-vlan-id + no advertise dot1-tlv port-vlan-id + {% endif %} + {% else %} + advertise dot1-tlv port-protocol-vlan-id + {% endif %} + {% else %} + {% if not dot1_vars.protocol_vlan_id %} + no advertise dot1-tlv port-protocol-vlan-id + {% endif %} + {% if dot1_vars.port_vlan_id is defined %} + {% if dot1_vars.port_vlan_id %} + advertise dot1-tlv port-vlan-id + {% else %} + no advertise dot1-tlv port-vlan-id + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if dot1 == "vlan_tlv" %} + {% if dot1_vars.vlan_range is defined and dot1_vars.vlan_range %} + {% if dot1_vars.state is defined and dot1_vars.state == "absent" %} + no advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }} + {% else %} + advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml new file mode 100644 index 00000000..ab40de8d --- /dev/null +++ 
b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml @@ -0,0 +1,94 @@ +--- +# vars file for dellemc.os9.os9_lldp, +# below gives a sample configuration +# Sample variables for OS9 device +os9_lldp: + global_lldp_state: present + enable: false + mode: rx + multiplier: 3 + fcoe_priority_bits: 3 + iscsi_priority_bits: 3 + hello: 6 + dcbx: + version: auto + management_interface: + hello: 7 + multiplier: 3 + mode: tx + enable: true + advertise: + port_descriptor: false + management_tlv: management-address system-capabilities + management_tlv_state: absent + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: absent + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + dot3_tlv: + max_frame_size: false + port_descriptor: false + management_tlv: management-address system-capabilities system-name + management_tlv_state: present + med: + global_med: true + application: + - name: "guest-voice" + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 + state: present + local_interface: + fortyGigE 1/3: + lldp_state: present + enable: false + mode: rx + multiplier: 3 + hello: 8 + dcbx: + version: auto + port_role: auto-upstream + advertise: + dcbx_tlv: pfc + dcbx_tlv_state: present + dcbx_appln_tlv: fcoe + dcbx_appln_tlv_state: + dot1_tlv: + port_tlv: + protocol_vlan_id: true + port_vlan_id: true + vlan_tlv: + vlan_range: 2-4 + state: present + dot3_tlv: + max_frame_size: true + port_descriptor: true + management_tlv: management-address system-capabilities + management_tlv_state: present + med: + application: + - name: guest-voice + vlan_id: 2 + l2_priority: 3 + code_point_value: 4 + - name: voice + priority_tagged: true + l2_priority: 3 + code_point_value: 4 + location_identification: + - loc_info: ecs-elin + value: 12345678911 \ 
No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml new file mode 100644 index 00000000..49901101 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml @@ -0,0 +1,6 @@ + +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml new file mode 100644 index 00000000..b10424ea --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_lldp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE b/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/README.md b/ansible_collections/dellemc/os9/roles/os9_logging/README.md new file mode 100644 index 00000000..ee10bbc9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/README.md @@ -0,0 +1,148 @@ +Logging role +============ + +This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The Logging role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If the `os9_cfg_generate` variable is set to true, it generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_logging keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``logging`` | list | Configures the logging server (see ``logging.*``) | os9 | +| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os9 | +| ``logging.secure_port`` | integer | Specifies log messages over the TLS port, CA certificates must be installed to specify the log messages over TLS port | os9 | +| ``logging.tcp_port`` | integer | Specifies log messages over the TCP port if *secure_port* is not defined | os9 | +| ``logging.udp_port`` | integer | Specifies log messages over the UDP port if both TCP and the secure port key are not defined | os9 | +| ``logging.vrf`` | dict | 
Specifies a VRF instance to be used to reach the host | os9 | +| ``logging.vrf.name`` | string | Specifies the VRF name | os9 | +| ``logging.vrf.secure_port`` | integer | Specifies log messages over the TLS port, CA certificates must be installed to specify the log messages over TLS port | os9 | +| ``logging.vrf.tcp_port`` | integer | Specifies log messages over the TCP port if *secure_port key* is not defined | os9 | +| ``logging.vrf.udp_port`` | integer | Specifies log messages over the UDP port if both TCP and *secure_port_key* is not defined | os9 | +| ``logging.vrf.state`` | string: absent,present\* | Deletes VRF instance of the logging server if set to absent | os9 | +| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os9 | +| ``buffer`` | integer | Specifies the buffered logging severity level (0 to 7) | os9 | +| ``console_level`` | integer | Configures the console logging level (0 to 7) | os9 | +| ``trap_level`` | integer | Configures the syslog server severity level (0 to 7) | os9| +| ``syslog_version`` | integer | Configures the syslog version (0/1) | os9 | +| ``monitor`` | integer | Configures the terminal line logging level (0 to 7) | os9| +| ``history`` | integer | Configures the syslog history table (0 to 7) | os9 | +| ``history_size`` | integer | Specifies the history table size | os9 | +| ``on`` | boolean | Enables logging to all supported destinations if set to true | os9 | +| ``extended`` | boolean | Enables extended logging if set to true | os9 | +| ``coredump`` | dict | Configures coredump logging | os9 | +| ``coredump.server`` | dict | Specifies all server details | os9 | +| ``coredump.server.server_ip`` | string (required) | Specifies the IPv4/IPv6 address of the logging server | os9 | +| ``coredump.server.username`` | string | Specifies the username to be configured | os9 | +| ``coredump.server.password`` | string | Specifies the password to be configured | os9 | +| ``coredump.server.state`` | 
string: present,absent\* | Deletes the coredump server if set to absent | os9 | +| ``coredump.stackunit`` |dict | Specifies details for enabling a coredump on the stack-unit | os9 | +| ``coredump.stackunit.all`` | boolean | Enables a coredump on all stack-units | os9 | +| ``coredump.stackunit.unit_num`` | integer | Specifies the stack-unit number (0 to 5) | os9 | +| ``coredump.stackunit.state`` | string: present,absent\*| Deletes the stack-unit coredump if set to absent | os9 | +| ``source_interface`` | string | Configures the source interface for logging | os9 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands 
in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. 
+ +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_logging: + logging: + - ip : 1.1.1.1 + state: present + - ip: 2.2.2.2 + secure_port: 1025 + tcp_port: 1024 + udp_port: 2000 + state: present + - ip: 3.3.3.3 + vrf: + name: test + secure_port: 1024 + tcp_port: 1025 + udp_port: 2000 + state: present + secure_port: 1025 + tcp_port: 2000 + udp_port: 1025 + state: present + buffer: 5 + console: 7 + trap: 5 + version: 5 + history: 4 + history_size: 3 + monitor: 5 + on: true + extended: true + coredump: + server: + server_ip: 2.2.2.2 + username: u1 + password: pwd + state: present + stackunit: + all: true + unit_num: 5 + state: present + source_interface: "fortyGigE 1/9" + +**Simple playbook to setup logging — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_logging + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml new file mode 100644 index 00000000..ef0a1c97 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_logging \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml new file mode 100644 index 00000000..36b3d65a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_logging \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml new file mode 100644 index 00000000..e3895760 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_logging role facilitates the configuration of logging attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml new file mode 100644 index 00000000..6e649725 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating logging configuration for os9" + template: + src: os9_logging.j2 + dest: "{{ build_dir }}/logging9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning logging configuration for os9" + dellemc.os9.os9_config: + src: os9_logging.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2 b/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2 new file mode 100644 index 00000000..02437565 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2 @@ -0,0 +1,198 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure logging commands for os9 Devices +os9_logging: + logging: + - ip : 1.1.1.1 + state: present + - ip: 2.2.2.2 + secure_port: 1025 + tcp_port: 1024 + udp_port: 2000 + state: present + - ip: 3.3.3.3 + vrf: + name: test + secure_port: 1024 + tcp_port: 1025 + udp_port: 2000 + state: present + secure_port: 1025 + tcp_port: 2000 + udp_port: 1025 + state: present + buffer: 6 + console: 7 + trap: 5 + version: 5 + history: 4 + history_size: 3 + monitor: 5 + on: true + extended: true + coredump: + server: + 
server_ip: 2.2.2.2 + username: u1 + password: pwd + state: present + stackunit: + all: true + unit_num: 5 + state: present + source_interface: "fortyGigE 1/3" +###################################################} +{% if os9_logging is defined and os9_logging %} +{% for key,value in os9_logging.items() %} + {% if key == "buffer" %} + {% if value %} +logging buffered {{ value }} + {% else %} +no logging buffered + {% endif %} + + {% elif key == "console" %} + {% if value %} +logging console {{ value }} + {% else %} +no logging console + {% endif %} + + {% elif key == "monitor" %} + {% if value %} +logging monitor {{ value }} + {% else %} +no logging monitor + {% endif %} + + {% elif key == "source_interface" %} + {% if value %} +logging source-interface {{ value }} + {% else %} +no logging source-interface + {% endif %} + + {% elif key == "version" %} + {% if value %} +logging version {{ value }} + {% else %} +no logging version + {% endif %} + + {% elif key == "history" %} + {% if value %} +logging history {{ value }} + {% else %} +no logging history + {% endif %} + + {% elif key == "history_size" %} + {% if value %} +logging history size {{ value }} + {% else %} +no logging history size + {% endif %} + + {% elif key == "trap" %} + {% if value %} +logging trap {{ value }} + {% else %} +no logging trap + {% endif %} + + {% elif key == "extended" %} + {% if value %} +logging extended + {% else %} +no logging extended + {% endif %} + + {% elif key == "on" %} + {% if value %} +logging on + {% else %} +no logging on + {% endif %} + + {% elif key == "logging" %} + {% if value %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.vrf is defined and item.vrf %} + {% if item.vrf.name is defined and item.vrf.name %} + {% if item.vrf.state is defined and item.vrf.state == "absent" %} + {% if item.vrf.secure_port is defined and item.vrf.secure_port %} +no logging {{ item.ip }} vrf {{ item.vrf.name }} secure {{ item.vrf.secure_port }} + {% elif 
item.vrf.tcp_port is defined and item.vrf.tcp_port %} +no logging {{ item.ip }} vrf {{ item.vrf.name }} tcp {{ item.vrf.tcp_port }} + {% elif item.vrf.udp_port is defined and item.vrf.udp_port %} +no logging {{ item.ip }} vrf {{ item.vrf.name }} udp {{ item.vrf.udp_port }} + {% endif %} + {% else %} + {% if item.vrf.secure_port is defined and item.vrf.secure_port %} +logging {{ item.ip }} vrf {{ item.vrf.name }} secure {{ item.vrf.secure_port }} + {% elif item.vrf.tcp_port is defined and item.vrf.tcp_port %} +logging {{ item.ip }} vrf {{ item.vrf.name }} tcp {{ item.vrf.tcp_port }} + {% elif item.vrf.udp_port is defined and item.vrf.udp_port %} +logging {{ item.ip }} vrf {{ item.vrf.name }} udp {{ item.vrf.udp_port }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if item.state is defined and item.state == "absent" %} + {% if item.secure_port is defined and item.secure_port %} +no logging {{ item.ip }} secure {{ item.secure_port }} + {% elif item.tcp_port is defined and item.tcp_port %} +no logging {{ item.ip }} tcp {{ item.tcp_port }} + {% elif item.udp_port is defined and item.udp_port %} +no logging {{ item.ip }} udp {{ item.udp_port }} + {% else %} +no logging {{ item.ip }} + {% endif %} + {% else %} + {% if item.secure_port is defined and item.secure_port %} +logging {{ item.ip }} secure {{ item.secure_port }} + {% elif item.tcp_port is defined and item.tcp_port %} +logging {{ item.ip }} tcp {{ item.tcp_port }} + {% elif item.udp_port is defined and item.udp_port %} +logging {{ item.ip }} udp {{ item.udp_port }} + {% else %} +logging {{ item.ip }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "coredump" %} + {% if value %} + {% if value.server is defined and value.server %} + {% if value.server.server_ip is defined and value.server.server_ip %} + {% if value.server.state is defined and value.server.state == "absent" %} +no logging coredump server {{ value.server.server_ip }} + {% else %} + {% if 
value.server.username is defined and value.server.username and value.server.password is defined and value.server.password %} +logging coredump server {{ value.server.server_ip }} username {{ value.server.username }} password {{ value.server.password }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if value.stackunit is defined and value.stackunit %} + {% if value.stackunit.all is defined and value.stackunit.all %} + {% set my_str = "all " %} + {% else %} + {% if value.stackunit.unit_num is defined and value.stackunit.unit_num %} + {% set my_str = value.stackunit.unit_num|string %} + {% endif %} + {% endif %} + {% if value.stackunit.state is defined and value.stackunit.state == "absent" %} +no logging coredump stack-unit {{ my_str }} + {% else %} +logging coredump stack-unit {{ my_str }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml new file mode 100644 index 00000000..0ff9482c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml @@ -0,0 +1,44 @@ +--- +# vars file for dellemc.os9.os9_logging, +# below gives a sample configuration +# Sample variables for OS9 device +os9_logging: + logging: + - ip: 
1.1.1.1 + state: present + - ip: 2.2.2.2 + secure_port: 1025 + tcp_port: 1024 + udp_port: 2000 + state: present + - ip: 3.3.3.3 + vrf: + name: test + secure_port: 1024 + tcp_port: 1025 + udp_port: 2000 + state: present + secure_port: 1025 + tcp_port: 2000 + udp_port: 1025 + state: present + buffer: 6 + console: 7 + trap: 5 + version: 5 + history: 4 + history_size: 3 + monitor: 5 + on: true + extended: true + coredump: + server: + server_ip: 2.2.2.2 + username: u1 + password: pwd + state: present + stackunit: + all: true + unit_num: 5 + state: present + source_interface: "fortyGigE 1/9" \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml new file mode 100644 index 00000000..3f87d4c6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_logging \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml new file mode 100644 index 00000000..bc9f7c33 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_logging \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/README.md b/ansible_collections/dellemc/os9/roles/os9_ntp/README.md new file mode 100644 index 00000000..81f5f39f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/README.md @@ -0,0 +1,98 @@ +NTP role +======== + +This role facilitates the configuration of network time protocol (NTP) attributes, and it specifically enables configuration of NTP server. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The NTP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value. +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_ntp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``server`` | list | Configures the NTP server (see ``server.*``) | os9 | +| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os9 | +| ``server.vrf`` | list | Configures the NTP server for VRF instance; list item contains the names of the VRF instance | os9 | +| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. 
This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_ntp* role to set the NTP server, source ip, authentication and broadcast service. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When the `os9_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os9_ntp* role. By including the role, you automatically get access to all of the tasks to configure NTP attributes. + +**Sample hosts file** + + leaf1 ansible_host= <ip_address> + +**Sample host_vars/leaf1** + + host: leaf1 + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_ntp: + source: ethernet 1/1/2 + master: 5 + authenticate: true + authentication_key: + - key_num: 123 + key_string_type: 7 + key_string: test + state: present + trusted_key: + - key_num: 1323 + state: present + server: + - ip: 2.2.2.2 + key: 345 + prefer: true + state: present + intf: + ethernet 1/1/2: + disable: true + broadcast: true + +**Simple playbook to setup NTP — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_ntp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml new file mode 100644 index 00000000..835ccd0a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml new file mode 100644 index 00000000..f8519dd1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml new file mode 100644 index 00000000..1def65b1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml new file mode 100644 index 00000000..9ca82a36 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating NTP configuration for os9" + template: + src: os9_ntp.j2 + dest: "{{ build_dir }}/ntp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning NTP configuration for os9" + dellemc.os9.os9_config: + src: os9_ntp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 b/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 new file mode 100644 index 00000000..be4536c3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 @@ -0,0 +1,41 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure NTP commands for os9 Devices +os9_ntp: + server: + - ip: 2.2.2.2 + vrf: + - test + - management + state: present +###################################################} +{% if os9_ntp is defined and os9_ntp %} + +{% for key,value in os9_ntp.items() %} + {% if key == "server" and value %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.state is defined and item.state == "absent" %} + {% if item.vrf is defined and item.vrf %} + {% for vrf_name in item.vrf %} +no ntp server vrf {{ vrf_name }} {{ item.ip }} + {% endfor %} + {% 
else %} +no ntp server {{ item.ip }} + {% endif %} + {% else %} + {% if item.vrf is defined and item.vrf %} + {% for vrf_name in item.vrf %} +ntp server vrf {{ vrf_name }} {{ item.ip }} + {% endfor %} + {% else %} +ntp server {{ item.ip }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml new file mode 100644 index 00000000..f5f4680b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml @@ -0,0 +1,11 @@ +--- +# vars file for +# below gives a sample configuration +# Sample variables for OS9 device +os9_ntp: + server: + - ip: 2.2.2.2 + vrf: + - test + - tes + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml new file mode 100644 index 00000000..0e636d6c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + connection: network_cli + roles: + - dellemc.os9.os9_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml 
new file mode 100644 index 00000000..7b69f09e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_ntp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE b/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md b/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md new file mode 100644 index 00000000..a33434f3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md @@ -0,0 +1,110 @@ +Prefix-list role +================ + +This role facilitates the configuration of a prefix-list. It supports the configuration of an IP prefix-list, and assigns the prefix-list to line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The prefix-list role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_prefix_list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os9 | +| ``name`` | string (required) | Configures the prefix-list name | os9 | +| ``description`` | string | Configures the prefix-list description | os9 | +| ``entries`` | list | Configures rules in the prefix-list (see ``seqlist.*``) | os9 | +| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os9 | +| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to reject packets if set to false | 
os9 | +| ``entries.net_num`` | string (required) | Specifies the network number | os9 | +| ``entries.mask`` | string (required) | Specifies the mask | os9 | +| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os9 | +| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os9 | +| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os9 | +| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os9 | +| ``state`` | string: absent,present\* | Deletes the prefix-list if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_prefix_list* role to configure prefix_list for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_prefix_list* role. + +**Sample hosts file** + + leaf1 ansible_host= <ip_address> + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_prefix_list: + - type: ipv4 + name: spine-leaf + description: Redistribute loopback and leaf networks + entries: + - number: 5 + permit: true + net_num: 10.0.0.0 + mask: 23 + condition_list: + - condition: ge + prelen: 32 + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_prefix_list + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml new file mode 100644 index 00000000..3226617a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_prefix_list \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml new file mode 100644 index 00000000..e1a2d959 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_prefix_list \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml new file mode 100644 index 00000000..27affba9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2017-2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_prefix_list role facilitates the configuration of prefix list attributes in devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml new file mode 100644 index 00000000..4ab6c224 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating prefix list configuration for os9" + template: + src: os9_prefix_list.j2 + dest: "{{ build_dir }}/prefixlist9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning prefix list configuration for os9" + dellemc.os9.os9_config: + src: os9_prefix_list.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2 b/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2 new file mode 100644 index 00000000..63c7086a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2 @@ -0,0 +1,81 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{#################################### +Purpose: +Configure pl on OS9 devices +os9_prefix_list: + - name: testpl + type: ipv4 + description: pl + entries: + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present + state: present +#####################################} +{% if (os9_prefix_list is defined and os9_prefix_list) %} + {% for val in os9_prefix_list %} + {% if val.name is defined and val.name %} + {% if val.state is defined 
and val.state == "absent" %} + {% if val.type is defined and val.type == "ipv4" %} +no ip prefix-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +no ipv6 prefix-list {{ val.name }} + {% endif %} + {% else %} + {% if val.type is defined and val.type == "ipv4" %} +ip prefix-list {{ val.name }} + {% elif val.type is defined and val.type == "ipv6" %} +ipv6 prefix-list {{ val.name }} + {% endif %} + {% if val.description is defined %} + {% if val.description %} + description {{ val.description }} + {% else %} + no description + {% endif %} + {% endif %} + {% if val.entries is defined and val.entries %} + {% for rule in val.entries %} + {% if rule.number is defined and rule.number %} + {% if rule.state is defined and rule.state == "absent" %} + no seq {{ rule.number }} + {% else %} + {% if rule.permit is defined %} + {% if rule.permit %} + {% set is_permit = "permit" %} + {% else %} + {% set is_permit = "deny" %} + {% endif %} + {% endif %} + {% if rule.net_num is defined and rule.net_num %} + {% if rule.net_num == "any" %} + seq {{rule.number}} {{is_permit}} any + {% elif rule.mask is defined and rule.mask %} + {% if rule.condition_list is defined and rule.condition_list %} + {% set condition_string = [' '] %} + {% set item = "" %} + {% for condition in rule.condition_list %} + {% set item= condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' ' %} + {% if condition_string.insert(0,item) %} {% endif %} + {% endfor %} + seq {{rule.number}} {{is_permit}} {{rule.net_num}}/{{rule.mask}}{{ condition_string[0] }} + {% else %} + seq {{rule.number}} {{is_permit}} {{rule.net_num}}/{{rule.mask}} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {%endfor%} +{%endif%} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml 
b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml new file mode 100644 index 00000000..aceb1cd4 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml @@ -0,0 +1,33 @@ +--- +# vars file for dellemc.os9.os9_prefix_list, +# below gives a sample configuration +# Sample variables for OS9 device +os9_prefix_list: + - type: ipv4 + name: spine-leaf + description: Redistribute loopback and leaf networks + entries: + - number: 5 + permit: true + net_num: 10.0.0.0 + mask: 23 + condition_list: + - condition: ge + prelen: 32 + - number: 10 + permit: true + net_num: 10.0.0.0 + mask: 8 + condition_list: + - condition: ge + prelen: 26 + - number: 19 + permit: true + net_num: 20.0.0.0 + mask: 16 + condition_list: + - condition: ge + prelen: 17 + - condition: le + prelen: 18 + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml new file mode 100644 index 00000000..09ef1a38 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_prefix_list \ No newline at end of file diff --git 
a/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml new file mode 100644 index 00000000..9b3bccf5 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_prefix_list \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE b/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/README.md b/ansible_collections/dellemc/os9/roles/os9_sflow/README.md new file mode 100644 index 00000000..80b3ed90 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/README.md @@ -0,0 +1,120 @@ +sFlow role +========== + +This role facilitates the configuration of global and interface level sFlow attributes. It supports the configuration of sFlow collectors at the global level, enable/disable, and specification of sFlow polling-interval, sample-rate, max-datagram size, and so on are supported at the interface and global level. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The sFlow role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os9_sflow` (dictionary) contains keys along with *interface name* (dictionary) +- Interface name can correspond to any of the valid os9 physical interfaces with the unique interface identifier name +- Interface name must be in * * format; physical interface name can be in *fortyGigE 1/1* format +- Variables and values are case-sensitive + +**os9_sflow keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``sflow_enable`` | boolean: true,false\* | Enables sFlow at a global level | os9 | +| ``collector`` | list | Configures collector information (see ``collector.*``); only two 
collectors can be configured on os9 devices | os9 | +| ``collector.collector_ip`` | string (required) | Configures an IPv4/IPv6 address for the collector | os9 | +| ``collector.agent_addr`` | string (required) | Configures an IPv4/IPv6 address for the sFlow agent to the collector | os9 | +| ``collector.udp_port`` | integer | Configures UDP port range at the collector level (1 to 65535) | os9 | +| ``collector.max_datagram_size`` | integer | Configures the maximum datagram size for the sFlow datagrams generated (400 to 1500) | os9 | +| ``collector.vrf`` | boolean: true,false* | Configures the management VRF to reach collector if set to true; can be enabled only for IPv4 collector addresses | os9 | +| ``polling_interval`` | integer | Configures the global default counter polling-interval (15 to 86400) | os9 | +| ``sample_rate`` | integer | Configures the global default sample-rate (256 to 8388608) | os9 | +| ``extended_switch`` | boolean: true,false\* | Enables packing extended information for the switch if set to true | os9 | +| ``max_header_size`` | boolean: true,false\* | Enables extended header copy size of 256 bytes if set to true at the global level | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. 
+ +**interface name keys** + +| Key | Type | Notes | +|------------|---------------------------|---------------------------------------------------------| +| ``sflow_enable`` | boolean: true,false\* | Enables sFlow at the interface level | +| ``ingress_enable`` | boolean: true,false\* | Enables ingress sFlow at the interface level | +| ``polling_interval`` | integer | Configures the interface level default counter polling-interval (15 to 86400) | +| ``max_header_size`` | boolean: true,false\* | Enables extended header copy size of 256 bytes if set to true at the interface level | +| ``sample_rate`` | integer | Configures the interface level default sample-rate (256 to 8388608) | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories,or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_sflow* role to configure sFlow attributes at interface and global level. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_sflow* role. By including the role, you automatically get access to all of the tasks to configure sFlow features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_sflow: + sflow_enable: true + collector: + - collector_ip: 1.1.1.1 + agent_addr: 2.2.2.2 + udp_port: 2 + max_datagram_size: 1000 + vrf: true + state: present + polling_interval: 30 + sample_rate: 1024 + extended_switch : true + max_header_size: true + fortyGigE 1/1: + sflow_enable : true + ingress_enable: true + polling_interval: 30 + sample_rate: 1024 + max_header_size: true + +**Simple playbook to setup sflow — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_sflow + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml new file mode 100644 index 00000000..ecfc7066 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_sflow \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml new file mode 100644 index 00000000..1441cc30 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_sflow \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml new file mode 100644 index 00000000..ca9409c6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_sflow role facilitates the configuration of sflow attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml new file mode 100644 index 00000000..63d0c2a8 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating sflow configuration for os9" + template: + src: os9_sflow.j2 + dest: "{{ build_dir }}/sflow9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning sflow configuration for os9" + dellemc.os9.os9_config: + src: os9_sflow.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2 b/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2 new file mode 100644 index 00000000..be9c47d1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2 @@ -0,0 +1,143 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure sflow commands for os9 Devices +os9_sflow: + sflow_enable: true + collector: + - collector_ip: 1.1.1.1 + agent_addr: 2.2.2.2 + udp_port: 2 + max_datagram_size: 1000 + vrf: true + state: present + polling_interval: 30 + sample_rate: 1024 + extended_switch : true + max_header_size: true + fortyGigE 1/1: + sflow_enable : true + ingress_enable: true + polling_interval: 30 + sample_rate: 1024 + max_header_size: true +###################################################} +{% if os9_sflow is defined and 
os9_sflow %} + +{% if os9_sflow %} +{% for key,value in os9_sflow.items() %} + {% if key == "sflow_enable" %} + {% if value %} +sflow enable + {% else %} +no sflow enable + {% endif %} + + {% elif key == "collector" %} + {% if value %} + {% for item in value %} + {% if item.state is defined and item.state == "absent" %} + {% if item.collector_ip is defined and item.agent_addr is defined %} + {% if item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size and item.vrf is defined and item.vrf %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} vrf management + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.vrf is defined and item.vrf %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} vrf management + {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf and item.max_datagram_size is defined and item.max_datagram_size %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }} vrf management + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} + {% elif item.collector_ip and item.agent_addr and item.max_datagram_size is defined and item.max_datagram_size %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} 
max-datagram-size {{ item.max_datagram_size }} + {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} vrf management + {% elif item.collector_ip and item.agent_addr %} +no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} + {% endif %} + {% endif %} + {% else %} + {% if item.collector_ip is defined and item.agent_addr is defined %} + {% if item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size and item.vrf is defined and item.vrf %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} vrf management + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.vrf is defined and item.vrf %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} vrf management + {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf and item.max_datagram_size is defined and item.max_datagram_size %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }} vrf management + {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} + {% elif item.collector_ip and item.agent_addr and item.max_datagram_size is defined and item.max_datagram_size %} +sflow collector {{ 
item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }} + {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} vrf management + {% elif item.collector_ip and item.agent_addr %} +sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key =="polling_interval" %} + {% if value %} +sflow polling-interval {{ value }} + {% else %} +no sflow polling-interval + {% endif %} + + {% elif key =="sample_rate" %} + {% if value %} +sflow sample-rate {{ value }} + {% else %} +no sflow sample-rate + {% endif %} + + {% elif key == "extended_switch" %} + {% if value %} +sflow extended-switch enable + {% else %} +no sflow extended-switch enable + {% endif %} + + {% elif key == "max_header_size" %} + {% if value %} +sflow max-header-size extended + {% else %} +no sflow max-header-size extended + {% endif %} + + {% elif '/' in key %} + {% set intf_vars = os9_sflow[key] %} +interface {{ key }} + {% if intf_vars.sflow_enable is defined and intf_vars.sflow_enable %} + sflow enable + {% else %} + no sflow enable + {% endif %} + {% if intf_vars.ingress_enable is defined and intf_vars.ingress_enable %} + sflow ingress-enable + {% else %} + no sflow ingress-enable + {% endif %} + {% if intf_vars.max_header_size is defined and intf_vars.max_header_size %} + sflow max-header-size extended + {% else %} + no sflow max-header-size extended + {% endif %} + {% if intf_vars.polling_interval is defined and intf_vars.polling_interval %} + sflow polling-interval {{ intf_vars.polling_interval }} + {% else %} + no sflow polling-interval + {% endif %} + {% if intf_vars.sample_rate is defined and intf_vars.sample_rate %} + sflow sample-rate {{ intf_vars.sample_rate }} + {% else %} + no sflow sample-rate + {% endif %} + + {% endif %} +{% endfor %} +{% endif %} 
+{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml new file mode 100644 index 00000000..548611c0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml @@ -0,0 +1,35 @@ +--- +# vars file for dellemc.os9.os9_sflow, +# below gives a sample configuration +# Sample variables for OS9 device +os9_sflow: + sflow: true + collector: + - collector_ip: 1.1.1.1 + agent_addr: 2.2.2.2 + udp_port: + max_datagram_size: 1000 + vrf: true + state: present + - collector_ip: 2.2.2.2 + agent_addr: 2.2.2.2 + udp_port: 3 + max_datagram_size: 1002 + vrf: test + state: absent + polling_interval: 24 + sample_rate: 256 + extended_switch: true + max_header_size: true + fortyGigE 1/1: + sflow: true + ingress_enable: true + polling_interval: 30 + sample_rate: 1024 + max_header_size: true + fortyGigE 1/2: + sflow: true + ingress_enable: true + polling_interval: 20 + sample_rate: 256 + max_header_size: true \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml new file mode 100644 index 00000000..8f931d3a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- 
hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_sflow \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml new file mode 100644 index 00000000..e79c81ba --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_sflow \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/README.md b/ansible_collections/dellemc/os9/roles/os9_snmp/README.md new file mode 100644 index 00000000..0e458b96 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/README.md @@ -0,0 +1,192 @@ +SNMP role +========= + +This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The SNMP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_snmp keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``snmp_contact`` | string | Configures SNMP contact information | os9 | +| ``snmp_server_vrf`` | string | Specifies vrf instance for snmp requests, removes the vrf instance for snmp requests if kept blank | os9 | +| ``snmp_location`` | string | Configures SNMP location information | os9 | +| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os9 | +| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os9 | +| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os9 | +| ``snmp_community.state`` 
| string: absent,present\* | Deletes the SNMP community information if set to absent | os9 | +| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os9 | +| ``snmp_host.ipv4`` | string | Configures the IPv4 address for the SNMP trap host | os9 | +| ``snmp_host.ipv6`` | string | Configures the IPv6 address for the SNMP trap host | os9 | +| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host | os9 | +| ``snmp_host.udpport`` | string | Configures the UDP port number of the SNMP trap host (0 to 65535) | os9 | +| ``snmp_host.version`` | string (required) | Specifies the SNMP version of the host (either 1 or 2c or 3) | os9 | +| ``snmp_host.vrf`` | list | Configures the SNMP VRF trap for the SNMP host (list of VRF names) | os9 | +| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os9 | +| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os9 | +| ``snmp_traps.name`` | string | Enables SNMP traps | os9 | +| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os9 | +| ``snmp_engine_id`` | string | Configures the SNMPv3 engineID for the local agent | os9 | +| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os9 | +| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os9 | +| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os9 | +| ``snmp_view.include`` | boolean: true,false | Specifies whether the MIB family should be included or excluded from the view | os9 | +| ``snmp_user`` | list | Configures SNMP users for each group name (see ``snmp_user.*``) | os9 | +| ``snmp_user.name`` | string (required) | Configures the SNMP user name | os9 | +| ``snmp_user.group_name`` | string (required) | Configures the SNMP group name for the user | os9 | +| ``snmp_user.version`` | string: 1,2c,3 (required) | 
Configures a user entry with the specified SNMP version (either 1 or 2c or 3) | os9 | +| ``snmp_user.access_list`` | dictionary | Configures access-list details; required to configure or negate if defined | os9 | +| ``snmp_user.access_list.access`` | string | Configures the access-list associated with the user | os9 | +| ``snmp_user.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the user | os9 | +| ``snmp_user.encryption`` | boolean: true,false\* | Specifies the encryption for the SNMP user if set to true | os9 | +| ``snmp_user.auth_algorithm`` | string: md5,sha | Configures the authorization algorithm for the SNMP user | os9 | +| ``snmp_user.auth_pass`` | string | Configures the authentication password for the user | os9 | +| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os9 | +| ``snmp_group`` | list | Configures SNMP groups (see ``snmp_group.*``) | os9 | +| ``snmp_group.name`` | string (required) | Configures the SNMP group name | os9 | +| ``snmp_group.version`` | string (required) | Configures the group entry with the specified SNMP version (either 1 or 2c or 3) | os9 | +| ``snmp_group.access_list`` | dict | Configures access-list entries for the group; required to configure or negate if defined | os9 | +| ``snmp_group.access_list.access`` | string | Configures the access-list associated with the group | os9 | +| ``snmp_group.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the group | os9 | +| ``snmp_group.view`` | dict | Configures view entries for the group; required to configure or negate if defined | os9 | +| ``snmp_group.view.notify`` | string | Configures notify view associated with the group | os9 | +| ``snmp_group.view.read`` | string | Configures read view associated with the group | os9 | +| ``snmp_group.view.write`` | string | Configures write view associated with the group | os9 | +| ``snmp_group.context`` | list | Configures context list entries 
(see ``snmp_group.context.*``) | os9 | +| ``snmp_group.context.context_name`` | string | Configures SNMP-group entries with specified context name | os9 | +| ``snmp_group.context.access_list`` | dictionary | Configures access-list entries for the group with context | os9 | +| ``snmp_group.context.access_list.access`` | string | Configures the access-list associated with the group | os9 | +| ``snmp_group.context.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the group | os9 | +| ``snmp_group.context.view`` | dictionary | Configures view entries for the group with context | os9 | +| ``snmp_group.context.view.notify`` | string | Configures notify view associated with the group | os9 | +| ``snmp_group.context.view.read`` | string | Configures read view associated with the group | os9 | +| ``snmp_group.context.view.write`` | string | Configures write view associated with the group | os9 | +| ``snmp_group.context.state`` | string: absent,present | Deletes the context entries with the group if set to absent | os9 | +| ``snmp_group.state`` | string: absent,present\* | Deletes the associated SNMP group if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_snmp: + snmp_contact: test + snmp_location: chennai + snmp_server_vrf: test + snmp_community: + - name: public + access_mode: ro + state: present + - name: private + access_mode: rw + state: present + snmp_host: + - ipv6: 2001:4898:f0:f09b::2000 + version: "3" + security_level: auth + communitystring: + udpport: + state: absent + snmp_traps: + - name: config + state: present + snmp_engine_id: 1234567890 + snmp_view: + - name: view_1 + oid_subtree: 2 + include: false + state: absent + snmp_user: + - name: user_1 + group_name: grp1 + version: 3 + access_list: + access: a1 + ipv6: ip1 + encryption: true + auth_algorithm: md5 + auth_pass: 12345678 + state: present + snmp_group: + - name: group_1 + version: 2c + access_list: + access: a1 + ipv6: ip1 + state: absent + - name: group_2 + version: 3 + security_level: priv + access_list: + access: a1 + ipv6: ip1 + context: + - context_name: c1 + state: present + - context_name: c2 + access_list: + access: a1 + view: + read: r1 + state: present + state: present + +**Simple playbook to setup snmp — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_snmp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml new file mode 100644 index 00000000..22c7b89b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_snmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml new file mode 100644 index 00000000..f04bb2b5 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_snmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml new file mode 100644 index 00000000..9c7bc2e0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml new file mode 100644 index 00000000..18e77e05 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating SNMP configuration for os9" + template: + src: os9_snmp.j2 + dest: "{{ build_dir }}/snmp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning SNMP configuration for os9" + dellemc.os9.os9_config: + src: os9_snmp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2 b/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2 new file mode 100644 index 00000000..6033604e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2 @@ -0,0 +1,524 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure snmp commands for os9 Devices +os9_snmp: + snmp_contact: test + snmp_location: chennai + snmp_server_vrf: test + snmp_community: + - name: public + access_mode: ro + state: present + - name: private + access_mode: rw + state: present + snmp_context: + - name: SNMP1 + state: absent + snmp_packet_size: 8 + snmp_host: + - ipv4: 1.1.1.1 + version: 3 + security_level: auth + vrf: + - test + - management + communitystring: msft + udpport: 162 + state: absent + - ipv6: 2001:4898:f0:f09b::2000 + version: 1 + state: present + snmp_traps: + - name: 
config + state: present + snmp_engine_id: 1234567890 + snmp_view: + - name: view_1 + oid_subtree: 2 + include: true + state: absent + snmp_user: + - name: user_1 + group_name: grp1 + encryption : true + auth_algorithm: md5 + auth_pass: 12345678 + version: 3 + access_list: + access: a2 + ipv6: ip1 + state: present + - name: user_2 + group_name: grp2 + version: "2c" + access_list: + ipv6: ip1 + state: absent + snmp_group: + - name: group_1 + version: 2c + access_list: + access: a1 + ipv6: ip1 + view: + read: r1 + write: w1 + notify: n1 + context: + - context_name: c1 + access_list: + access: a1 + ipv6: ip1 + view: + read: r1 + write: w1 + notify: n1 + state: present + - context_name: c2 + state: present + state: present + - name: group_2 + version: 3 + security_level: auth + access_list: + access: a1 + ipv6: ip1 + state: present +###################################################} +{% if os9_snmp is defined and os9_snmp %} + +{% if os9_snmp %} +{% for key,value in os9_snmp.items() %} + {% if key == "snmp_contact" %} + {% if value %} +snmp-server contact {{ value }} + {% else %} +no snmp-server contact + {% endif %} + + {% elif key == "snmp_location" %} + {% if value %} +snmp-server location {{ value }} + {% else %} +no snmp-server location + {% endif %} + + {% elif key == "snmp_server_vrf" %} + {% if value %} +snmp-server vrf {{ value }} + {% else %} +no snmp-server vrf + {% endif %} + + {% elif key == "snmp_community" %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server community {{ item.name }} + {% else %} + {% if item.access_mode is defined and item.access_mode %} +snmp-server community {{ item.name }} {{ item.access_mode }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "snmp_packet_size" %} + {% if value %} +snmp-server packetsize {{ value }} + {% else %} +no snmp-server packetsize + {% endif %} + + {% 
elif key == "snmp_context" %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server context {{ item.name }} + {% else %} +snmp-server context {{ item.name }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "snmp_host" and value %} + {% for item in value %} + {% if item.version is defined and item.version == "2c" or item.version == 1 %} + {% set my_version = item.version|string %} + {% elif item.version is defined and item.version == 3 %} + {% if item.security_level is defined and item.security_level %} + {% set my_version = "3"+" "+item.security_level %} + {% endif %} + {% endif %} + {% if item.state is defined and item.state == "absent" %} + {% if item.vrf is defined and item.vrf %} + {% for vrf_name in item.vrf %} + {% if item.ipv4 is defined and item.ipv4 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} + {% endif %} + {% else %} +no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} public udp-port 162 + {% endif %} + {% endif %} + {% endfor %} + {% else %} + {% if item.ipv4 is defined and item.ipv4 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} + {% endif %} + {% else %} +no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} public udp-port 
162 + {% endif %} + {% elif item.ipv6 is defined and item.ipv6 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} + {% endif %} + {% else %} +no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} public udp-port 162 + {% endif %} + {% endif %} + {% endif %} + {% else %} + {% if item.vrf is defined and item.vrf %} + {% for vrf_name in item.vrf %} + {% if item.ipv4 is defined and item.ipv4 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162 + {% endif %} + {% else %} +snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} public udp-port 162 + {% endif %} + {% endif %} + {% endfor %} + {% else %} + {% if item.ipv4 is defined and item.ipv4 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} +snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162 + {% endif %} + {% else %} +snmp-server host {{ item.ipv4 }} traps version {{ my_version }} public udp-port 162 + {% endif %} + {% elif item.ipv6 is defined and item.ipv6 %} + {% if item.communitystring is defined and item.communitystring %} + {% if item.udpport is defined and item.udpport %} 
+snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }} + {% else %} +snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162 + {% endif %} + {% else %} +snmp-server host {{ item.ipv6 }} traps version {{ my_version }} public udp-port 162 + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + + {% elif key == "snmp_traps" %} + {% if value %} + {% for val in value %} + {% if val.name is defined and val.name %} + {% if val.state is defined and val.state == "absent" %} + {% if val.name == "all" %} +no snmp-server enable traps + {% else %} +no snmp-server enable traps {{ val.name }} + {% endif %} + {% else %} + {% if val.name == "all" %} + {% set trap_list = ['bgp','snmp authentication coldstart linkdown linkup syslog-reachable syslog-unreachable','vrrp','lacp','entity','stack','stp','ecfm','vlt','fips','ets','xstp','isis','config','pfc','envmon cam-utilization fan supply temperature','ecmp'] %} + {% for name in trap_list %} +snmp-server enable traps {{ name }} + {% endfor %} + {% else %} +snmp-server enable traps {{ val.name }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "snmp_engine_id" %} + {% if value %} +snmp-server engineID local {{ value }} + {% else %} +no snmp-server engineID local + {% endif %} + + {% elif key == "snmp_view" %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.oid_subtree is defined and item.oid_subtree %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server view {{ item.name }} {{ item.oid_subtree }} + {% else %} + {% if item.include is defined %} + {% if item.include %} +snmp-server view {{ item.name }} {{ item.oid_subtree }} included + {% elif not item.include %} +snmp-server view {{ item.name }} {{ item.oid_subtree }} excluded + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif 
%} + {% endfor %} + {% endif %} + + {% elif key == "snmp_user" %} + {% if value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.group_name is defined and item.group_name %} + {% if item.version is defined and item.version == "2c" or item.version == 1 %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} + {% else %} + {% if item.access_list is defined and item.access_list %} + {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.access_list.access is defined and item.access_list.access %} +snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} access {{ item.access_list.access }} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} ipv6 {{ item.access_list.ipv6 }} + {% endif %} + {% else %} +snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} + {% endif %} + {% endif %} + {% elif item.version is defined and item.version == 3 %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server user {{ item.name }} {{ item.group_name }} 3 + {% else %} + {% if item.access_list is defined and item.access_list %} + {% if item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ 
item.access_list.access }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} + {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} + {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} + {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and 
item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }} + {% elif item.access_list.access is defined and item.access_list.access %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 access {{ item.access_list.access }} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %} +snmp-server user {{ item.name }} {{ item.group_name }} 3 ipv6 {{ item.access_list.ipv6 }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "snmp_group" and value %} + {% for item in value %} + {% if item.name is defined and item.name %} + {% if item.version is defined and item.version == "2c" or item.version == 1 %} + {% set my_version = item.version|string %} + {% elif item.version is defined and item.version == 3 %} + {% if item.security_level is defined and item.security_level %} + {% set my_version = "3"+" "+item.security_level %} + {% endif %} + {% endif %} + {% if item.context is defined and item.context %} + {% set my_entry = [] %} + {% for it in item.context %} + {% if it.context_name is defined and it.context_name %} + {% if it.access_list is defined and it.access_list and it.view is defined and it.view %} + {% if it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and 
it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" write "+it.view.write+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 
is defined and it.access_list.ipv6 and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" write "+it.view.write) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write%} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" write "+it.view.write+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and 
it.view.notify %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write%} + {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" write "+it.view.write) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %} {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" write "+it.view.write+" read "+it.view.read) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is 
defined and it.view.notify %} {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+"read "+it.view.read) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write %} {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" write "+it.view.write) %}{% endif %} + {% endif %} + {% elif it.access_list is defined and it.access_list %} + {% if it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6) %}{% endif %} + {% elif it.access_list.access is defined and it.access_list.access %} + {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access) %}{% endif %} + {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 %} + {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6) %}{% endif %} + {% endif %} + {% elif it.view is defined and it.view %} + {% if it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %} + {% elif it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %} + {% elif it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" notify 
"+it.view.notify+" write "+it.view.write) %}{% endif %} + {% elif it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" read "+it.view.read+" write "+it.view.write) %}{% endif %} + {% elif it.view.read is defined and it.view.read %} + {% if my_entry.append("context "+it.context_name+" read "+it.view.read) %}{% endif %} + {% elif it.view.notify is defined and it.view.notify %} + {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify) %}{% endif %} + {% elif it.view.write is defined and it.view.write %} + {% if my_entry.append("context "+it.context_name+" write "+it.view.write) %}{% endif %} + {% endif %} + {% else %} + {% if my_entry.append("context "+it.context_name) %}{% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if item.access_list is defined and item.access_list and item.view is defined and item.view %} + {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" read "+item.view.read %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read and item.view.write is 
defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" read "+item.view.read+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" read "+item.view.read %} + {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %} + {% 
set my_en = "access "+item.access_list.access+" notify "+item.view.notify+" read "+item.view.read %} {% elif item.access_list.access is defined and item.access_list.access and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" read "+item.view.read+" write "+item.view.write %} {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access +" notify "+item.view.notify+" write "+item.view.write %} + {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify %} + {% set my_en = "access "+item.access_list.access +" notify "+item.view.notify %} + {% elif item.access_list.access is defined and item.access_list.access and item.view.read is defined and item.view.read %} + {% set my_en = "access "+item.access_list.access+" read "+item.view.read %} + {% elif item.access_list.access is defined and item.access_list.access and item.view.write is defined and item.view.write %} + {% set my_en = "access "+item.access_list.access+" write "+item.view.write %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "ipv6 "+item.access_list.ipv6+" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %} + {% set my_en = "ipv6 "+item.access_list.ipv6+" notify "+item.view.notify+" read "+item.view.read %} {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and 
item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "ipv6 "+item.access_list.ipv6+" read "+item.view.read+" write "+item.view.write %} {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %} + {% set my_en = "ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" write "+item.view.write %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify %} + {% set my_en = "ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read %} + {% set my_en = "ipv6 "+item.access_list.ipv6+" read "+item.view.read %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.write is defined and item.view.write %} + {% set my_en = "ipv6 "+item.access_list.ipv6+" write "+item.view.write %} + + {% endif %} + {% elif item.access_list is defined and item.access_list %} + {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %} + {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 %} + {% elif item.access_list.access is defined and item.access_list.access %} + {% set my_en = "access "+item.access_list.access %} + {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %} + {% set my_en = "ipv6 "+item.access_list.ipv6 %} + {% endif %} + {% elif item.view is defined and item.view %} + {% if item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %} + {% elif item.view.notify is defined and 
item.view.notify and item.view.read is defined and item.view.read %} + {% set my_en = "notify "+item.view.notify+" read "+item.view.read %} + {% elif item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %} + {% set my_en ="notify "+item.view.notify+" write "+item.view.write %} + {% elif item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %} + {% set my_en = "read "+item.view.read+" write "+item.view.write %} + {% elif item.view.read is defined and item.view.read %} + {% set my_en = "read "+item.view.read %} + {% elif item.view.notify is defined and item.view.notify %} + {% set my_en = "notify "+item.view.notify %} + {% elif item.view.write is defined and item.view.write %} + {% set my_en = "write "+item.view.write %} + {% endif %} + {% endif %} + {% if item.state is defined and item.state == "absent" %} +no snmp-server group {{ item.name }} {{ my_version }} + {% else %} + {% if my_en is defined and my_en %} +snmp-server group {{ item.name }} {{ my_version }} {{ my_en }} + {% else %} +snmp-server group {{ item.name }} {{ my_version }} + {% endif %} + {% endif %} + {% set my_en = "" %} + {% if item.context is defined %} + {% set i = 0 %} + {% for it in item.context %} + {% if it.state is defined and it.state == "absent" %} +no snmp-server group {{ item.name }} {{ my_version }} context {{ it.context_name }} + {% else %} +snmp-server group {{ item.name }} {{ my_version }} {{ my_entry[i] }} + {% endif %} + {% set i = i+1 %} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endfor %} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 
ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml new file mode 100644 index 00000000..98cba1e5 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml @@ -0,0 +1,83 @@ +--- +# vars file for dellemc.os9.os9_snmp, +# below gives a sample configuration +# Sample variables for OS9 device +os9_snmp: + snmp_contact: test + snmp_location: chennai + snmp_server_vrf: test + snmp_context: + - name: SNMP1 + state: present + snmp_packet_size: 16 + snmp_community: + - name: public + access_mode: ro + state: present + - name: private + access_mode: rw + state: present + snmp_host: + - ipv4: 1.1.1.1 + version: 3 + vrf: + - test + - management + security_level: noauth + communitystring: ab + udpport: 1 + state: absent + - ipv6: 2001:4898:f0:f09b::2000 + version: 1 + state: present + snmp_traps: + - name: config + state: present + snmp_engine_id: 1234567890 + snmp_view: + - name: view_1 + oid_subtree: 2 + include: false + state: absent + snmp_user: + - name: user_1 + group_name: grp1 + version: 3 + access_list: + access: a1 + encryption: true + auth_algorithm: md5 + auth_pass: 12345678 + state: present + - name: user_2 + group_name: grp1 + version: "2c" + access_list: + access: a2 + ipv6: ip1 + state: present + snmp_group: + - name: group_1 + version: 2c + access_list: + access: a1 + ipv6: ip1 + context: + - context_name: c1 + state: present + - context_name: c2 + access_list: + access: a1 + ipv6: ip1 + view: + notify: n1 + read: r1 + write: w1 + state: absent + - name: group_2 + version: 3 + security_level: priv + access_list: + access: a1 + ipv6: ip1 + state: 
present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml new file mode 100644 index 00000000..cc1736a4 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_snmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml new file mode 100644 index 00000000..9fa36434 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_snmp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/LICENSE b/ansible_collections/dellemc/os9/roles/os9_system/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/README.md b/ansible_collections/dellemc/os9/roles/os9_system/README.md new file mode 100644 index 00000000..36e5e163 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/README.md @@ -0,0 +1,223 @@ +System role +=========== + +This role facilitates the configuration of global system attributes, and it specifically enables configuration of hostname and enable password. It supports the configuration of management route, hash algorithm, clock, line terminal, banner, and reload type. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The System role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_system keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``hostname`` | string | Configures a hostname to the device (no negate command) | os9 | +| ``unique_hostname`` | boolean: true,false\* | Configures a unique hostname in the switch, only applicable to the FN IOM and MXL platform modules | os9 | +| ``enable_password`` | string | Configures the enable password | os9 | +| ``management_rt`` | list | Configures the management route | os9 | +| ``management_rt.ip`` | string (required) | Configures the IP destination prefix for the management route (A.B.C.D format for 
IPv4, A:B:C:D::E format for IPv6) | os9 | +| ``management_rt.ipv4`` | boolean: true\*,false | Specifies if the management route is an IPv4 or IPv6 address; if false or undefined, the IP is set as IPv6 | os9 | +| ``management_rt.state`` | string: absent,present\* | Deletes the management route if set to absent | os9 | +| ``line_terminal`` | dictionary | Configures the terminal line (see ``line_terminal.*``) | os9 | +| ``line_terminal.`` | dictionary | Configures the primary or virtual terminal line (console or vty values) | os9 | +| ``.exec_timeout`` | string | Configures the EXEC timeout ( values) | os9 | +| ``.exec_banner`` | boolean: true,false\* | Configures the EXEC banner | os9 | +| ``.login_banner`` | boolean: true,false\* | Configures the login banner | os9 | +| ``.motd_banner`` | boolean: true,false\* | Configures the MOTD banner | os9 | +| ``service_passwd_encryption`` | boolean: true,false | Configures system password encryption | os9 | +| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os9 | +| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os9 | +| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os9 | +| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os9 | +| ``algo.stack_unit`` | integer | Configures the stack-unit for the hashing algorithm | os9 | +| ``algo.port_set`` | integer | Configures the port-pipe set for the hashing algorithm | os9 | +| ``algo.state`` | string: absent,present\* | Deletes the hashing algorithm if set to absent | os9 | +| ``hash_algo.seed`` | list | Configures the hashing algorithm seed (see ``seed.*``) | os9 | +| ``seed.value`` | integer (required) | Configures the hashing algorithm seed value | os9 | +| ``seed.stack_unit`` | integer | Configures the stack-unit for the hashing algorithm seed | os9 | +| ``seed.port_set`` | integer | Configures the port-pipe set for the hashing algorithm seed | os9 
| +| ``seed.state`` | string: absent,present\* | Deletes the hashing algorithm seed if set to absent | os9 | +| ``banner`` | dictionary | Configures global banner commands (see ``banner.*``) | os9 | +| ``banner.login`` | dictionary | Configures the login banner (see ``login.*``) | os9 | +| ``login.ack_enable`` | boolean: true,false | Configures positive acknowledgement | os9 | +| ``login.ack_prompt`` | string | Configures the positive acknowledgement prompt | os9 | +| ``login.keyboard_interactive`` | boolean: true,false | Configures the keyboard interactive prompt | os9 | +| ``login.banner_text`` | string | Configures the banner text for the login banner; 'c c' format where 'c' is a delimiting character | os9 | +| ``banner.exec`` | string | Configures the banner text for EXEC process creation banner; 'c c' where 'c' is a delimiting character for os9 | os9 | +| ``banner.motd`` | string | Configures the banner text for the message of the day banner; 'c c' where 'c' is a delimiting character for os9 | os9 | +| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os9 | +| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os9 | +| ``load_balance.tcp_udp`` | boolean: true, false | Configures whether to use TCP/UDP ports in packets for hashing algorithm | os9 | +| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm; mutually exclusive with *load_balance.tcp_udp* for os9 devices (see ``ip_selection.*``) | os9 | +| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os9 | +| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os9 | +| ``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm; mutually exclusive with *load_balance.tcp_udp* for os9 devices (see ``ipv6_selection.*``) | os9 | 
+| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os9 | +| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os9 | +| ``load_balance.tunnel`` | dictionary | Configures tunnel key fields to use in hashing algorithm (see ``tunnel.*``) | os9 | +| ``tunnel.hash_field`` | list | Configures hash field selection (see ``hash_field.*``) | os9 | +| ``hash_field.name`` | string (required) | Configures the hash field selection | os9 | +| ``hash_field.header`` | string | Configures header for load balance | os9 | +| ``hash_field.state`` | string: absent,present\* | Deletes the hash key selection field if set to absent | os9 | +| ``clock`` | dictionary | Configures time-of-day clock (see ``clock.*``) | os9 | +| ``clock.summer_time`` | dictionary | Configures summer (daylight savings) time (see ``summer_time.*``) | os9 | +| ``summer_time.timezone_name`` | string (required) | Configures the time zone name | os9 | +| ``summer_time.type`` | string (required) | Configures absolute or recurring summer time | os9 | +| ``summer_time.start_datetime`` | string | Configures start datetime; format | os9 | +| ``summer_time.end_datetime`` | string | Configures end datetime; format | os9 | +| ``summer_time.offset_mins`` | integer | Configures offset minutes to add (1 to 1440) | os9 | +| ``summer_time.state`` | string: absent,present\* | Deletes the summer time clock if set to absent | os9 | +| ``clock.timezone`` | dictionary | Configures timezone (see ``timezone.*``) | os9 | +| ``timezone.name`` | string (required) | Configures the timezone name | os9 | +| ``timezone.offset_hours`` | integer | Configures offset hours to add (-23 to 23) | os9 | +| ``timezone.offset_mins`` | integer | Configures offset minutes to add (0 to 59) | os9 | +| ``timezone.state`` | string: absent,present\* | Deletes the time zone if set to absent | os9 | +| ``reload_type`` | dictionary | Configures the reload type (see 
``reload_type.*``) | os9 | +| ``reload_type.auto_save`` | boolean: true,false\* | Configures the auto save option for downloaded configuration/script file | os9 | +| ``reload_type.boot_type`` | string: bmp-reload,normal-reload | Configures the boot type | os9 | +| ``reload_type.boot_type_state`` | string: absent,present\* | Deletes the boot type if set to absent | os9 | +| ``reload_type.config_scr_download`` | boolean: true,false\* | Configures whether config/script file needs to be downloaded | os9 | +| ``reload_type.dhcp_timeout`` | integer | Configures DHCP timeout in minutes (0 to 50) | os9 | +| ``reload_type.retry_count`` | integer | Configures the number of retries for image and configuration download (0 to 6) | os9 | +| ``reload_type.relay`` | boolean: true,false\* | Configures the addition of option82 in DHCP client packets | os9 | +| ``reload_type.relay_remote_id`` | string | Configures customize remote ID | os9 | +| ``reload_type.vendor_class_identifier`` | boolean: true,false\* | Configures vendor-class-identifier for DHCP option60 | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +******************** + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_system role* to completely set the NTP server, hostname, enable password, management route, hash algorithm, clock, line terminal, banner and reload type. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The system role writes a simple playbook that only references the *os9_system* role. By including the role, you automatically get access to all of the tasks to configure system features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_system: + hostname: host1 + unique_hostname: True + enable_password: dell + service_passwd_encryption: true + banner: + exec: t hai t + login: + ack_enable: true + ack_prompt: testbanner + keyboard_interactive: true + banner_text: cloginbannerc + motd: t ansibletest t + hash_algo: + algo: + - name: lag + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + - name: ecmp + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + seed: + - value: 3 + stack_unit: 0 + port_set: 0 + state: present + - value: 2 + state: present + load_balance: + ingress_port: true + ip_selection: + - field: vlan dest-ip + state: present + ipv6_selection: + - field: dest-ipv6 vlan + state: present + tunnel: + hash_field: + - name: mac-in-mac + header: tunnel-header-mac + state: present + clock: + summer_time: + timezone_name: PST + type: date + start_datetime: 2 jan 1993 22:33 + end_datetime: 3 jan 2017 22:33 + offset_mins: 20 + timezone: + name: IST + offset_hours: -5 + offset_mins: 20 + reload_type: + auto_save: true + boot_type: normal-reload + boot_type_state: absent + config_scr_download: true + dhcp_timeout: 5 + retry_count: 3 + relay: true + 
relay_remote_id: ho + vendor_class_identifier: aa + management_rt: + - ip: 10.16.148.254 + state: present + ipv4: True + line_terminal: + vty 0: + exec_timeout: 40 + exec_banner: true + vty 1: + exec_timeout: 40 200 + motd_banner: true + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_system + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml new file mode 100644 index 00000000..2892046b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml new file mode 100644 index 00000000..d19126d0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml new file mode 100644 index 00000000..9b716440 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_system role facilitates the configuration of system attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml new file mode 100644 index 00000000..a52c5041 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating system configuration for os9" + template: + src: os9_system.j2 + dest: "{{ build_dir }}/system9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning system configuration for os9" + dellemc.os9.os9_config: + src: os9_system.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2 b/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2 new file mode 100644 index 00000000..594179c9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2 @@ -0,0 +1,422 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# + +Purpose: +Configure system commands for os9 Devices + +os9_system: + hostname: os9 + unique_hostname: true + enable_password: xxxxx + service_passwd_encryption: true + banner: + exec: t hai t + login: + ack_enable: true + ack_prompt: testbanner + keyboard_interactive: true + banner_text: cloginbannerc + motd: t ansibletest t + hash_algo: + algo: + - name: lag + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + - name: ecmp + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + seed: + - value: 3 + stack_unit: 0 
+ port_set: 0 + state: present + - value: 2 + state: present + load_balance: + ingress_port: true + ip_selection: + - field: vlan dest-ip + state: present + ipv6_selection: + - field: dest-ipv6 vlan + state: present + tunnel: + hash_field: + - name: mac-in-mac + header: tunnel-header-ipv4 + state: present + clock: + summer_time: + timezone_name: PST + type: date + start_datetime: 2 jan 1991 22:33 + end_datetime: 3 jan 2017 22:33 + offset_mins: 20 + timezone: + name: IST + offset_hours: -5 + offset_mins: 20 + reload_type: + auto_save: true + boot_type: normal-reload + config_scr_download: true + dhcp_timeout: 5 + retry_count: 3 + relay: true + relay_remote_id: host + vendor_class_identifier: aa + management_rt: + - ip: 10.16.148.254 + state: present + ipv4: True + line_terminal: + vty 0: + exec_timeout: 40 + exec_banner: true + vty 1: + exec_timeout: 40 200 + motd_banner: true +###################################################} +{% if os9_system is defined and os9_system %} + +{% if os9_system.hostname is defined and os9_system.hostname %} +hostname {{ os9_system.hostname }} +{% endif %} +{% if os9_system %} +{% for key,value in os9_system.items() %} + {% if key == "unique_hostname" %} + {% if value %} +feature unique-name + {% else %} +no feature unique-name + {% endif %} + + {% elif key == "enable_password" %} + {% if value %} +enable password {{ value }} + {% else %} +no enable password + {% endif %} + + {% elif key == "service_passwd_encryption" %} + {% if value %} +service password-encryption + {% else %} +no service password-encryption + {% endif %} + + {% elif key == "clock" and value %} + {% if value.summer_time is defined and value.summer_time %} + {% set time_vars = value.summer_time %} + {% if time_vars.state is defined and time_vars.state == "absent" %} +no clock summer-time + {% else %} + {% if time_vars.timezone_name is defined and time_vars.timezone_name %} + {% if time_vars.type is defined and time_vars.type %} + {% if time_vars.start_datetime is 
defined and time_vars.start_datetime %} + {% if time_vars.end_datetime is defined and time_vars.end_datetime %} + {% if time_vars.offset_mins is defined and time_vars.offset_mins %} +clock summer-time {{ time_vars.timezone_name }} {{ time_vars.type }} {{ time_vars.start_datetime }} {{ time_vars.end_datetime }} {{ time_vars.offset_mins }} + {% else %} +clock summer-time {{ time_vars.timezone_name }} {{ time_vars.type }} {{ time_vars.start_datetime }} {{ time_vars.end_datetime }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if value.timezone is defined and value.timezone %} + {% set timezone_vars = value.timezone %} + {% if timezone_vars.state is defined and timezone_vars.state == "absent" %} +no clock timezone + {% else %} + {% if timezone_vars.name is defined and timezone_vars.name %} + {% if timezone_vars.offset_hours is defined and timezone_vars.offset_hours %} + {% if timezone_vars.offset_mins is defined and timezone_vars.offset_mins %} +clock timezone {{ timezone_vars.name }} {{ timezone_vars.offset_hours }} {{ timezone_vars.offset_mins }} + {% else %} +clock timezone {{ timezone_vars.name }} {{ timezone_vars.offset_hours }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + + + {% elif key == "hash_algo" and value %} + {% if value.algo is defined and value.algo %} + {% for item in value.algo %} + {% if item.name is defined and item.name %} + {% if item.mode is defined and item.mode %} + {% if item.state is defined and item.state == "absent" %} + {% if item.stack_unit is defined and item.stack_unit >= 0 %} + {% if item.port_set is defined and item.port_set >= 0 %} +no hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }} + {% else %} +no hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} + {% endif %} + {% else %} +no hash-algorithm {{ item.name }} {{ item.mode }} + {% endif %} + {% else %} + {% if 
item.stack_unit is defined and item.stack_unit >= 0 %} + {% if item.port_set is defined and item.port_set >= 0 %} +hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }} + {% else %} +hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} + {% endif %} + {% else %} +hash-algorithm {{ item.name }} {{ item.mode }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.seed is defined and value.seed %} + {% for item in value.seed %} + {% if item.value is defined and item.value >= 0 %} + {% if item.state is defined and item.state == "absent" %} + {% if item.stack_unit is defined and item.stack_unit >= 0 %} + {% if item.port_set is defined and item.port_set >= 0 %} +no hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }} + {% else %} +no hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} + {% endif %} + {% else %} +no hash-algorithm seed {{ item.value }} + {% endif %} + {% else %} + {% if item.stack_unit is defined and item.stack_unit >= 0 %} + {% if item.port_set is defined and item.port_set >= 0 %} +hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }} + {% else %} +hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} + {% endif %} + {% else %} +hash-algorithm seed {{ item.value }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% elif key == "banner" and value %} + {% if value.exec is defined %} + {% if value.exec %} +banner exec {{ value.exec }} + {% else %} +no banner exec + {% endif %} + {% endif %} + {% if value.motd is defined %} + {% if value.motd %} +banner motd {{ value.motd }} + {% else %} +no banner motd + {% endif %} + {% endif %} + {% if value.login is defined and value.login %} + {% set login_vars = value.login %} + {% if login_vars.ack_enable is defined %} + {% if 
login_vars.ack_enable %} +banner login acknowledgment enable + {% else %} +no banner login acknowledgment enable + {% endif %} + {% endif %} + {% if login_vars.ack_prompt is defined %} + {% if login_vars.ack_prompt %} +banner login acknowledgment prompt {{ login_vars.ack_prompt }} + {% else %} +no banner login acknowledgment prompt + {% endif %} + {% endif %} + {% if login_vars.keyboard_interactive is defined %} + {% if login_vars.keyboard_interactive %} +banner login keyboard-interactive + {% else %} +no banner login keyboard-interactive + {% endif %} + {% endif %} + {% if login_vars.banner_text is defined %} + {% if login_vars.banner_text %} +banner login {{ login_vars.banner_text }} + {% else %} +no banner login + {% endif %} + {% endif %} + {% endif %} + {% elif key == "load_balance" and value %} + {% if value.ingress_port is defined %} + {% if value.ingress_port %} +load-balance ingress-port enable + {% else %} +no load-balance ingress-port enable + {% endif %} + {% endif %} + {% if value.tcp_udp is defined %} + {% if value.tcp_udp %} +load-balance tcp-udp enable + {% else %} +no load-balance tcp-udp enable + {% endif %} + {% endif %} + {% if value.ip_selection is defined and value.ip_selection %} + {% for item in value.ip_selection %} + {% if item.field is defined and item.field %} + {% if item.state is defined and item.state == "absent" %} +no load-balance ip-selection + {% else %} +load-balance ip-selection {{ item.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.ipv6_selection is defined and value.ipv6_selection %} + {% for item in value.ipv6_selection %} + {% if item.field is defined and item.field %} + {% if item.state is defined and item.state == "absent" %} +no load-balance ipv6-selection + {% else %} +load-balance ipv6-selection {{ item.field }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if value.tunnel is defined and value.tunnel %} + {% set tunnel_vars = value.tunnel %} + {% if tunnel_vars.hash_field 
is defined and tunnel_vars.hash_field %} + {% for item in tunnel_vars.hash_field %} + {% if item.name is defined and item.name %} + {% if item.header is defined and item.header %} + {% if item.state is defined and item.state == "absent" %} +no load-balance tunnel {{ item.name }} {{ item.header }} + {% else %} +load-balance tunnel {{ item.name }} {{ item.header }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + + {% elif key == "reload_type" and value %} +reload-type + {% if value.auto_save is defined %} + {% if value.auto_save %} + auto-save enable + {% else %} + auto-save disable + {% endif %} + {% endif %} + {% if value.boot_type is defined and value.boot_type %} + {% if boot_type_state is defined and boot_type_state =="absent" %} + no boot-type {{ value.boot_type }} + {% else %} + boot-type {{ value.boot_type }} + {% endif %} + {% endif %} + {% if value.config_scr_download is defined %} + {% if value.config_scr_download %} + config-scr-download enable + {% else %} + config-scr-download disable + {% endif %} + {% endif %} + {% if value.dhcp_timeout is defined %} + {% if value.dhcp_timeout >=0 %} + dhcp-timeout {{ value.dhcp_timeout }} + {% else %} + no dhcp-timeout 0 + {% endif %} + {% endif %} + {% if value.retry_count is defined %} + {% if value.retry_count >=0 %} + retry-count {{ value.retry_count }} + {% else %} + no retry-count 0 + {% endif %} + {% endif %} + {% if value.relay is defined %} + {% if value.relay %} + relay enable + {% else %} + relay disable + {% endif %} + {% endif %} + {% if value.relay_remote_id is defined %} + {% if value.relay_remote_id %} + relay remote-id {{ value.relay_remote_id }} + {% else %} + no relay remote-id a + {% endif %} + {% endif %} + {% if value.vendor_class_identifier is defined %} + {% if value.vendor_class_identifier %} + vendor-class-identifier {{ value.vendor_class_identifier }} + {% else %} + no vendor-class-identifier a + {% endif %} + {% endif %} + + {% elif key == 
"management_rt" and value %} + {% for item in value %} + {% if item.ip is defined and item.ip %} + {% if item.ipv4 is defined and item.ipv4 %} + {% if item.state is defined and item.state == "absent" %} +no management route 0.0.0.0/0 {{ item.ip }} + {% else %} +management route 0.0.0.0/0 {{ item.ip }} + {% endif %} + {% else %} + {% if item.state is defined and item.state == "absent" %} +no management route ::/0 {{ item.ip }} + {% else %} +management route ::/0 {{ item.ip }} + {% endif %} + {% endif%} + {% endif %} + {% endfor %} + + {% elif key == "line_terminal" and value %} + {% for key in value.keys() %} + {% set vty_vars = value[key] %} +line {{ key }} + {% if vty_vars.exec_timeout is defined %} + {% if vty_vars.exec_timeout %} + {% set timeout = (vty_vars.exec_timeout | string).split(" ") %} + {% if timeout | length > 1 %} + exec-timeout {{ vty_vars.exec_timeout }} + {% else %} + exec-timeout {{ vty_vars.exec_timeout }} 0 + {% endif %} + {% else %} + no exec-timeout + {% endif %} + {% endif %} + {% if vty_vars.exec_banner is defined %} + {% if vty_vars.exec_banner %} + exec-banner + {% else %} + no exec-banner + {% endif %} + {% endif %} + {% if vty_vars.motd_banner is defined %} + {% if vty_vars.motd_banner %} + motd-banner + {% else %} + no motd-banner + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + +{% endfor %} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + 
+[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml new file mode 100644 index 00000000..474f282d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml @@ -0,0 +1,74 @@ +--- +# vars file for dellemc.os9.os9_system, +# below gives a sample configuration +# Sample variables for OS9 device +os9_system: + hostname: os9 + enable_password: calvin + line_terminal: + vty 0: + exec_timeout: 40 + vty 1: + exec_timeout: 40 200 + service_passwd_encryption: true + banner: + exec: t hai t + login: + ack_enable: true + ack_prompt: testbanner + keyboard_interactive: true + banner_text: cloginbannerc + motd: t ansibletest t + hash_algo: + algo: + - name: lag + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + - name: ecmp + mode: xor1 + stack_unit: 0 + port_set: 0 + state: present + seed: + - value: 3 + stack_unit: 0 + port_set: 0 + state: present + - value: 2 + state: present + load_balance: + ingress_port: true + ip_selection: + - field: vlan dest-ip + state: present + ipv6_selection: + - field: dest-ipv6 vlan + state: present + tunnel: + hash_field: + - name: mac-in-mac + header: tunnel-header-mac + state: present + clock: + summer_time: + timezone_name: PST + type: date + start_datetime: 2 jan 1993 22:33 + end_datetime: 3 jan 2017 22:33 + offset_mins: 20 + timezone: + name: IST + offset_hours: -5 + offset_mins: 20 + reload_type: + auto_save: true + boot_type: normal-reload + boot_type_state: absent + config_scr_download: true + dhcp_timeout: 5 + retry_count: 3 + relay: true + relay_remote_id: ho + vendor_class_identifier: aa \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml new file mode 100644 index 00000000..4d142220 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml new file mode 100644 index 00000000..f056f57b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_system \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/LICENSE b/ansible_collections/dellemc/os9/roles/os9_users/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/README.md b/ansible_collections/dellemc/os9/roles/os9_users/README.md new file mode 100644 index 00000000..2fadbe70 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/README.md @@ -0,0 +1,109 @@ +Users role +========== + +This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The users role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_users list keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``userrole`` | string (required) | Configures the role name which can be configured for users | os9 | +| ``userrole_state`` | string: absent,present\* | Deletes the user role with specified name if set to absent | os9 | +| ``userrole_inherit`` | string: netadmin,netoperator,secadmin,sysadmin\* | Specifies the existing role name to inherit the permissions | os9 | +| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os9 | +| ``password`` | string | Configures the password set for the username | os9 | 
+| ``role`` | string | Configures the role assigned to the user | os9 | +| ``privilege`` | int | Configures the privilege level for the user (0 to 15); if this key is omitted, the default privilege is 1 | os9 | +| ``access_class`` | string | Configures the access-class for the user | os9 | +| ``pass_key`` | integer: 0\*,7 | Configures the password as encrypted if set to 7 in os9 devices | os9 | +| ``secret`` | string | Configures line password as secret in os9 devices | os9 | +| ``secret_key`` | integer: 0\*,5 | Configures the secret line password using md5 encrypted algorithm | os9 | +| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_users* role to configure global system user attributes. The example creates a hosts file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file. It writes a simple playbook that only references the *os9_users* role. By including the role, you automatically get access to all of the tasks to configure user features. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_users: + - userrole: role1 + userrole_state: present + userrole_inherit: sysadmin + - username: u1 + password: test + role: sysadmin + privilege: 0 + state: absent + - username: u1 + password: false + privilege: 1 + access_class: a1 + role: netadmin + state: present + - username: u2 + secret: test1 + secret_key : 0 + access_class: a2 + privilege: 3 + role: sysadmin + state: present + +**Simple playbook to setup users — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_users + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml new file mode 100644 index 00000000..b0770388 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_users \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml new file mode 100644 index 00000000..0b439791 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_users \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml new file mode 100644 index 00000000..9dae624a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_users role facilitates the configuration of user attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml new file mode 100644 index 00000000..6708c02a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating users configuration for os9" + template: + src: os9_users.j2 + dest: "{{ build_dir }}/users9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning users configuration for os9" + dellemc.os9.os9_config: + src: os9_users.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2 b/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2 new file mode 100644 index 00000000..64f8256a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2 @@ -0,0 +1,141 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{############################################# +Purpose: +Configure users commands for os9 Devices +os9_users: + - userrole: role1 + userrole_state: present + userrole_inherit: sysadmin + - username: test + password: test + pass_key: 7 + access_class: a1 + role: sysadmin + privilege: 0 + state: present + - username: u1 + password: false + privilege: 1 + access_class: a1 + role: netadmin + state: present + - username: u2 + secret: test1 + secret_key : 0 + access_class: a2 + privilege: 3 + role: sysadmin + state: present 
+###################################################}
+{% if os9_users is defined and os9_users %}
+{% for item in os9_users %}
+  {% if item.userrole is defined and item.userrole %}
+    {% if item.userrole_state is defined and item.userrole_state == "absent" %}
+      {% for item in os9_users %}
+        {% if item.username is defined and item.username %}
+          {% if item.state is defined and item.state == "absent" %}
+no username {{ item.username }}
+          {% endif %}
+        {% endif %}
+      {% endfor %}
+no userrole {{ item.userrole }}
+    {% else %}
+      {% if item.userrole_inherit is defined and item.userrole_inherit %}
+userrole {{ item.userrole }} inherit {{ item.userrole_inherit }}
+      {% else %}
+userrole {{ item.userrole }}
+      {% endif %}
+    {% endif %}
+  {% endif %}
+  {% if item.role_permission is defined and item.role_permission %}
+    {% if item.role_permission.mode is defined and (item.role_permission.mode == "configure" or item.role_permission.mode == "exec" or item.role_permission.mode == "interface" or item.role_permission.mode == "line" or item.role_permission.mode == "route-map" or item.role_permission.mode == "router") %}
+      {% if item.role_permission.action is defined and (item.role_permission.action == "reset" or item.role_permission.action == "addrole" or item.role_permission.action == "deleterole") %}
+        {% if item.role_permission.line is defined and item.role_permission.line %}
+          {% if item.role_permission.action != "reset" and item.role_permission.role_name is defined and item.role_permission.role_name %}
+            {% if item.role_permission.state is defined and item.role_permission.state == "absent" %}
+no role {{ item.role_permission.mode }} {{ item.role_permission.action }} {{ item.role_permission.role_name }} {{ item.role_permission.line }}
+            {% else %}
+role {{ item.role_permission.mode }} {{ item.role_permission.action }} {{ item.role_permission.role_name }} {{ item.role_permission.line }}
+            {% endif %}
+          {% else %}
+role {{ item.role_permission.mode }} reset {{ 
item.role_permission.line }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if item.username is defined and item.username %} + {% if item.state is defined and item.state == "absent" %} +no username {{ item.username }} + {% else %} + {% if item.password is defined and item.password %} + {% if item.pass_key is defined and item.pass_key %} +{% set passwd = item.pass_key|string+" "+item.password %} + {% else %} +{% set passwd = item.password %} + {% endif %} + {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %} +username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }} role {{ item.role }} access-class {{ item.access_class }} + {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %} +username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }} role {{ item.role }} + {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %} +username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }} role {{ item.role }} + {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %} +username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }} privilege {{ item.privilege }} + {% elif item.role is defined and item.role %} +username {{ item.username }} password {{ passwd }} role {{ item.role }} + {% elif item.privilege is defined and item.privilege %} +username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }} + {% elif item.access_class is defined and item.access_class %} +username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }} + {% else %} +username {{ item.username }} password {{ passwd }} + {% endif %} + {% elif item.secret is defined and item.secret %} + {% if item.secret_key is 
defined and item.secret_key %} +{% set passwd = item.secret_key|string+" " +item.secret %} + {% else %} +{% set passwd = item.secret %} + {% endif %} + {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %} +username {{ item.username }} secret {{ passwd }} role {{ item.role }} privilege {{ item.privilege }} access-class {{ + item.access_class }} + {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %} +username {{ item.username }} secret {{ passwd }} role {{ item.role }} privilege {{ item.privilege }} + {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %} +username {{ item.username }} secret {{ passwd }} role {{ item.role }} access-class {{ item.access_class }} + {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %} +username {{ item.username }} secret {{ passwd }} privilege {{ item.privilege }} access-class {{ item.access_class }} + {% elif item.role is defined and item.role %} +username {{ item.username }} secret {{ passwd }} role {{ item.role }} + {% elif item.privilege is defined and item.privilege %} +username {{ item.username }} secret {{ passwd }} privilege {{ item.privilege }} + {% elif item.access_class is defined and item.access_class %} +username {{ item.username }} secret {{ passwd }} access-class {{ item.access_class }} + {% else %} +username {{ item.username }} secret {{ passwd }} + {% endif %} + {% else %} + {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %} +username {{ item.username }} nopassword role {{ item.role }} privilege {{ item.privilege }} access-class {{ item.access_class }} + {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %} +username {{ item.username }} nopassword 
role {{ item.role }} privilege {{ item.privilege }} + {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %} +username {{ item.username }} nopassword role {{ item.role }} access-class {{ item.access_class }} + {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %} +username {{ item.username }} nopassword privilege {{ item.privilege }} access-class {{ item.access_class }} + {% elif item.role is defined and item.role %} +username {{ item.username }} nopassword role {{ item.role }} + {% elif item.privilege is defined and item.privilege %} +username {{ item.username }} nopassword privilege {{ item.privilege }} + {% elif item.access_class is defined and item.access_class %} +username {{ item.username }} nopassword access-class {{ item.access_class }} + {% else %} +username {{ item.username }} nopassword + {% endif %} + + {% endif %} + {% endif %} + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml new file mode 100644 index 00000000..e2882d20 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml @@ -0,0 +1,28 @@ +--- +# vars file for dellemc.os9.os9_users, +# below gives a 
sample configuration +# Sample variables for OS9 device +os9_users: + - userrole: role1 + userrole_state: present + userrole_inherit: sysadmin + - username: test + password: test + pass_key: 7 + access_class: a1 + role: role1 + privilege: 0 + state: present + - username: u1 + password: false + privilege: 1 + access_class: a1 + role: netadmin + state: present + - username: u2 + secret: test1 + secret_key: 0 + access_class: a2 + privilege: 3 + role: sysadmin + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml new file mode 100644 index 00000000..1e264911 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_users \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml new file mode 100644 index 00000000..05fc40b8 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_users \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/README.md b/ansible_collections/dellemc/os9/roles/os9_vlan/README.md new file mode 100644 index 00000000..b2ea5ec1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/README.md @@ -0,0 +1,105 @@ +VLAN role +========= + +This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The VLAN role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration +- `os9_vlan` (dictionary) holds the key with the VLAN ID key and default-vlan key. 
+- VLAN ID key should be in format "vlan " (1 to 4094) +- Variables and values are case-sensitive + +**os9_vlan** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``default_vlan`` | boolean | Configures the default VLAN feature as diabled if set to true | os9 | + +**VLAN ID keys** + +| Key | Type | Notes | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``name`` | string | Configures the name of the VLAN | os9 | +| ``description`` | string | Configures a single line description for the VLAN | os9 | +| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os9 | +| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os9 | +| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os9 | +| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os9 | +| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os9 | +| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os9 | +| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars directories* or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +## Example playbook + +This example uses the *os9_vlan* role to setup the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated to it. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the os9_vlan role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_vlan: + default_vlan: true + vlan 100: + name: "Mgmt Network" + description: "Int-vlan" + tagged_members: + - port: fortyGigE 1/30 + state: absent + untagged_members: + - port: fortyGigE 1/14 + state: present + state: present + + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_vlan + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml new file mode 100644 index 00000000..2e62ad6e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_vlan \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml new file mode 100644 index 00000000..93bec0dc --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_vlan \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml new file mode 100644 index 00000000..74ac54cd --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml new file mode 100644 index 00000000..d460f275 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating VLAN configuration for os9" + template: + src: os9_vlan.j2 + dest: "{{ build_dir }}/vlan9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning VLAN configuration for os9" + dellemc.os9.os9_config: + src: os9_vlan.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2 b/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2 new file mode 100644 index 00000000..e9da9e5f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2 @@ -0,0 +1,79 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################## +Purpose: +Configure VLAN Interface commands for os9 Devices +os9_vlan: + default_vlan: true + VLAN 1: + name: "vlan2" + description: "int-vlan" + tagged_members: + - port: fortyGigE 0/32 + state: present + - port: fortyGigE 0/40 + state: absent + untagged_members: + - port: fortyGigE 0/92 + state: absent + - port: fortyGigE 0/44 + state: present + state: present +#########################################} +{% if os9_vlan is defined and os9_vlan %} +{% for key,value in os9_vlan.items() %} + {% if key == "default_vlan" %} + {% if value 
%} +default-vlan disable + {% else %} +no default-vlan disable + {% endif %} + {% else %} + + {% set vlan_id = key.split(" ") %} + {% set vlan_vars = os9_vlan[key] %} + {% if vlan_vars.state is defined and vlan_vars.state == "absent" %} +no interface Vlan {{ vlan_id[1] }} + {% else %} +interface Vlan {{ vlan_id[1] }} + {% if vlan_vars.name is defined%} + {% if vlan_vars.name %} + name {{ vlan_vars.name }} + {% else %} + no name + {% endif %} + {% endif %} + {% if vlan_vars.description is defined %} + {% if vlan_vars.description %} + description {{ vlan_vars.description }} + {% else %} + no description + {% endif %} + {% endif %} + {% if vlan_vars.untagged_members is defined %} + {% for ports in vlan_vars.untagged_members %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state == "absent" %} + no untagged {{ ports.port }} + {% else %} + untagged {{ ports.port }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% if vlan_vars.tagged_members is defined %} + {% for ports in vlan_vars.tagged_members %} + {% if ports.port is defined and ports.port %} + {% if ports.state is defined and ports.state == "absent" %} + no tagged {{ ports.port }} + {% else %} + tagged {{ ports.port }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff 
--git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml new file mode 100644 index 00000000..7f74b3b4 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml @@ -0,0 +1,20 @@ +--- +# vars file for dellemc.os9.os9_vlan, +# below gives a example configuration +# Sample variables for OS9 device +os9_vlan: + default_vlan: true + vlan 100: + name: "Blue Network" + description: "Interface-vlan" + tagged_members: + - port: fortyGigE 1/2 + state: present + - port: fortyGigE 1/11 + state: present + untagged_members: + - port: fortyGigE 1/3 + state: present + - port: fortyGigE 1/10 + state: present + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml new file mode 100644 index 00000000..1dfd42bd --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_vlan \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml new file mode 100644 index 00000000..cd2ceef6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_vlan \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ Copyright (c) 2020, Dell Inc. All rights reserved.
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/README.md b/ansible_collections/dellemc/os9/roles/os9_vlt/README.md new file mode 100644 index 00000000..2154fbd8 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/README.md @@ -0,0 +1,132 @@ +VLT role +======== + +This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The VLT role requires an SSH connection for connectivity to your Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_vlt keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os9 | +| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os9 | +| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os9 | +| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os9| +| ``VLTi`` | integer | Configures the peer link port-channel ID for the VLT domain (1 to 4096) | os9 | +| ``peer_routing`` | boolean | 
Configures VLT peer routing | os9 |
+| ``peer_routing_timeout`` | integer | Configures the timeout for peer routing (1 to 65535)| os9 |
+| ``multicast_peer_routing_timeout`` | integer | Configures the timeout for multicast peer routing (1 to 1200) | os9 |
+| ``priority`` | integer | Configures the primary priority to the corresponding channel ID | os9 |
+| ``unit_id`` | integer | Configures the system unit ID for VLT (either 0 or 1) | os9 |
+| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os9 |
+| ``vlt_peers.<portchannel-id>`` | dictionary | Configures the VLT peer port-channel (`Po value`) | os9 |
+| ``vlt_peers.<portchannel-id>.peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os9 |
+| ``system_mac`` | string | Configures the system MAC address for VLT | os9 |
+| ``delay_restore`` | integer | Configures the delay in bringing up VLT ports after reload or peer-link restoration (default 90)| os9 |
+| ``delay_restore_abort_threshold`` | integer | Configures the wait interval for VLT delay-restore timer to abort (default 60) | os9 |
+| ``proxy_gateway`` | dictionary | Contains objects to configure the VLT proxy gateway (see ``proxy_gateway.*``) | os9 |
+| ``proxy_gateway.static`` | dictionary | Contains objects to configure the static VLT proxy gateway (see ``static.*``) | os9 |
+| ``static.remote_mac`` | list | Configures the remote MAC for static VLT proxy gateway (see ``remote_mac.*``) | os9 |
+| ``remote_mac.address`` | string | Configures the remote MAC address for the static VLT proxy gateway | os9 |
+| ``remote_mac.exclude_vlan_range`` | string | Configures the exclude VLAN for the static VLT proxy gateway | os9 |
+| ``remote_mac.state`` | string: absent,present | Deletes the remote MAC address or exclude VLAN configured on the proxy gateway if set to absent | os9 |
+| ``static.proxy_static_state`` | string: absent,present | Deletes the static VLT proxy gateway if set to absent | os9 |
+| 
``proxy_gateway.lldp`` | dictionary | Contains objects to configure LLDP VLT proxy gateway (see ``lldp.*`` for each item); mutually exclusive with *proxy_gateway.static* | os9 | +| ``lldp.peer_domain_link`` | list | Configures the VLT proxy gateway interface (see ``peer_domain_link.*``) | os9 | +| ``peer_domain_link.port_channel_id`` | integer | Configures the port-channel for the VLT proxy gateway | os9 | +| ``peer_domain_link.exclude_vlan_range`` | string | Configures to exclude VLAN for LLDP VLT proxy gateway | os9 | +| ``peer_domain_link.state`` | string: absent,present | Deletes the port-channel or exclude VLAN configured on the proxy gateway if set to absent | os9 | +| ``lldp.proxy_lldp_state`` | string: absent,present | Deletes the LLDP VLT proxy gateway if set to absent | os9 | +| ``lldp.vlt_peer_mac`` | boolean | Configures the proxy gateway transmit for square VLT | os9 | +| ``lldp.peer_timeout`` | integer | Configures the proxy gateway restore timer (1 to 65535) | os9 | +| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_vlt* role to set up a VLT domain. 
It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_vlt* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + os9_vlt: + domain: 1 + backup_destination: 192.168.1.1 + destination_type: "ipv4" + priority: 1 + VLTi: 101 + backup_destination_vrf: VLTi-KEEPALIVE + peer_routing: true + peer_routing_timeout: 200 + multicast_peer_routing_timeout: 250 + unit_id: 0 + system_mac: aa:aa:aa:aa:aa:aa + delay_restore: 100 + delay_restore_abort_threshold: 110 + proxy_gateway: + static: + remote_mac: + - address: aa:aa:aa:aa:aa:aa + exclude_vlan_range: 2 + state: present + proxy_static_state: present + vlt_peers: + Po 12: + peer_lag: 13 + state: present + +**Simple playbook to set up system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_vlt + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml new file mode 100644 index 00000000..7d2e3ec8 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_vlt \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml new file mode 100644 index 00000000..703bdba9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_vlt \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml new file mode 100644 index 00000000..bb33e1c1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_vlt role facilitates the configuration of VLT attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml new file mode 100644 index 00000000..34a53245 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating VLT configuration for os9" + template: + src: os9_vlt.j2 + dest: "{{ build_dir }}/vlt9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning VLT configuration for os9" + dellemc.os9.os9_config: + src: os9_vlt.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 b/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 new file mode 100644 index 00000000..6dd6303e --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 @@ -0,0 +1,217 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure VLT commands for os9 Devices. 
+os9_vlt: + domain: 1 + backup_destination: 192.168.1.1 + destination_type: ipv4 + priority: 1 + VLTi: 101 + backup_destination_vrf: VLTi-KEEPALIVE + unit_id: 0 + peer_routing: True + peer_routing_timeout: 200 + multicast_peer_routing_timeout: 300 + vlt_peers: + Po 12: + peer_lag: 13 + system_mac: aa:aa:aa:aa:aa:aa + delay_restore: 100 + delay_restore_abort_threshold: 110 + proxy_gateway: + static: + remote_mac: + - address: aa:aa:aa:aa:aa:aa + exclude_vlan_range: 2 + state: present + proxy_static_state: present + lldp: + vlt_peer_mac: true + peer_timeout: 20 + peer_domain_link: + - port_channel_id: 10 + exclude_vlan_range: 3 + state: present + proxy_lldp_state: present + + state: present +################################} +{% if os9_vlt is defined and os9_vlt %} + {% if os9_vlt.vlt_peers is defined and os9_vlt.vlt_peers %} + {% for key in os9_vlt.vlt_peers.keys() %} + {% set channel_id = key.split(" ") %} + {% set peer_vars = os9_vlt.vlt_peers[key] %} +interface Port-channel {{ channel_id[1] }} + {% if peer_vars.peer_lag is defined %} + {% if peer_vars.peer_lag %} + vlt-peer-lag port-channel {{ peer_vars.peer_lag}} + {% else %} + no vlt-peer-lag + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if os9_vlt.domain is defined and os9_vlt.domain %} + {% if os9_vlt.state is defined and os9_vlt.state == 'absent' %} +no vlt domain {{ os9_vlt.domain }} + {% else %} +vlt domain {{ os9_vlt.domain }} + {% if os9_vlt.backup_destination is defined %} + {% if os9_vlt.backup_destination %} + {% if os9_vlt.destination_type is defined %} + {% if os9_vlt.destination_type == 'ipv6' %} + back-up destination ipv6 {{ os9_vlt.backup_destination }} + {% elif os9_vlt.destination_type == 'ipv4' %} + {% if os9_vlt.backup_destination_vrf is defined and os9_vlt.backup_destination_vrf %} + back-up destination {{ os9_vlt.backup_destination }} vrf {{ os9_vlt.backup_destination_vrf }} + {% else %} + back-up destination {{ os9_vlt.backup_destination }} + {% endif %} + {% endif %} + {% 
endif %} + {% else %} + no back-up destination + {% endif %} + {% endif %} + {% if os9_vlt.VLTi is defined %} + {% if os9_vlt.VLTi %} + peer-link port-channel {{ os9_vlt.VLTi }} + {% else %} + no peer-link + {% endif %} + {% endif %} + {% if os9_vlt.priority is defined %} + {% if os9_vlt.priority %} + primary-priority {{ os9_vlt.priority }} + {% else %} + no primary-priority + {% endif %} + {% endif %} + {% if os9_vlt.unit_id is defined %} + {% if os9_vlt.unit_id >= 0 %} + unit-id {{ os9_vlt.unit_id }} + {% else %} + no unit-id + {% endif %} + {% endif %} + {% if os9_vlt.peer_routing is defined %} + {% if os9_vlt.peer_routing == True %} + peer-routing + {% else %} + no peer-routing + {% endif %} + {% endif %} + {% if os9_vlt.peer_routing_timeout is defined %} + {% if os9_vlt.peer_routing_timeout %} + peer-routing-timeout {{ os9_vlt.peer_routing_timeout }} + {% else %} + no peer-routing-timeout + {% endif %} + {% endif %} + {% if os9_vlt.multicast_peer_routing_timeout is defined %} + {% if os9_vlt.multicast_peer_routing_timeout %} + multicast peer-routing timeout {{ os9_vlt.multicast_peer_routing_timeout }} + {% else %} + no multicast peer-routing timeout + {% endif %} + {% endif %} + {% if os9_vlt.system_mac is defined and os9_vlt.system_mac %} + system-mac mac-address {{ os9_vlt.system_mac }} + {% else %} + no system-mac + {% endif %} + {% if os9_vlt.delay_restore is defined %} + {% if os9_vlt.delay_restore %} + delay-restore {{ os9_vlt.delay_restore }} + {% else %} + no delay-restore + {% endif %} + {% endif %} + {% if os9_vlt.delay_restore_abort_threshold is defined %} + {% if os9_vlt.delay_restore_abort_threshold %} + delay-restore abort-threshold {{ os9_vlt.delay_restore_abort_threshold }} + {% else %} + no delay-restore abort-threshold + {% endif %} + {% endif %} + + {% if os9_vlt.proxy_gateway is defined and os9_vlt.proxy_gateway %} + {% for key in os9_vlt.proxy_gateway.keys() %} + {% if key == "static" %} + {% set static_vars = os9_vlt.proxy_gateway[key] %} 
+ {% if static_vars.proxy_static_state is defined and static_vars.proxy_static_state =="absent" %} + no proxy-gateway static + {% else %} + proxy-gateway static + {% if static_vars.remote_mac is defined and static_vars.remote_mac %} + {% for mac in static_vars.remote_mac %} + {% if mac.state is defined and mac.state =="absent" %} + {% if mac.address is defined and mac.address %} + {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %} + no remote-mac-address {{ mac.address }} exclude-vlan {{ mac.exclude_vlan_range }} + {% else %} + no remote-mac-address {{ mac.address }} + {% endif %} + {% endif %} + {% else %} + {% if mac.address is defined and mac.address %} + {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %} + remote-mac-address {{ mac.address }} exclude-vlan {{ mac.exclude_vlan_range }} + {% else %} + remote-mac-address {{ mac.address }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% elif key == "lldp" %} + {% set lldp_vars = os9_vlt.proxy_gateway[key] %} + {% if lldp_vars.proxy_lldp_state is defined and lldp_vars.proxy_lldp_state =="absent" %} + no proxy-gateway lldp + {% else %} + proxy-gateway lldp + {% if lldp_vars.peer_domain_link is defined and lldp_vars.peer_domain_link %} + {% for mac in lldp_vars.peer_domain_link %} + {% if mac.state is defined and mac.state =="absent" %} + {% if mac.port_channel_id is defined and mac.port_channel_id %} + {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %} + no peer-domain-link port-channel {{ mac.port_channel_id }} exclude-vlan {{ mac.exclude_vlan_range }} + {% else %} + no peer-domain-link port-channel {{ mac.port_channel_id }} + {% endif %} + {% endif %} + {% else %} + {% if mac.port_channel_id is defined and mac.port_channel_id %} + {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %} + peer-domain-link port-channel {{ mac.port_channel_id }} exclude-vlan {{ mac.exclude_vlan_range }} + {% else %} + 
peer-domain-link port-channel {{ mac.port_channel_id }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% if lldp_vars.vlt_peer_mac is defined %} + {% if lldp_vars.vlt_peer_mac %} + vlt-peer-mac transmit + {% else %} + no vlt-peer-mac transmit + {% endif %} + {% endif %} + {% if lldp_vars.peer_timeout is defined %} + {% if lldp_vars.peer_timeout %} + peer-timeout {{ lldp_vars.peer_timeout }} + {% else %} + no peer-timeout 2 + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + + {% endif %} + {% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml new file mode 100644 index 00000000..999d3b5c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml @@ -0,0 +1,39 @@ +--- +# vars file for dellemc.os9.os9_vlt, +# below gives a example configuration +# Sample variables for OS9 device +os9_vlt: + domain: 3 + backup_destination: 1.1.1.1 + destination_type: ipv4 + backup_destination_vrf: test + priority: 1 + VLTi: 100 + peer_routing: True + peer_routing_timeout: 200 + multicast_peer_routing_timeout: 250 + system_mac: aa:aa:aa:aa:aa:aa + delay_restore: 100 + delay_restore_abort_threshold: 110 + proxy_gateway: + static: + remote_mac: + - address: 
aa:aa:aa:aa:aa:aa + exclude_vlan_range: 2 + state: present + proxy_static_state: present + lldp: + vlt_peer_mac: true + peer_timeout: 20 + peer_domain_link: + - port_channel_id: 10 + exclude_vlan_range: 3 + state: present + proxy_lldp_state: present + vlt_peers: + Po 12: + peer_lag: 13 + Po 10: + peer_lag: 14 + unit_id: 1 + state: present \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml new file mode 100644 index 00000000..c5a1dcf0 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_vlt \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml new file mode 100644 index 00000000..11531674 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_vlt \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/README.md b/ansible_collections/dellemc/os9/roles/os9_vrf/README.md new file mode 100644 index 00000000..22792b6f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/README.md @@ -0,0 +1,125 @@ +VRF role +======== + +This role facilitates to configure the basics of virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The vrf role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the variable `ansible_network_os` that can take the `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**os9_vrf keys** + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrfdetails`` | list | Configures the list of VRF instances (see ``instances.*``) | os9 | +| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os9 | +| ``vrfdetails.vrf_id`` | integer (required) | Configures the VRF ID for the corresponding VRF | os9 | +| ``vrfdetails.description`` | string | Configures a one line description for the VRF | os9 | +| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os9 | +| ``vrfdetails.tagged_portname`` | list | Specifies list of valid interface names | os9 | +| ``tagged_portname.port`` | string | Specifies valid interface 
name | os9 | +| ``tagged_portname.state`` | string | Deletes VRF association in the interface if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not 
applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Dependencies +------------ + +The *os9_vrf* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0 + +Example playbook +---------------- + +This example uses the *os9_vrf* role to setup a VRF and associate it to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that references the *os9_vrf* role. + +*upd_src_ip_loopback_id* has an dependency with association of the interface in a VRF, and the *os9_vrf* role needs to be invoked twice with different input dictionary one for the create and one for *upd_src_ip_loopback_id*. 
+ + **Sample hosts file** + + leaf1 ansible_host= <ip_address> + + **Sample host_vars/leaf1 for os9 device** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_vrf: + vrfdetails: + - vrf_name: "os9vrf" + state: "present" + ip_route_import: + community_value: "10:20" + state: "present" + ip_route_export: + community_value: "30:40" + state: "present" + ipv6_route_import: + community_value: "40:50" + state: "absent" + ipv6_route_export: + community_value: "60:70" + state: "absent" + map_ip_interface: + - intf_id : "loopback11" + state : "present" + + os9_vrf_upd_src_loopback: + vrfdetails: + - vrf_name: "os9vrf" + state: "present" + upd_src_ip_loopback_id: 11 + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_vrf + +**Simple playbook to setup os9 with upd_src_ip_loopback_id — leaf.yaml** + + - hosts: leaf1 + vars: + os9_vrf: "{{ os9_vrf_upd_src_loopback }}" + roles: + - dellemc.os9.os9_vrf + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml new file mode 100644 index 00000000..5f46d646 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_vrf \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml new file mode 100644 index 00000000..accc50fb --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_vrf \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml new file mode 100644 index 00000000..f70f98b6 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_vrf role facilitates the configuration of VRF attributes in devices running Dell EMC Networking Operating Systems. 
+ company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - emc + - dellemc + - os9 \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml new file mode 100644 index 00000000..8f459212 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating VRF configuration for os9" + template: + src: os9_vrf.j2 + dest: "{{ build_dir }}/vrf9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning VRF configuration for os9" + dellemc.os9.os9_config: + src: os9_vrf.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 b/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 new file mode 100644 index 00000000..20690130 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 @@ -0,0 +1,68 @@ +#jinja2: trim_blocks: True,lstrip_blocks: True +{################################ +Purpose: +Configure VRF on os9 Devices. 
+os9_vrf: + vrfdetails: + - vrf_id: 1 + vrf_name: VLTi-KEEPALIVE + description: VRF-to-support-Peer-Keepalive-Link + state: present + tagged_portname: + - port: fortyGige 1/2 + state: present + - port: fortyGige 1/3 + state: absent +################################} +{% if (os9_vrf is defined and os9_vrf) %} +{% if os9_vrf.vrfdetails is defined %} + {% for vrf in os9_vrf.vrfdetails %} + {% if vrf.vrf_name is defined %} + {% if vrf.vrf_name %} + {% if vrf.state is defined and vrf.state == 'absent' %} + {% if vrf.tagged_portname is defined and vrf.tagged_portname %} + {% for tag in vrf.tagged_portname %} + {% if tag.state is defined and tag.state == 'absent' %} + {% if tag.port is defined and tag.port %} +interface {{ tag.port }} + no ip vrf forwarding + exit + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +no ip vrf {{ vrf.vrf_name }} + {% else %} + {% if vrf.vrf_id is defined %} + {% if vrf.vrf_id %} +feature vrf +ip vrf {{ vrf.vrf_name }} {{ vrf.vrf_id }} + {% if vrf.description is defined %} + {% if vrf.description %} + description {{ vrf.description }} + {% else %} + no description sample + {% endif %} + {% endif %} + {% if vrf.tagged_portname is defined %} + {% if vrf.tagged_portname %} + {% for tag in vrf.tagged_portname %} + {% if tag.port is defined and tag.port %} +interface {{ tag.port }} + {% if tag.state is defined and tag.state == 'absent' %} + no ip vrf forwarding + {% else %} + ip vrf forwarding {{ vrf.vrf_name }} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 
ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml new file mode 100644 index 00000000..865dcc3c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml @@ -0,0 +1,15 @@ +--- +# vars file for dellemc.os9.os9_vrf, +# below gives a sample configuration +# Sample variables for OS9 device +os9_vrf: + vrfdetails: + - vrf_id: 23 + vrf_name: VRFi-KEEPALIVE + description: test + state: absent + tagged_portname: + - port: fortyGigE 1/7 + state: absent + - port: fortyGigE 1/8 + state: absent \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml new file mode 100644 index 00000000..286efc50 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_vrf \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml new file mode 100644 index 00000000..0d4921a9 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_vrf \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 
+ + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md b/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md new file mode 100644 index 00000000..79447158 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md @@ -0,0 +1,148 @@ +VRRP role +========= + +This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The VRRP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take the `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value for any variable negates the corresponding configuration +- `os9_vrrp` (dictionary) holds a dictionary with the interface name key +- Interface name can correspond to any of the valid os9 interfaces with a unique interface identifier name +- Physical interface names must be in * * format (for example *fortyGigE 1/1*) +- Logical interface names must be in * * format (for example, *vlan 1* for os9) +- Variables and values are case-sensitive + +| Key | Type | Description | Support | +|------------|---------------------------|---------------------------------------------------------|-----------------------| +| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os9 | +| ``delay_min`` | integer | Configures the minimum delay timer applied after interface up event (0 to 900) | os9 | +| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | 
os9 | +| ``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os9 | +| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os9 | +| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os9 | +| ``vrrp_group.description`` | string | Configures a single line description for the VRRP group | os9 | +| ``vrrp_group.virtual_address`` | string | Configures a virtual-address to the VRRP group (A.B.C.D format) | os9 | +| ``vrrp_group.enable`` | boolean: true,false | Enables/disables the VRRP group at the interface | os9 | +| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os9 | +| ``vrrp_group.priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100) | os9 | +| ``vrrp_group.version`` | string: 2\*,3,both | Configures the VRRP version of the VRRP group; not supported when *vrrp_group.type* is "ipv6" | os9 | +| ``vrrp_group.hold_time_centisecs`` | integer | Configures the hold-time for the VRRP group in centiseconds (0 to 65525 and in multiple of 25; default 100); centisecs gets converted into seconds in version 2 | os9 | +| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075; default 100) and in multiple of 25; centisecs gets converted into seconds in version 2 | os9 | +| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track.*``) | os9 | +| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track.interface* | os9 | +| ``track_interface.interface`` | string | Configures the track interface of the VRRP group ( format) | os9 | +| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os9 | +| ``track_interface.state`` | 
string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os9 | +| ``vrrp_group.track_interface_state`` | string: present*,absent | Deletes all track interfaces from the VRRP group if set to absent | os9 | +| ``vrrp_group.authentication`` | dictionary | Configures the authentication type for the VRRP group (see ``authentication.*``); not supported when ``vrrp_group.type`` is "ipv6" | os9 | +| ``authentication.key`` | string (required): 0,7,LINE | Configures the authentication key for the VRRP group | os9 | +| ``authentication.key_string`` | string | Configures the user key string; if key is 7, this variable takes the hidden user key string; if key is 0, this variable takes the unencrypted user key (clear-text); supported only if the value of *authentication.key* is 7 or 0 | os9 | +| ``authentication.state`` | string: present\*,absent | Deletes authentication from the interface VRRP group if set to absent | os9 | +| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent | os9 | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself. 
+ +| Key | Required | Choices | Description | +|-------------|----------|------------|-----------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_vrrp* role to configure VRRP commands at the interfaces. It creates a *hosts* file with the switch details and corresponding variables. 
The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os9_vrrp* role. + +**Sample hosts file** + + leaf1 ansible_host= + +**Sample host_vars/leaf1** + + hostname: leaf1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + os9_vrrp: + fortyGigE 1/5: + vrrp: + delay_min: 2 + delay_reload: 3 + vrrp_group: + - group_id: 2 + type: ipv6 + description: "Interface-vrrp-ipv6" + virtual_address: 2001:4898:5808:ffa3::9 + enable: true + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 1/11 + state: present + track_interface_state: present + adv_interval_centisecs: 200 + hold_time_centisecs: 20 + - group_id: 4 + state: present + description: "Interface-vrrp4" + virtual_address: 10.28.0.2 + enable: true + priority: 120 + preempt: false + version: both + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 1/10 + state: present + track_interface_state: present + adv_interval_centisecs: 225 + hold_time_centisecs: 25 + authentication: + key: 0 + key_string: vrrpkey + state: present + +**Simple playbook to setup system — leaf.yaml** + + - hosts: leaf1 + roles: + - dellemc.os9.os9_vrrp + +**Run** + + ansible-playbook -i hosts leaf.yaml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml new file mode 100644 index 00000000..2d4f5317 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_vrrp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml new file mode 100644 index 00000000..78760008 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_vrrp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml new file mode 100644 index 00000000..db226e2c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml @@ -0,0 +1,19 @@ +# Copyright (c) 2020 Dell Inc. +--- +galaxy_info: + author: Dell EMC Networking Engineering + description: > + The os9_vrrp role facilitates the configuration of Virtual Router Redundancy Protocol (VRRP) attributes in + devices running Dell EMC Networking Operating Systems. 
+ license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml new file mode 100644 index 00000000..b8d3bed1 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating VRRP configuration for os9" + template: + src: os9_vrrp.j2 + dest: "{{ build_dir }}/vrrp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning VRRP configuration for os9" + dellemc.os9.os9_config: + src: os9_vrrp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2 b/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2 new file mode 100644 index 00000000..f3e4a1df --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2 @@ -0,0 +1,218 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{########################################## +Purpose: +Configure VRRP commands for os9 Devices +os9_vrrp: + fortyGigE 1/4: + vrrp: + delay_min: 2 + delay_reload: 2 + vrrp_group: + - group_id: 2 + type: ipv6 + description: "Interface-vrrp-ipv6" + virtual_address: 2001:4898:5808:ffa3::9 + enable: true + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 1/10 + state: present + track_interface_state: present + adv_interval_centisecs: 200 + hold_time_centisecs: 20 + state: present + + - group_id: 4 
+ state: present + description: "Interface-vrrp4" + virtual_address: 10.2.0.1 + enable: true + priority: 120 + preempt: false + version: 2 + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 1/12 + state: present + track_interface_state: present + adv_interval_centisecs: 200 + hold_time_centisecs: 20 + authentication: + key: 0 + key_string: vrrpkey + state: present +#########################################} +{% if os9_vrrp is defined and os9_vrrp %} +{% for key,value in os9_vrrp.items() %} +interface {{ key }} + {% if value %} + {% if value.vrrp is defined and value.vrrp %} + {% if value.vrrp.delay_min is defined %} + {% if value.vrrp.delay_min >=0 %} + vrrp delay minimum {{ value.vrrp.delay_min }} + {% else %} + no vrrp delay minimum + {% endif %} + {% endif %} + {% if value.vrrp.delay_reload is defined %} + {% if value.vrrp.delay_reload >=0 %} + vrrp delay reload {{ value.vrrp.delay_reload }} + {% else %} + vrrp delay reload {{ value.vrrp.delay_reload }} + {% endif %} + {% endif %} + {% endif %} + {% for group in value.vrrp_group %} + {% if group.group_id is defined and group.group_id %} + {% if group.state is defined and group.state == "absent" %} + {% if group.type is defined and group.type == "ipv6" %} + no vrrp-ipv6-group {{ group.group_id }} + {% else %} + no vrrp-group {{ group.group_id }} + {% endif %} + {% else %} + {% if group.type is defined and group.type == "ipv6" %} + vrrp-ipv6-group {{ group.group_id }} + {% else %} + vrrp-group {{ group.group_id }} + {% endif %} + {% if group.type is not defined or not group.type == "ipv6" %} + {% if group.version is defined %} + {% if group.version %} + version {{ group.version }} + {% else %} + no version + {% endif %} + {% endif %} + {% endif %} + {% if group.adv_interval_centisecs is defined %} + {% if group.adv_interval_centisecs %} + {% if group.version is not defined or (group.version is defined and 
group.version == 2) %} + {% set adv_int = group.adv_interval_centisecs/100 %} + {% if group.type is defined and group.type == "ipv6" %} + advertise-interval centisecs {{ group.adv_interval_centisecs }} + {% else %} + advertise-interval {{ adv_int|int }} + {% endif %} + {% else %} + advertise-interval centisecs {{ group.adv_interval_centisecs }} + {% endif %} + {% else %} + no advertise-interval + {% endif %} + {% endif %} + {% if group.hold_time_centisecs is defined %} + {% if group.hold_time_centisecs >= 0 %} + {% if group.version is not defined or (group.version is defined and group.version == 2) %} + {% set hold_time = group.hold_time_centisecs/100 %} + {% if group.type is defined and group.type == "ipv6" %} + hold-time centisecs {{ group.hold_time_centisecs }} + {% else %} + hold-time {{ hold_time|int }} + {% endif %} + {% else %} + hold-time centisecs {{ group.hold_time_centisecs }} + {% endif %} + {% else %} + no hold-time + {% endif %} + {% endif %} + {% if group.track_interface_state is defined and group.track_interface_state == "absent" %} + no track + {% else %} + {% if group.track_interface is defined and group.track_interface %} + {% for track_item in group.track_interface %} + {% if track_item.state is defined and track_item.state == "absent" %} + {% if track_item.resource_id is defined and track_item.resource_id %} + no track {{ track_item.resource_id }} + {% elif track_item.interface is defined and track_item.interface %} + no track {{ track_item.interface }} + {% endif %} + {% else %} + {% if track_item.resource_id is defined and track_item.resource_id %} + {% if track_item.priority_cost is defined and track_item.priority_cost %} + track {{ track_item.resource_id }} priority-cost {{ track_item.priority_cost }} + {% else %} + track {{ track_item.resource_id }} + {% endif %} + {% elif track_item.interface is defined and track_item.interface %} + {% if track_item.priority_cost is defined and track_item.priority_cost %} + track {{ track_item.interface 
}} priority-cost {{ track_item.priority_cost }} + {% else %} + track {{ track_item.interface }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% if group.type is not defined or not group.type == "ipv6" %} + {% if group.authentication is defined and group.authentication %} + {% if group.authentication.state is defined and group.authentication.state == "absent" %} + no authentication-type + {% else %} + {% if group.authentication.key is defined %} + {% if group.version is not defined or (group.version is defined and group.version == 2) %} + {% if group.authentication.key == 0 or group.authentication.key == 7 %} + {% if group.authentication.key_string is defined and group.authentication.key_string %} + authentication-type simple {{ group.authentication.key }} {{ group.authentication.key_string }} + {% endif %} + {% elif group.authentication.key %} + authentication-type simple {{ group.authentication.key }} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% if group.virtual_address is defined %} + {% if group.virtual_address %} + virtual-address {{ group.virtual_address }} + {% else %} + no virtual-address + {% endif %} + {% endif %} + {% if group.description is defined %} + {% if group.description %} + description {{ group.description }} + {% else %} + no description + {% endif %} + {% endif %} + {% if group.preempt is defined %} + {% if group.preempt %} + preempt + {% else %} + no preempt + {% endif %} + {% endif %} + {% if group.enable is defined %} + {% if group.enable %} + no disable + {% else %} + disable + {% endif %} + {% endif %} + {% if group.priority is defined %} + {% if group.priority %} + priority {{ group.priority }} + {% else %} + no priority + {% endif %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml 
b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml new file mode 100644 index 00000000..856d381c --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml @@ -0,0 +1,59 @@ +--- +# vars file for dellemc.os9.os9_vrrp, +# below gives a example configuration +# Sample variables for OS9 device +os9_vrrp: + fortyGigE 0/28: + vrrp: + delay_min: 4 + delay_reload: 5 + vrrp_group: + - group_id: 2 + type: ipv6 + description: "Interface-vrrp-ipv6" + virtual_address: 2001:4898:5808:ffa3::9 + enable: true + priority: 120 + preempt: false + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 0/40 + state: present + track_interface_state: present + adv_interval_centisecs: 200 + hold_time_centisecs: 20 + state: present + - group_id: 4 + state: present + description: "Interface-vrrp4" + virtual_address: 10.28.0.2 + enable: true + priority: 120 + preempt: false + version: 3 + track_interface: + - resource_id: 3 + priority_cost: 25 + state: present + - interface: port-channel 120 + priority_cost: 20 + - interface: fortyGigE 0/20 + state: absent + track_interface_state: present + adv_interval_centisecs: 200 + hold_time_centisecs: 200 + authentication: + key: 0 + key_string: vrrpkey + state: present + - group_id: 3 + state: present + description: 
"Interface-vrrp3" + virtual_address: 10.28.0.3 + enable: true + priority: 120 + preempt: false \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml new file mode 100644 index 00000000..a12c274f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_vrrp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml new file mode 100644 index 00000000..c241486b --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_vrrp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE new file mode 100644 index 00000000..2c9b8e1f --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (c) 2020, Dell Inc. All rights reserved. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (c) 2020, Dell Inc. All rights reserved. + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/README.md b/ansible_collections/dellemc/os9/roles/os9_xstp/README.md new file mode 100644 index 00000000..09223b8d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/README.md @@ -0,0 +1,127 @@ +# xSTP role + +This role facilitates the configuration of xSTP attributes. It supports multiple versions of spanning-tree protocol (STP), rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9. + +The xSTP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables. + +Role variables +-------------- + +- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value +- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file +- `os9_xstp` (dictionary) contains the hostname (dictionary) +- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device +- Any role variable with a corresponding state variable set to absent negates the configuration of that variable +- Setting an empty value to any variable negates the corresponding configuration +- Variables and values are case-sensitive + +**hostname keys** + +| Key | Type | Description | Support |
|------------|---------------------------|---------------------------------------------------------|----------------------|
+| ``type`` | string (required) | Configures the type of spanning-tree mode specified including STP, RSTP, PVST, and MSTP | os9 | +| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in 
the type variable | os9 | +| ``stp`` | dictionary | Configures simple spanning-tree protocol (see ``stp.* keys``) | os9 | +| ``stp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os9 | +| ``stp.state`` | string: absent,present\* | Deletes the configured STP if set to absent | os9 | +| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os9 | +| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os9 | +| ``rstp.state ``| string: absent,present\* | Deletes the configured RSTP in os9 devices if set to absent | os9 | +| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os9 | +| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os9 | +| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os9 | +| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os9 | +| ``pvst.state`` | string: absent,present\* | Deletes the configured PVST if set to absent | os9 | +| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os9 | +| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os9 | +| ``mstp_instances.number`` | integer | Configures the multiple spanning-tree instance number | os9 | +| ``mstp_instances.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to the instance number in os9 devices | os9 | +| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os9 | +| ``mstp_instances.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to 
absent | os9 | +| ``mstp.state`` | string: absent,present\* | Deletes the configured MSTP if set to absent | os9 | +| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os9 | +| ``intf.<interface name>`` | dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os9 | +| ``intf.<interface name>.stp_type`` | list: stp,mstp,pvst,rstp | Configures the list of spanning-tree in an interface | os9 | +| ``intf.<interface name>.edge_port`` | boolean: true,false | Configures EdgePort in os9 devices according to the stp_type; | os9 | + +> **NOTE**: Asterisk (_*_) denotes the default value if none is specified. + +Connection variables +-------------------- + +Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself. + +| Key | Required | Choices | Description | +|-------------|----------|------------|-------------------------------------------------------| +| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport | +| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 | +| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used | +| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device | +| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode | +| 
``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used | +| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable | +| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device | + +> **NOTE**: Asterisk (\*) denotes the default value if none is specified. + +Example playbook +---------------- + +This example uses the *os9_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS9 name. + +It writes a simple playbook that only references the *os9_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP. When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in build_dir path. By default, this variable is set to false. The example writes a simple playbook that only references the *os9_xstp* role. 
+ +**Sample hosts file** + + spine1 ansible_host= + +**Sample host_vars/spine1** + + hostname: spine1 + ansible_become: yes + ansible_become_method: xxxxx + ansible_become_pass: xxxxx + ansible_ssh_user: xxxxx + ansible_ssh_pass: xxxxx + ansible_network_os: dellemc.os9.os9 + build_dir: ../temp/os9 + + +**Sample vars/main.yml** + + os9_xstp: + type: rstp + enable: true + stp: + bridge_priority: 4096 + state: present + rstp: + bridge_priority: 4096 + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + bridge_priority: 4096 + vlans_state: present + intf: + fortyGigE 1/25: + stp_type: + - stp + - mstp + edge_port: true + +**Simple playbook to setup system — spine.yml** + + - hosts: spine + roles: + - dellemc.os9.os9_xstp + +**Run** + + ansible-playbook -i hosts spine.yml + +(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml new file mode 100644 index 00000000..d49cf4a3 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for dellemc.os9.os9_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml new file mode 100644 index 00000000..818e833d --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for dellemc.os9.os9_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml new file mode 100644 index 00000000..009fccea --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Dell Inc. 
+--- +galaxy_info: + author: Dell EMC Networking Engineering + description: The os9_xstp role facilitates the configuration of STP attributes in devices running Dell EMC Networking Operating Systems. + company: Dell Inc + license: GPLv3 + min_ansible_version: 2.9.6 + + platforms: + - name: os9 + + galaxy_tags: + - networking + - dell + - dellemc + - emc + - os9 diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml new file mode 100644 index 00000000..c98c538a --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for os9 + - name: "Generating xSTP configuration for os9" + template: + src: os9_xstp.j2 + dest: "{{ build_dir }}/xstp9_{{ hostname }}.conf.part" + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool) +# notify: save config os9 + register: generate_output + + - name: "Provisioning xSTP configuration for os9" + dellemc.os9.os9_config: + src: os9_xstp.j2 + when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") +# notify: save config os9 + register: output diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2 b/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2 new file mode 100644 index 00000000..b21ee592 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2 @@ -0,0 +1,160 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{############################################# +PURPOSE: Configure xSTP commands for os9 Devices +os9_xstp: + type: stp + enable: true + stp: + bridge_priority: 4096 + state: present + + rstp: + bridge_priority: 4096 + state: present + + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + state: present + + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + vlans_state: present + bridge_priority: 
4096 + state: present + intf: + fortyGigE 1/1: + stp_type: + - rstp + - mstp + edge_port: true +############################################} +{% if os9_xstp is defined and os9_xstp %} +{% set xstp_vars = os9_xstp %} +{% if xstp_vars.type is defined and xstp_vars.type %} + {% if xstp_vars.type == "stp" %} +protocol spanning-tree 0 + {% else %} +protocol spanning-tree {{ xstp_vars.type }} + {% endif %} + {% if xstp_vars.enable is defined %} + {% if xstp_vars.enable %} + no disable + {% else %} + disable + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.stp is defined and xstp_vars.stp %} + {% set val = xstp_vars.stp %} + {% if val.state is defined and val.state == "absent" %} +no protocol spanning-tree 0 + {% else %} + {% if val.bridge_priority is defined %} +protocol spanning-tree 0 + {% if val.bridge_priority == 0 or val.bridge_priority %} + bridge-priority {{ val.bridge_priority }} + {% else %} + no bridge-priority + {% endif %} + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.rstp is defined and xstp_vars.rstp %} + {% set val = xstp_vars.rstp %} + {% if val.state is defined and val.state == "absent" %} +no protocol spanning-tree rstp + {% else %} + {% if val.bridge_priority is defined %} +protocol spanning-tree rstp + {% if val.bridge_priority == 0 or val.bridge_priority %} + bridge-priority {{ val.bridge_priority }} + {% else %} + no bridge-priority + {% endif %} + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.pvst is defined and xstp_vars.pvst %} + {% set val = xstp_vars.pvst %} + {% if val.state is defined and val.state == "absent" %} +no protocol spanning-tree pvst + {% else %} + {% if val.vlan is defined and val.vlan %} +protocol spanning-tree pvst + {% for vlan in val.vlan %} + {% if vlan.range_or_id is defined and vlan.range_or_id %} + {% if vlan.bridge_priority is defined %} + {% if vlan.bridge_priority == 0 or vlan.bridge_priority %} + vlan {{ vlan.range_or_id }} bridge-priority {{ vlan.bridge_priority }} + {% else %} + 
no vlan {{ vlan.range_or_id }} bridge-priority + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.mstp is defined and xstp_vars.mstp %} + {% set val = xstp_vars.mstp %} + {% if val.state is defined and val.state == "absent" %} +no protocol spanning-tree mstp + {% else %} + {% if val.mstp_instances is defined and val.mstp_instances %} +protocol spanning-tree mstp + {% for instance in val.mstp_instances %} + {% if instance.number is defined and instance.number %} + {% if instance.bridge_priority is defined %} + {% if instance.bridge_priority == 0 or instance.bridge_priority %} + MSTI {{ instance.number }} bridge-priority {{ instance.bridge_priority }} + {% else %} + no MSTI {{ instance.number }} bridge-priority + {% endif %} + {% endif %} + {% if instance.vlans is defined and instance.vlans %} + {% if instance.vlans_state is defined and instance.vlans_state == "absent" %} + no MSTI {{ instance.number }} VLAN {{ instance.vlans }} + {% else %} + MSTI {{ instance.number }} VLAN {{ instance.vlans }} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +{% endif %} + +{% if xstp_vars.intf is defined and xstp_vars.intf %} + {% for intr in xstp_vars.intf.keys() %} + {% set intf_vars = xstp_vars.intf[intr] %} +interface {{ intr }} + {% for type in intf_vars.stp_type %} + {% if type == "stp" %} + {% if intf_vars.edge_port is defined %} + {% if not intf_vars.edge_port %} + no spanning-tree 0 portfast + {% else %} + spanning-tree 0 portfast bpduguard + {% endif %} + {% endif %} + {% else %} + {% if intf_vars.edge_port is defined %} + {% if intf_vars.edge_port %} + spanning-tree {{ type }} edge-port + {% else %} + no spanning-tree {{ type }} edge-port + {% endif %} + {% endif %} + {% endif %} + {% endfor %} + {% endfor %} +{% endif %} +{% endif %} \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml 
b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml new file mode 100644 index 00000000..5fd33c94 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml @@ -0,0 +1,20 @@ +spine1 ansible_host=100.94.210.44 +spine2 ansible_host=10.11.182.26 +leaf1 ansible_host=10.11.182.27 +leaf2 ansible_host=10.11.182.28 +leaf3 ansible_host=10.11.182.29 +leaf4 ansible_host=10.11.182.30 + +[spine] +spine1 +spine2 + +[leaf] +leaf1 +leaf2 +leaf3 +leaf4 + +[datacenter:children] +spine +leaf diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml new file mode 100644 index 00000000..7f30b083 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml @@ -0,0 +1,34 @@ +--- +# vars file for dellemc.os9.os9_xstp, +# below gives a sample configuration +# Sample variables for OS9 device +os9_xstp: + type: rstp + enable: true + stp: + bridge_priority: 4096 + state: present + + rstp: + bridge_priority: 4096 + state: present + + pvst: + vlan: + - range_or_id: 10 + bridge_priority: 4096 + state: present + + mstp: + mstp_instances: + - number: 1 + vlans: 10,12 + bridge_priority: 4096 + vlans_state: present + state: present + intf: + fortyGigE 1/25: + stp_type: + - stp + - mstp + edge_port: true \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml new file mode 100644 index 00000000..77da9671 --- /dev/null +++ b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml @@ -0,0 +1,5 @@ +--- +- hosts: datacenter + connection: network_cli + roles: + - dellemc.os9.os9_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml new file mode 100644 index 00000000..d2fefb05 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for dellemc.os9.os9_xstp \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/.gitignore b/ansible_collections/dellemc/os9/tests/.gitignore new file mode 100644 index 00000000..ea1472ec --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/.gitignore @@ -0,0 +1 @@ +output/ diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/__init__.py b/ansible_collections/dellemc/os9/tests/integration/targets/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml new file mode 100644 index 00000000..55a93fc2 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml new file mode 100644 index 00000000..7152815d --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml @@ -0,0 +1,14 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml 
b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml new file mode 100644 index 00000000..d4898c29 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator new file mode 100644 index 00000000..42a164c8 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator @@ -0,0 +1,20 @@ +--- +- debug: msg="START cli/bad_operator.yaml" + +- name: test bad operator + os9_command: + commands: + - show version + - show interfaces TenGigabitEthernet 0/0 + wait_for: + - "result[0] contains 'Description : blah'" + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- debug: msg="END cli/bad_operator.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains new file mode 100644 index 00000000..2f56a11f --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains @@ -0,0 +1,20 @@ +--- +- debug: msg="START cli/contains.yaml" + +- name: test contains operator + os9_command: + commands: + - show version + - show interface TenGigabitEthernet 0/0 + wait_for: + - "result[0] contains 2.0" + - "result[1] contains TenGigabitEthernet " + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- debug: msg="END cli/contains.yaml" diff 
--git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid new file mode 100644 index 00000000..cffc24f8 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid @@ -0,0 +1,28 @@ +--- +- debug: msg="START cli/invalid.yaml" + +- name: run invalid command + os9_command: + commands: ['show foo'] + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed" + +- name: run commands that include invalid command + os9_command: + commands: + - show version + - show foo + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed" + +- debug: msg="END cli/invalid.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output new file mode 100644 index 00000000..1fd53788 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output @@ -0,0 +1,29 @@ +--- +- debug: msg="START cli/output.yaml" + +- name: get output for single command + os9_command: + commands: ['show version'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for multiple commands + os9_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" + +- debug: msg="END cli/output.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml 
b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml new file mode 100644 index 00000000..80d19518 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml @@ -0,0 +1,74 @@ +--- +- debug: msg="START cli/show_commands.yaml" + +- name: test bad operator + os9_command: + commands: + - show version + - show interfaces TenGigabitEthernet 0/0 + wait_for: + - "result[0] contains 'Description : blah'" + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- name: get output for single command + os9_command: + commands: ['show version'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + +- name: get output for multiple commands + os9_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.stdout is defined" + - "result.stdout | length == 2" + +- name: show run command with grep Option + os9_command: + commands: + - show run | grep username + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.stdout | length == 1" + +- name: Execute multiple show commands continously + os9_command: + commands: + - show system + - show file-systems + - show startup-config + - show tech-support + - show logging + - show system brief | grep Management + provider: "{{ cli }}" + retries: 8 + interval: 5 + register: result + +- assert: + that: + - "result.stdout | length == 6" + +- debug: msg="END cli/show_commands.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout new file mode 100644 index 
00000000..60dbb761 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout @@ -0,0 +1,19 @@ +--- +- debug: msg="START cli/timeout.yaml" + +- name: test bad condition + os9_command: + commands: + - show version + wait_for: + - "result[0] contains bad_value_string" + provider: "{{ cli }}" + register: result + ignore_errors: yes + +- assert: + that: + - "result.failed == true" + - "result.msg is defined" + +- debug: msg="END cli/timeout.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml new file mode 100644 index 00000000..346bdf2d --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml @@ -0,0 +1,13 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml new file mode 100644 index 00000000..d737a490 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml @@ -0,0 +1,134 @@ +--- +- debug: msg="START cli/config command execution" + +- name: COnfigure managemnet protocol telnet + os9_config: + lines: ['hostname {{ inventory_hostname }}', 'ip telnet server enable'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + +- name: Create new username and set password + os9_config: + lines: ['username test password test123'] + provider: "{{ cli }}" + register: result + +- name: Update the new user test privilige using replace line Option + os9_config: + lines: + - username test password test123 privilege 15 + provider: "{{ cli }}" + replace: line + save: no + register: result + +- name: Validate the newly created username using show run command use the keyword wait_for + os9_command: + commands: + - show running-config | grep username + - show running-config | grep username | grep test + wait_for: + - "result[0] contains test" + provider: "{{ cli }}" + +- name: Configure SNMP v2 credentials on device and enable traps + os9_config: + lines: + - snmp-server community ansibleread ro + - snmp-server community ansiblewrite rw + - snmp-server enable traps + - snmp-server host 10.16.148.142 traps version 2c public udp-port 162 + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + +- name: Validate is newly created snmp v2 is available in running config + os9_command: + commands: + - show 
running-config | grep snmp-server + wait_for: + - "result[0] contains ansibleread" + - "result[0] contains ansiblewrite" + provider: "{{ cli }}" + register: result + +- name: Configure Clock timezone + os9_config: + lines: "clock timezone UTC 0 0" + provider: "{{ cli }}" + register: result + +- name: Configure Logging to NMS Server + os9_config: + lines: + - logging 10.16.148.142 + #before: + # - no logging 10.16.148.142 + #ignore: yes + provider: "{{ cli }}" + register: result + +- name: Configure Default Gateway + os9_config: + lines: + - management route 0.0.0.0/0 10.16.148.254 + provider: "{{ cli }}" + register: result + + #- assert: + # that: + # - "result.changed == true" + #- "'management route 0.0.0.0/0 10.16.148.254' in result.updates" + +- name: Enable spanning tree protocol using parent , before and after keywords in config module + os9_config: + lines: + - no disable + - hello-time 1 + - max-age 6 + - forward-delay 4 + - bridge-priority 0 + parents: ['protocol spanning-tree rstp'] + before: ['no protocol spanning-tree rstp'] + after: ['exit'] + provider: "{{ cli }}" + register: result + +- name: save the running config into startup config using save keyword in os9 config module + os9_config: + save: yes + provider: "{{ cli }}" + register: result + +- name: Validate the newly added commands are available in startup-config + os9_command: + commands: + - show startup-config + provider: "{{ cli }}" + register: result + +- name: COnfigure new vlan using src file given as input and backup the configuration + os9_config: + src: vlan_config.txt + provider: "{{ cli }}" + update: merge + backup: yes + register: result + + +- name: Validate the check Option for update in Dell os9 config using the config file provided with config option + os9_config: + src: vlan_config.txt + provider: "{{ cli }}" + update: check + config: Aggregation1_config.2016-09-06@15:26:02 + register: result + +- debug: msg="END cli/configcommands" diff --git 
a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml new file mode 100644 index 00000000..65df0afa --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml @@ -0,0 +1,37 @@ +--- +- debug: msg="START cli/toplevel.yaml" + +- name: setup + os9_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- name: configure top level command + os9_config: + lines: ['hostname foo'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == true" + - "'hostname foo' in result.updates" + +- name: configure top level command idempotent check + os9_config: + lines: ['hostname foo'] + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + +- name: teardown + os9_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + match: none + +- debug: msg="END cli/toplevel.yaml" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt new file mode 100644 index 00000000..89405283 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt @@ -0,0 +1,9 @@ +interface Vlan 1000 + description "vlan added from ansible" + name Testansible-1000 + ip unreachables + ip helper-address 100.1.1.1 + ip udp-helper udp-port 1000 + no shutdown +~ + diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml new file mode 100644 index 00000000..5f709c5a --- /dev/null +++ 
b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml new file mode 100644 index 00000000..346bdf2d --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml @@ -0,0 +1,13 @@ +--- +- name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + +- name: run test case + include: "{{ item }}" + with_items: "{{ test_items }}" diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml new file mode 100644 index 00000000..415c99d8 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml @@ -0,0 +1,2 @@ +--- +- { include: cli.yaml, tags: ['cli'] } diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml new file mode 100644 index 00000000..9315f344 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml @@ -0,0 +1,55 @@ +--- +- debug: msg="START cli/testcases_facts.yaml" + +- name: Get all the interfaces facts + os9_facts: + gather_subset: + - interfaces + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts is defined" + - 
"result.ansible_facts.ansible_net_all_ipv4_addresses is defined" + - "result.ansible_facts.ansible_net_interfaces is defined" + - "result.ansible_facts.ansible_net_neighbors is defined" + +- name: Get all the facts Excpet Interfaces using ! Operator and validate + os9_facts: + gather_subset: + - "!interfaces" + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts is defined" + - "result.ansible_facts.ansible_net_all_ipv4_addresses is not defined" + - "result.ansible_facts.ansible_net_interfaces is not defined" + - "result.ansible_facts.ansible_net_neighbors is not defined" + - "result.ansible_facts.ansible_net_config is defined" + - "result.ansible_facts.ansible_net_filesystems is defined" + +- name: Test with multiple subsets provided + os9_facts: + gather_subset: + - config + - hardware + provider: "{{ cli }}" + register: result + +- assert: + that: + - "result.changed == false" + - "result.ansible_facts is defined" + - "result.ansible_facts.ansible_net_filesystems is defined" + - "result.ansible_facts.ansible_net_memtotal_mb is defined" + - "result.ansible_facts.ansible_net_memfree_mb is defined" + - "result.ansible_facts.ansible_net_config is defined" + + + +- debug: msg="START cli/testcases_facts.yaml" diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..57ab8ae6 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt @@ -0,0 +1,4 @@ +plugins/action/os9.py action-plugin-docs +plugins/modules/os9_config.py validate-modules:parameter-list-no-elements +plugins/modules/os9_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os9_command.py validate-modules:parameter-list-no-elements \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt 
b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..57ab8ae6 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt @@ -0,0 +1,4 @@ +plugins/action/os9.py action-plugin-docs +plugins/modules/os9_config.py validate-modules:parameter-list-no-elements +plugins/modules/os9_facts.py validate-modules:parameter-list-no-elements +plugins/modules/os9_command.py validate-modules:parameter-list-no-elements \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..711efc62 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt @@ -0,0 +1 @@ +plugins/action/os9.py action-plugin-docs \ No newline at end of file diff --git a/ansible_collections/dellemc/os9/tests/sanity/requirements.txt b/ansible_collections/dellemc/os9/tests/sanity/requirements.txt new file mode 100644 index 00000000..3e3a9669 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/sanity/requirements.txt @@ -0,0 +1,4 @@ +packaging # needed for update-bundled and changelog +sphinx ; python_version >= '3.5' # docs build requires python 3+ +sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+ +straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+ diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg new file mode 100644 index 00000000..b8f62da5 --- /dev/null +++ 
b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg @@ -0,0 +1,13 @@ +! +hostname router +! +interface fortyGigE 1/6 + ip address 1.2.3.4/24 + description test string +! +interface fortyGigE 1/7 + ip address 6.7.8.9/24 + description test string + shutdown +! + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg new file mode 100644 index 00000000..7ab33387 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg @@ -0,0 +1,12 @@ +! +hostname foo +! +interface fortyGigE 1/6 + no ip address +! +interface fortyGigE 1/7 + ip address 6.7.8.9/24 + description test string + shutdown +! + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems new file mode 100644 index 00000000..1c02bb6a --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems @@ -0,0 +1,10 @@ + Size(b) Free(b) Feature Type Flags Prefixes + 6429872128 5582319616 FAT32 USERFLASH rw flash: + - - unformatted USERFLASH rw fcmfs: + 241172480 91893760 Unknown NFSMOUNT rw nfsmount: + - - - network rw ftp: + - - - network rw tftp: + - - - network rw scp: + - - - network rw http: + - - - network rw https: + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces new file mode 100644 index 00000000..5f19f38b --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces @@ -0,0 +1,1259 @@ +TenGigabitEthernet 0/0 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address 
is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1048580 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 10000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:13:21 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:16:47 + + +TenGigabitEthernet 0/1 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1048708 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 10000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:17:48 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte 
pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:16:49 + + +TenGigabitEthernet 0/2 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1048836 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 10000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:18:30 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:17:31 + + +TenGigabitEthernet 0/3 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1048964 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes 
+LineSpeed 10000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:18:33 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:17:35 + + +fortyGigE 0/4 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1049093 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:38:08 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 
seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:37:09 + + +fortyGigE 0/8 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1049605 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:38:08 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:40:18 + + +fortyGigE 0/12 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1050117 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:41:18 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 
64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:40:20 + + +fortyGigE 0/16 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1050629 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:42:41 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:41:43 + + +fortyGigE 0/20 is down, line protocol is down 
+Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1051141 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:43:10 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:42:12 + + +fortyGigE 0/24 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1051653 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:43:45 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 
packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:42:47 + + +fortyGigE 0/28 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1052165 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:44:35 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:43:37 + + +fortyGigE 0/32 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1052677 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID 
:90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:44:53 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:43:54 + + +fortyGigE 0/36 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1053189 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:46:20 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 
collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:45:21 + + +fortyGigE 0/40 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1053701 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:46:32 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:45:33 + + +fortyGigE 0/44 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1054213 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:46:56 +Queueing strategy: fifo +Input Statistics: + 
0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:45:58 + + +fortyGigE 0/48 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1054725 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:47:10 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:46:11 + + 
+fortyGigE 0/52 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1055237 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:47:22 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:46:24 + + +fortyGigE 0/56 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1055749 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:47:47 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 
overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:46:48 + + +fortyGigE 0/60 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1056261 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:47:58 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:47:00 + + +fortyGigE 0/64 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1056773 +Internet address is not set +Mode of IPv4 
Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:48:26 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:47:28 + + +fortyGigE 0/68 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1057285 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:48:38 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 
Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:47:40 + + +fortyGigE 0/72 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1057797 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:49:05 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:48:07 + + +fortyGigE 0/76 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1058309 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:49:17 
+Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:48:18 + + +fortyGigE 0/80 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1058821 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:49:36 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last 
interface status change: 13:48:37 + + +fortyGigE 0/84 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1059333 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:49:58 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:49:00 + + +fortyGigE 0/88 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1059845 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:50:12 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 
runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:49:14 + + +fortyGigE 0/92 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1060357 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:50:36 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:49:37 + + +fortyGigE 0/96 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1060869 
+Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:50:50 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:49:52 + + +fortyGigE 0/100 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1061381 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:51:16 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 
1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:50:17 + + +fortyGigE 0/104 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1061893 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:51:26 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:50:28 + + +fortyGigE 0/108 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1062405 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last 
clearing of "show interface" counters 13:51:50 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:50:52 + + +fortyGigE 0/112 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1062917 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:52:02 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 
packets/sec, 0.00% of line-rate +Time since last interface status change: 13:51:04 + + +fortyGigE 0/116 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1063429 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:52:14 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:51:15 + + +fortyGigE 0/120 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1063941 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:52:44 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte 
pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:51:45 + + +fortyGigE 0/124 is down, line protocol is down +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f +Pluggable media not present +Interface index is 1064453 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 40000 Mbit +Flowcontrol rx off tx off +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:52:55 +Queueing strategy: fifo +Input Statistics: + 0 packets, 0 bytes + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 runts, 0 giants, 0 throttles + 0 CRC, 0 overrun, 0 discarded +Output Statistics: + 0 packets, 0 bytes, 0 underruns + 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts + 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts + 0 Multicasts, 0 Broadcasts, 0 Unicasts + 0 throttles, 0 discarded, 0 collisions, 0 wreddrops +Rate info (interval 299 seconds): + Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate + Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate +Time since last interface status change: 13:51:56 + + +ManagementEthernet 0/0 is up, line protocol is up +Hardware is DellEth, address is 90:b1:1c:f4:a2:8f + Current address is 90:b1:1c:f4:a2:8f 
+Pluggable media not present +Interface index is 7340033 +Internet address is 10.16.148.71/16 +Mode of IPv4 Address Assignment : MANUAL +DHCP Client-ID(61): 90b11cf4a28f +Virtual-IP is not set +Virtual-IP IPv6 address is not set +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed 1000 Mbit, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:52:17 +Queueing strategy: fifo + Input 111338 packets, 7239813 bytes, 96163 multicast + Received 0 errors, 0 discarded + Output 8316 packets, 1491845 bytes, 0 multicast + Output 0 errors, 0 invalid protocol +Time since last interface status change: 13:52:13 + + +ManagementEthernet 1/0 is up, line protocol is not present +Hardware is DellEth, address is not set +Interface index is 8388609 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Queueing strategy: fifo +Time since last interface status change: 13:52:33 + + +ManagementEthernet 2/0 is up, line protocol is not present +Hardware is DellEth, address is not set +Interface index is 9437185 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Queueing strategy: fifo +Time since last interface status change: 13:52:33 + + +ManagementEthernet 3/0 is up, line protocol is not present +Hardware is DellEth, address is not set +Interface index is 10485761 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Queueing strategy: fifo +Time since last interface status change: 13:52:43 + + +ManagementEthernet 4/0 is up, line protocol is not present +Hardware is DellEth, address is not set 
+Interface index is 11534337 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Queueing strategy: fifo +Time since last interface status change: 13:52:43 + + +ManagementEthernet 5/0 is up, line protocol is not present +Hardware is DellEth, address is not set +Interface index is 12582913 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto, Mode full duplex +ARP type: ARPA, ARP Timeout 04:00:00 +Queueing strategy: fifo +Time since last interface status change: 13:52:53 + + +Vlan 1 is down, line protocol is down +Address is 90:b1:1c:f4:a2:8f, Current address is 90:b1:1c:f4:a2:8f +Interface index is 1275068928 +Internet address is not set +Mode of IPv4 Address Assignment : NONE +DHCP Client-ID :90b11cf4a28f +MTU 1554 bytes, IP MTU 1500 bytes +LineSpeed auto +ARP type: ARPA, ARP Timeout 04:00:00 +Last clearing of "show interface" counters 13:53:06 +Queueing strategy: fifo +Time since last interface status change: 13:53:06 +Input Statistics: + 0 packets, 0 bytes +Output Statistics: + 0 packets, 0 bytes + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory new file mode 100644 index 00000000..90c0295e --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory @@ -0,0 +1,19 @@ +System Type : S6000 +System Mode : 1.0 +Software Version : 9.12(0.0) + +Unit Type Serial Number Part Number Rev Piece Part ID Rev Svc Tag Exprs Svc Code +-------------------------------------------------------------------------------------------------------------- +* 0 S6000-01-FE-32T NA 08YWFG A00 CN-08YWFG-28298-3AG-0031 A00 6BJ8VS1 137 581 490 89 + 0 S6000-PWR-AC 
NA 0T9FNW A00 CN-0T9FNW-28298-3AG-0119 A00 NA NA + 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0094 A00 NA NA + 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0096 A00 NA NA + 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0095 A00 NA NA + + * - Management Unit + + +Software Protocol Configured +-------------------------------------------------------------- + LLDP + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface new file mode 100644 index 00000000..0cc43da9 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface @@ -0,0 +1,26 @@ +fortyGigE 0/16 is down, line protocol is down + IPV6 is enabled + Link Local address: fe80::92b1:1cff:fef4:a28f + Global Unicast address(es): + 2001:4898:5808:ffa2::5, subnet is 2001:4898:5808:ffa2::4/126 (MANUAL) + Remaining lifetime: infinite + Global Anycast address(es): + Joined Group address(es): + ff02::1 + ff02::2 + ff02::1:ff00:5 + ff02::1:fff4:a28f + IP MTU is 1500 bytes + ND MTU is 0 + ICMP redirects are not sent + DAD is enabled, number of DAD attempts: 3 + ND reachable time is 35780 milliseconds + ND base reachable time is 30000 milliseconds + ND advertised reachable time is 0 milliseconds + ND advertised retransmit interval is 0 milliseconds + ND router advertisements are sent every 198 to 600 seconds + ND router advertisements live for 1800 seconds + ND advertised hop limit is 64 + IPv6 hop limit for originated packets is 64 + IPv6 unicast RPF check is not supported + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail new file mode 100644 index 00000000..a868571c --- /dev/null +++ 
b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail @@ -0,0 +1,35 @@ +======================================================================== + Local Interface Ma 0/0 has 1 neighbor + Total Frames Out: 1747 + Total Frames In: 10333 + Total Neighbor information Age outs: 0 + Total Multiple Neighbors Detected: 0 + Total Frames Discarded: 0 + Total In Error Frames: 0 + Total Unrecognized TLVs: 0 + Total TLVs Discarded: 0 + Next packet will be sent after 17 seconds + The neighbors are given below: + ----------------------------------------------------------------------- + + Remote Chassis ID Subtype: Mac address (4) + Remote Chassis ID: 90:b1:1c:f4:2f:6d + Remote Port Subtype: Interface name (5) + Remote Port ID: TenGigabitEthernet 0/33 + Remote Port Description: TenGigabitEthernet 0/33 + Local Port ID: ManagementEthernet 0/0 + Locally assigned remote Neighbor Index: 1 + Remote TTL: 20 + Information valid for next 17 seconds + Time since last information change of this neighbor: 14:54:48 + Remote System Name: swlab1-maa-tor-A2 + Remote System Desc: Dell Real Time Operating System Software. Dell + Operating System Version: 2.0. Dell Application Software Version: + 9.11(2.0) Copyright (c) 1999-2017Dell Inc. 
All Rights Reserved.Build + Time: Tue Apr 25 21:22:59 2017 + Existing System Capabilities: Repeater Bridge Router + Enabled System Capabilities: Repeater Bridge Router + Remote Port Vlan ID: 148 + Port and Protocol Vlan ID: 148, Capability: Supported, Status: Enabled + --------------------------------------------------------------------------- + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor new file mode 100644 index 00000000..c2f65415 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor @@ -0,0 +1,4 @@ + =========================== + Total(b) Used(b) Free(b) Lowest(b) Largest(b) + 3203911680 3172120 3200739560 3200673304 3200739560 + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config new file mode 100644 index 00000000..4804ebba --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config @@ -0,0 +1,238 @@ +Current Configuration ... +! Version 9.12(0.0) +! Last configuration change at Thu Jan 11 06:53:29 2018 by admin +! +! +logging coredump stack-unit 0 +logging coredump stack-unit 1 +logging coredump stack-unit 2 +logging coredump stack-unit 3 +logging coredump stack-unit 4 +logging coredump stack-unit 5 +! +hostname Dell +! +protocol lldp +! +redundancy auto-synchronize full +! +enable password 7 b125455cf679b208e79b910e85789edf +! +username admin password 7 1d28e9f33f99cf5c +! +stack-unit 0 quad-port-profile 0,8,16,24,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,100,108,116,124 +! +stack-unit 0 provision S6000 +! +stack-unit 0 port 0 portmode quad +! +interface TenGigabitEthernet 0/0 + no ip address + shutdown +! 
+interface TenGigabitEthernet 0/1 + no ip address + shutdown +! +interface TenGigabitEthernet 0/2 + no ip address + shutdown +! +interface TenGigabitEthernet 0/3 + no ip address + shutdown +! +interface fortyGigE 0/4 + no ip address + shutdown +! +interface fortyGigE 0/8 + no ip address + shutdown +! +interface fortyGigE 0/12 + no ip address + shutdown +! +interface fortyGigE 0/16 + no ip address + ipv6 address 2001:4898:5808:ffa2::5/126 + shutdown +! +interface fortyGigE 0/20 + no ip address + switchport + ip access-group ipv6-ssh-only in + shutdown +! +interface fortyGigE 0/24 + no ip address + switchport + mac access-group ssh-only-mac in + mac access-group ssh-only-mac out + shutdown +! +interface fortyGigE 0/28 + no ip address + switchport + mac access-group ssh-only-mac in + mac access-group ssh-only-mac out + shutdown +! +interface fortyGigE 0/32 + no ip address + switchport + ip access-group ipv6-ssh-only out + shutdown +! +interface fortyGigE 0/36 + no ip address + shutdown +! +interface fortyGigE 0/40 + no ip address + shutdown +! +interface fortyGigE 0/44 + no ip address + shutdown +! +interface fortyGigE 0/48 + no ip address + shutdown +! +interface fortyGigE 0/52 + no ip address + shutdown +! +interface fortyGigE 0/56 + no ip address + shutdown +! +interface fortyGigE 0/60 + no ip address + shutdown +! +interface fortyGigE 0/64 + no ip address + shutdown +! +interface fortyGigE 0/68 + no ip address + shutdown +! +interface fortyGigE 0/72 + no ip address + shutdown +! +interface fortyGigE 0/76 + no ip address + shutdown +! +interface fortyGigE 0/80 + no ip address + shutdown +! +interface fortyGigE 0/84 + no ip address + shutdown +! +interface fortyGigE 0/88 + no ip address + shutdown +! +interface fortyGigE 0/92 + no ip address + shutdown +! +interface fortyGigE 0/96 + no ip address + shutdown +! +interface fortyGigE 0/100 + no ip address + shutdown +! +interface fortyGigE 0/104 + no ip address + shutdown +! 
+interface fortyGigE 0/108 + no ip address + shutdown +! +interface fortyGigE 0/112 + no ip address + shutdown +! +interface fortyGigE 0/116 + no ip address + shutdown +! +interface fortyGigE 0/120 + no ip address + shutdown +! +interface fortyGigE 0/124 + no ip address + shutdown +! +interface ManagementEthernet 0/0 + ip address 10.16.148.71/16 + no shutdown +! +interface ManagementEthernet 1/0 + no shutdown +! +interface ManagementEthernet 2/0 + no shutdown +! +interface ManagementEthernet 3/0 + no shutdown +! +interface ManagementEthernet 4/0 + no shutdown +! +interface ManagementEthernet 5/0 + no shutdown +! +interface Vlan 1 +!untagged fortyGigE 0/20-32 +! +ipv6 access-list ipv6-ssh-only + description ipv6acl + remark 1 ipv6 + seq 10 permit ipv6 2001:4898::/32 any + seq 20 permit tcp any eq 2 2404:f801::/32 + seq 30 permit tcp any 2a01:110::/31 ack + seq 40 permit tcp any any +! +mac access-list extended ssh-only-mac + description macacl + remark 1 mac + seq 5 permit any any count + seq 6 deny any any +! +ip ssh server enable +! +line console 0 +line vty 0 +line vty 1 + access-class ipv6-ssh-only ipv6 +line vty 2 + access-class ipv6-ssh-only ipv6 +line vty 3 + access-class ipv6-ssh-only ipv6 +line vty 4 +line vty 5 +line vty 6 +line vty 7 +line vty 8 +line vty 9 +! +reload-type + boot-type normal-reload + config-scr-download enable +! 
+end + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname new file mode 100644 index 00000000..9a2c181a --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname @@ -0,0 +1 @@ +hostname os9_sw1 diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version new file mode 100644 index 00000000..e385cf3e --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version @@ -0,0 +1,18 @@ +Dell Real Time Operating System Software +Dell Operating System Version: 2.0 +Dell Application Software Version: 9.10(0.1P13) +Copyright (c) 1999-2016 by Dell Inc. All Rights Reserved. +Build Time: Wed Sep 7 23:48:35 2016 +Build Path: /sites/eqx/work/swbuild01_1/build01/E9-10-0/SW/SRC +Dell Networking OS uptime is 12 week(s), 6 day(s), 9 hour(s), 20 minute(s) + +System image file is "system://A" + +System Type: S6000-ON +Control Processor: Intel Centerton with 3 Gbytes (3203911680 bytes) of memory, core(s) 2. + +16G bytes of boot flash memory. + + 1 32-port TE/FG (SI-ON) + 32 Forty GigabitEthernet/IEEE 802.3 interface(s) + diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py new file mode 100644 index 00000000..57ea4e68 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py @@ -0,0 +1,88 @@ +# (c) 2020 Red Hat Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import json + +from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase + + +fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') +fixture_data = {} + + +def load_fixture(name): + path = os.path.join(fixture_path, name) + + if path in fixture_data: + return fixture_data[path] + + with open(path) as f: + data = f.read() + + try: + data = json.loads(data) + except Exception: + pass + + fixture_data[path] = data + return data + + +class TestDellos9Module(ModuleTestCase): + + def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False): + + self.load_fixtures(commands) + + if failed: + result = self.failed() + self.assertTrue(result['failed'], result) + else: + result = self.changed(changed) + self.assertEqual(result['changed'], changed, result) + + if commands is not None: + if sort: + self.assertEqual(sorted(commands), sorted(result['updates']), result['updates']) + else: + self.assertEqual(commands, result['updates'], result['updates']) + + return result + + def failed(self): + with self.assertRaises(AnsibleFailJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertTrue(result['failed'], result) + return 
result + + def changed(self, changed=False): + with self.assertRaises(AnsibleExitJson) as exc: + self.module.main() + + result = exc.exception.args[0] + self.assertEqual(result['changed'], changed, result) + return result + + def load_fixtures(self, commands=None): + pass diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py new file mode 100644 index 00000000..6353d8f5 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py @@ -0,0 +1,108 @@ +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.compat.tests.mock import patch +from ansible_collections.dellemc.os9.plugins.modules import os9_command +from units.modules.utils import set_module_args +from .os9_module import TestDellos9Module, load_fixture + + +class TestDellos9CommandModule(TestDellos9Module): + + module = os9_command + + def setUp(self): + super(TestDellos9CommandModule, self).setUp() + + self.mock_run_commands = patch('ansible.modules.network.os9.os9_command.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos9CommandModule, self).tearDown() + self.mock_run_commands.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item['command']) + command = obj['command'] + except ValueError: + command = item['command'] + filename = str(command).replace(' ', '_') + output.append(load_fixture(filename)) + return output + + self.run_commands.side_effect = load_from_file + + def test_os9_command_simple(self): + set_module_args(dict(commands=['show version'])) + result = self.execute_module() + self.assertEqual(len(result['stdout']), 1) + self.assertTrue(result['stdout'][0].startswith('Dell Real Time')) + + def test_os9_command_multiple(self): + set_module_args(dict(commands=['show version', 'show version'])) + result = self.execute_module() + self.assertEqual(len(result['stdout']), 2) + self.assertTrue(result['stdout'][0].startswith('Dell Real Time')) + + def test_os9_command_wait_for(self): + wait_for = 'result[0] contains "Dell Real"' + set_module_args(dict(commands=['show version'], wait_for=wait_for)) + self.execute_module() + + def test_os9_command_wait_for_fails(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show 
version'], wait_for=wait_for)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 10) + + def test_os9_command_retries(self): + wait_for = 'result[0] contains "test string"' + set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2)) + self.execute_module(failed=True) + self.assertEqual(self.run_commands.call_count, 2) + + def test_os9_command_match_any(self): + wait_for = ['result[0] contains "Dell Real"', + 'result[0] contains "test string"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any')) + self.execute_module() + + def test_os9_command_match_all(self): + wait_for = ['result[0] contains "Dell Real"', + 'result[0] contains "Operating System"'] + set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all')) + self.execute_module() + + def test_os9_command_match_all_failure(self): + wait_for = ['result[0] contains "Dell Real"', + 'result[0] contains "test string"'] + commands = ['show version', 'show version'] + set_module_args(dict(commands=commands, wait_for=wait_for, match='all')) + self.execute_module(failed=True) diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py new file mode 100644 index 00000000..8c159eb5 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py @@ -0,0 +1,148 @@ +# +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests.mock import patch +from ansible_collections.dellemc.os9.plugins.modules import os9_config +from units.modules.utils import set_module_args +from .os9_module import TestDellos9Module, load_fixture + + +class TestDellos9ConfigModule(TestDellos9Module): + + module = os9_config + + def setUp(self): + super(TestDellos9ConfigModule, self).setUp() + + self.mock_get_config = patch('ansible.modules.network.os9.os9_config.get_config') + self.get_config = self.mock_get_config.start() + + self.mock_load_config = patch('ansible.modules.network.os9.os9_config.load_config') + self.load_config = self.mock_load_config.start() + + self.mock_run_commands = patch('ansible.modules.network.os9.os9_config.run_commands') + self.run_commands = self.mock_run_commands.start() + + def tearDown(self): + super(TestDellos9ConfigModule, self).tearDown() + self.mock_get_config.stop() + self.mock_load_config.stop() + self.mock_run_commands.stop() + + def load_fixtures(self, commands=None): + config_file = 'os9_config_config.cfg' + self.get_config.return_value = load_fixture(config_file) + self.load_config.return_value = None + + def test_os9_config_unchanged(self): + src = load_fixture('os9_config_config.cfg') + set_module_args(dict(src=src)) + self.execute_module() + + def test_os9_config_src(self): + src = load_fixture('os9_config_src.cfg') + set_module_args(dict(src=src)) + commands = ['hostname foo', 'interface fortyGigE 1/6', + 'no ip address'] + self.execute_module(changed=True, commands=commands) + + def test_os9_config_backup(self): + set_module_args(dict(backup=True)) + result = self.execute_module() + self.assertIn('__backup__', result) + + def test_os9_config_save(self): + 
set_module_args(dict(save=True)) + self.execute_module(changed=True) + self.assertEqual(self.run_commands.call_count, 1) + self.assertEqual(self.get_config.call_count, 0) + self.assertEqual(self.load_config.call_count, 0) + args = self.run_commands.call_args[0][1] + self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0]) +# self.assertIn('copy running-config startup-config\r', args) + + def test_os9_config_lines_wo_parents(self): + set_module_args(dict(lines=['hostname foo'])) + commands = ['hostname foo'] + self.execute_module(changed=True, commands=commands) + + def test_os9_config_lines_w_parents(self): + set_module_args(dict(lines=['shutdown'], parents=['interface fortyGigE 1/6'])) + commands = ['interface fortyGigE 1/6', 'shutdown'] + self.execute_module(changed=True, commands=commands) + + def test_os9_config_before(self): + set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar'])) + commands = ['snmp-server contact bar', 'hostname foo'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os9_config_after(self): + set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar'])) + commands = ['hostname foo', 'snmp-server contact bar'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os9_config_before_after_no_change(self): + set_module_args(dict(lines=['hostname router'], + before=['snmp-server contact bar'], + after=['snmp-server location chennai'])) + self.execute_module() + + def test_os9_config_config(self): + config = 'hostname localhost' + set_module_args(dict(lines=['hostname router'], config=config)) + commands = ['hostname router'] + self.execute_module(changed=True, commands=commands) + + def test_os9_config_replace_block(self): + lines = ['description test string', 'test string'] + parents = ['interface fortyGigE 1/6'] + set_module_args(dict(lines=lines, replace='block', parents=parents)) + commands = parents + lines + 
self.execute_module(changed=True, commands=commands) + + def test_os9_config_match_none(self): + lines = ['hostname router'] + set_module_args(dict(lines=lines, match='none')) + self.execute_module(changed=True, commands=lines) + + def test_os9_config_match_none(self): + lines = ['ip address 1.2.3.4/24', 'description test string'] + parents = ['interface fortyGigE 1/6'] + set_module_args(dict(lines=lines, parents=parents, match='none')) + commands = parents + lines + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os9_config_match_strict(self): + lines = ['ip address 1.2.3.4/24', 'description test string', + 'shutdown'] + parents = ['interface fortyGigE 1/6'] + set_module_args(dict(lines=lines, parents=parents, match='strict')) + commands = parents + ['shutdown'] + self.execute_module(changed=True, commands=commands, sort=False) + + def test_os9_config_match_exact(self): + lines = ['ip address 1.2.3.4/24', 'description test string', + 'shutdown'] + parents = ['interface fortyGigE 1/6'] + set_module_args(dict(lines=lines, parents=parents, match='exact')) + commands = parents + lines + self.execute_module(changed=True, commands=commands, sort=False) diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py new file mode 100644 index 00000000..2a563ef1 --- /dev/null +++ b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py @@ -0,0 +1,106 @@ +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.compat.tests.mock import patch +from units.modules.utils import set_module_args +from .os9_module import TestDellos9Module, load_fixture +from ansible_collections.dellemc.os9.plugins.modules import os9_facts + + +class TestDellos9Facts(TestDellos9Module): + + module = os9_facts + + def setUp(self): + super(TestDellos9Facts, self).setUp() + + self.mock_run_command = patch( + 'ansible.modules.network.os9.os9_facts.run_commands') + self.run_command = self.mock_run_command.start() + + def tearDown(self): + super(TestDellos9Facts, self).tearDown() + + self.mock_run_command.stop() + + def load_fixtures(self, commands=None): + + def load_from_file(*args, **kwargs): + module, commands = args + output = list() + + for item in commands: + try: + obj = json.loads(item) + command = obj['command'] + except ValueError: + command = item + if '|' in command: + command = str(command).replace('|', '') + filename = str(command).replace(' ', '_') + filename = filename.replace('/', '7') + output.append(load_fixture(filename)) + return output + + self.run_command.side_effect = load_from_file + + def test_os9_facts_gather_subset_default(self): + set_module_args(dict()) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('os9_sw1', 
ansible_facts['ansible_net_hostname']) + self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(3128820, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(3125722, ansible_facts['ansible_net_memfree_mb']) + + def test_os9_facts_gather_subset_config(self): + set_module_args({'gather_subset': 'config'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('config', ansible_facts['ansible_net_gather_subset']) + self.assertEquals('os9_sw1', ansible_facts['ansible_net_hostname']) + self.assertIn('ansible_net_config', ansible_facts) + + def test_os9_facts_gather_subset_hardware(self): + set_module_args({'gather_subset': 'hardware'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('hardware', ansible_facts['ansible_net_gather_subset']) + self.assertEquals(['flash', 'fcmfs', 'nfsmount', 'ftp', 'tftp', 'scp', 'http', 'https'], ansible_facts['ansible_net_filesystems']) + self.assertEquals(3128820, ansible_facts['ansible_net_memtotal_mb']) + self.assertEquals(3125722, ansible_facts['ansible_net_memfree_mb']) + + def test_os9_facts_gather_subset_interfaces(self): + set_module_args({'gather_subset': 'interfaces'}) + result = self.execute_module() + ansible_facts = result['ansible_facts'] + self.assertIn('default', ansible_facts['ansible_net_gather_subset']) + self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset']) + self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys()) + self.assertEquals(['Ma 0/0'], ansible_facts['ansible_net_neighbors'].keys()) + self.assertIn('ansible_net_interfaces', ansible_facts) diff --git a/ansible_collections/dellemc/powerflex/CHANGELOG.rst b/ansible_collections/dellemc/powerflex/CHANGELOG.rst new file mode 100644 index 
00000000..707509dc --- /dev/null +++ b/ansible_collections/dellemc/powerflex/CHANGELOG.rst @@ -0,0 +1,95 @@ +=============================== +Dellemc.PowerFlex Change Logs +=============================== + +.. contents:: Topics + + +v1.5.0 +====== + +Minor Changes +------------- + +- Info module is enhanced to support the listing replication consistency groups. +- Renamed gateway_host to hostname +- Renamed verifycert to validate_certs. +- Updated modules to adhere with ansible community guidelines. + +New Modules +----------- + +- dellemc.powerflex.replication_consistency_group - Manage replication consistency groups on Dell PowerFlex + +v1.4.0 +====== + +Minor Changes +------------- + +- Added support for 4.0.x release of PowerFlex OS. +- Info module is enhanced to support the listing volumes and storage pools with statistics data. +- Storage pool module is enhanced to get the details with statistics data. +- Volume module is enhanced to get the details with statistics data. + +v1.3.0 +====== + +Minor Changes +------------- + +- Added execution environment manifest file to support building an execution environment with ansible-builder. +- Enabled the check_mode support for info module + +New Modules +----------- + +- dellemc.powerflex.mdm_cluster - Manage MDM cluster on Dell PowerFlex + +v1.2.0 +====== + +Minor Changes +------------- + +- Names of previously released modules have been changed from dellemc_powerflex_\ to \. + +New Modules +----------- + +- dellemc.powerflex.protection_domain - Manage Protection Domain on Dell PowerFlex + +v1.1.1 +====== + +Deprecated Features +------------------- + +- The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info + +v1.1.0 +====== + +Minor Changes +------------- + +- Added dual licensing. +- Gatherfacts module is enhanced to list devices. 
+ +New Modules +----------- + +- dellemc.powerflex.device - Manage device on Dell PowerFlex +- dellemc.powerflex.sds - Manage SDS on Dell PowerFlex + +v1.0.0 +====== + +New Modules +----------- + +- dellemc.powerflex.info - Gathering information about Dell PowerFlex +- dellemc.powerflex.sdc - Manage SDCs on Dell PowerFlex +- dellemc.powerflex.snapshot - Manage Snapshots on Dell PowerFlex +- dellemc.powerflex.storagepool - Managing Dell PowerFlex storage pool +- dellemc.powerflex.volume - Manage volumes on Dell PowerFlex diff --git a/ansible_collections/dellemc/powerflex/FILES.json b/ansible_collections/dellemc/powerflex/FILES.json new file mode 100644 index 00000000..58a2dfd4 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/FILES.json @@ -0,0 +1,530 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "796d9d259e4da6c8655b665586ffaf095d1a5a45b18c768b5ba748385910cf8e", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3593e8970a00d95a557377bd7af2f50e5212620def3ed4134c989a33dfd8ec4f", + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6275c6cd307d5afbdf72bd71abcffea19e755fd452f37be43d7036dc1ed4d5a4", + "format": 1 + }, + { + "name": "MODULE-LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "33b23a8f0c817a8e2648e255269cb51dc3882c613cc9e94c1c605d885a163f4c", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/powerflex.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "042fd3430b1ecc1ce01ce3efafa2f2d0fca1d814b891a756f686b5b543eb3bef", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/logging_handler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdd1b7ef81bde300864051fe060d5f393c074201279e3f739584a5c2c44153a2", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87e9c4d3570ace6a236080e285e3b3c12b4e5c763064334b861ddb38ea37b264", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/sdc.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8104d04864a2270a5ceb5cf7e66f681125dec8510faf60d0054f4ae0e8739c2", + "format": 1 + }, + { + "name": "plugins/modules/protection_domain.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "53142b5ab071e1b841af1857e0408df1d2d29b9e5a261a04b0acf1e84a3eb851", + "format": 1 + }, + { + "name": "plugins/modules/mdm_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bd51cc09a4f91e05d96d696600149275ffb2c1904b3ecdaf794212da1bb90a5", + "format": 1 + }, + { + "name": "plugins/modules/snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f653d2f3c1f0dc8a2d496cab89e240fe7a77e7d3d5e7f88a47f718ae0dbc07c", + "format": 1 + }, + { + "name": "plugins/modules/info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d8525b1e2b4a5ef671b93082d329193331181b4a85c37a0e8e5b172d8c9734c", + "format": 1 + }, + { + "name": "plugins/modules/replication_consistency_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e187923e495ab03d4f52f5001a450aed291500f0f9a8d5b03645bec0907131a6", + "format": 1 + }, + { + "name": "plugins/modules/sds.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc02c40e9b6d49c9df942e275de073a4854cfb6e0b27f2a876583e3d094b7803", + "format": 1 + }, + { + "name": "plugins/modules/storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f9c9179a1cfe50510946135ee8ff0eb7b9b027e4f7d7afa53cc20e35f6a1b5d", + "format": 1 + }, + { + "name": "plugins/modules/device.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9531ecfeaa468b126d90e0932723d12d434dd604a089de4770b0b2dfcd5b9253", + "format": 1 + }, + { + "name": "plugins/modules/volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c8e0677e4071d2288a6b13969279e10873e41410eaabf80c344f56206dcedb9", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1ef9cddda5a94dce31f5e8f4719e3e963adec5c2295f8f0c584faddcc0bf110", + "format": 1 + }, + { + "name": "tests/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_volume_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e61698f03d1a7ec229c5ffb6a4def656e806e5dd234a0e15b2136fba839a2d7", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_mdm_cluster_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a319347b17bfd7eca567a2a259313809b2ef3e3a33e545cc2b2a4b5187d27ee4", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bf628a051d160856652bda22e45d35303db612874edc75ed4e2e8b4a270fba3", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/module_utils/mock_api_exception.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a9639349df9561e15df73ad72a28fb0120121b9ef2f8f72e6a7ef8c01c1edeb", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_info_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c47b5eeb60987f156d5891f514ec891099e39e42b42fb7e9ec3f91125491ed1", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_sdk_response.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82b029313ed53922594cdabcf9129708a5a1ee8b4b00382994ed054e58637b89", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_protection_domain_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1005c8842b81ff4c5613992e5f80fb25cfa6ac36d1a9274e574caf04d7510584", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_storagepool_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2552190a68b46919f44632fe65972825624ab201e330771e1992e57de253d27", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f78779e6b346b612915d92323ca0d1850e68da65b95f32205cc585b622b48be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4742f5675d613a39bd5929b24e6f515d034bebf8defc1c98bb8fe69444785015", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"66d4d0adaaa1197363ffe1f25e4c5f7def1b6996fdcf5678af40528c5652a64b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_replication_consistency_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7071e60cf4c57f36a59be9249964ceb9ef6ed0b8b6dca8a86a62515dbea087d3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_mdm_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a242b57ed85421cb8823e0814484d077407f00c761e23169542ac34cc9aa0d3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_protection_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d2d1e320a857f994db4e48ce5406ca5bbfe21cd7c37d9f8d3bb1e07db8d333e", + "format": 1 + }, + { + "name": "tests/unit/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b02f408522b5f335ac6c1ef0d7ee5dd72a0a24492de516545ac4071d315882db", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98ddfc70b930fb944fb944d6e981fd33da656495bca6b5f56562144546149252", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2969cb15eb1ae0ca0f5033d427a1ff70fe84bcf1a655c1e51059d8d03f34ebb3", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7ab69438be06910388bd76d2cc4cc60b31bf5b501513435673879d5df230a0a", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0fb1c23651bf566498150c8eda7e9baa2aa6f7e8436bfe6928dfc202b3e1817", + 
"format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/Product Guide.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26906ca9341bd49c5ed3d7c8c633d570d7f92f91c5b595460e47003cc6a7fc89", + "format": 1 + }, + { + "name": "docs/CODE_OF_CONDUCT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10e328fb91562dc4e0b6c5da59f213295b4f25af28a53235612698e4a8465acc", + "format": 1 + }, + { + "name": "docs/ISSUE_TRIAGE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "530744bbed97aad7ce03e7d367ab53882a53a69b2307f7a1ded46f19a6d410ab", + "format": 1 + }, + { + "name": "docs/SUPPORT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5ff5c4c06bcd6e7432653d13fab3fbf230685ae0337f6ecabec6f38aacb0ef7", + "format": 1 + }, + { + "name": "docs/Release Notes.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c83ab7f6bcbfe80040e285730b0309ce9457839880babfd1d14de8dd4bb73650", + "format": 1 + }, + { + "name": "docs/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1579da61311237aaa707ceea81dff9269d6826fdf90f32c5fcf9b513d2c4513", + "format": 1 + }, + { + "name": "docs/INSTALLATION.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b07d28298ed19f6de063a772a607f4cb865d2e706c4bd676599ab7fc49c64dc", + "format": 1 + }, + { + "name": "docs/MAINTAINER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc7f5f908d7eb407ce9f93982892b7615e99a6abdb0f836b4b0e59ab2b3c1dcb", + "format": 1 + }, + { + "name": "docs/SECURITY.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "062552a1455d62b9e3c6683ad9d1b42343a82f7ffc33ac412fce7e40d1f52c28", + "format": 1 + }, + { + "name": "docs/BRANCHING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"06567f4a7d818fbc89030b88b40ac7d7965e42c4a3408e7e8629662a8104e215", + "format": 1 + }, + { + "name": "docs/COMMITTER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b41b38fe09cfcbfb4499c39ed4822a9f8c3f5d562e68dad45b5f2389f18053b5", + "format": 1 + }, + { + "name": "docs/ADOPTERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c81933a41503275063d789f0685472e1603e4614376f3918b42c4bfb210a2c01", + "format": 1 + }, + { + "name": "docs/MAINTAINERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e748fd39a38ac2a61aa6f48eac2179dffcc5a3e8f261f54042946d969bbfadf6", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/LICENSE b/ansible_collections/dellemc/powerflex/LICENSE new file mode 100644 index 00000000..e72bfdda --- /dev/null +++ b/ansible_collections/dellemc/powerflex/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/MANIFEST.json b/ansible_collections/dellemc/powerflex/MANIFEST.json new file mode 100644 index 00000000..eecd338a --- /dev/null +++ b/ansible_collections/dellemc/powerflex/MANIFEST.json @@ -0,0 +1,39 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "powerflex", + "version": "1.5.0", + "authors": [ + "Akash Shendge ", + "Arindam Datta ", + "P Srinivas Rao ", + "Rajshree Khare ", + "Bhavneet Sharma ", + "Ananthu S Kuttattu ", + "Trisha Datta " + ], + "readme": "README.md", + "tags": [ + "storage" + ], + "description": "Ansible modules for PowerFlex", + "license": [ + "GPL-3.0-or-later", + "Apache-2.0" + ], + "license_file": null, + "dependencies": {}, + "repository": "https://github.com/dell/ansible-powerflex/tree/1.5.0", + "documentation": "https://github.com/dell/ansible-powerflex/tree/1.5.0/docs", + "homepage": "https://github.com/dell/ansible-powerflex/tree/1.5.0", + "issues": "https://www.dell.com/community/Automation/bd-p/Automation" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d8254015c7f5f969370d97d3badb243137e3e8a2f13cbfea625fc543f6f685b", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/MODULE-LICENSE b/ansible_collections/dellemc/powerflex/MODULE-LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/MODULE-LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible_collections/dellemc/powerflex/README.md b/ansible_collections/dellemc/powerflex/README.md new file mode 100644 index 00000000..e15e7a54 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/README.md @@ -0,0 +1,60 @@ +# Ansible Modules for Dell Technologies PowerFlex + +The Ansible Modules for Dell Technologies (Dell) PowerFlex allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the provisioning and management of Dell PowerFlex storage systems. + +The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, storage pools, replication consistency groups, SDSs, devices, protection domains, MDM cluster, and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. 
These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request. + +## Table of contents + +* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CODE_OF_CONDUCT.md) +* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/MAINTAINER_GUIDE.md) +* [Committer guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/COMMITTER_GUIDE.md) +* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CONTRIBUTING.md) +* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/BRANCHING.md) +* [List of adopters](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/ADOPTERS.md) +* [Maintainers](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/MAINTAINERS.md) +* [Support](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/SUPPORT.md) +* [License](#license) +* [Security](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/SECURITY.md) +* [Prerequisites](#prerequisites) +* [List of Ansible modules for Dell PowerFlex](#list-of-ansible-modules-for-dell-powerflex) +* [Installation and execution of Ansible modules for Dell PowerFlex](#installation-and-execution-of-ansible-modules-for-dell-powerflex) +* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation) + +## License +The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/1.5.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/1.5.0/MODULE-LICENSE) for the full terms. 
+ +## Prerequisites + +| **Ansible Modules** | **PowerFlex/VxFlex OS Version** | **SDK version** | **Python version** | **Ansible** | +|---------------------|-----------------------|-------|--------------------|--------------------------| +| v1.5.0 |3.5
3.6
4.0 | 1.6.0 | 3.9.x
3.10.x
3.11.x | 2.12
2.13
2.14 | + + * Please follow PyPowerFlex installation instructions on [PyPowerFlex Documentation](https://github.com/dell/python-powerflex) + +## Idempotency +The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. + +## List of Ansible modules for Dell PowerFlex + * [Info module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#info-module) + * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#snapshot-module) + * [SDC module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#sdc-module) + * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#storage-pool-module) + * [Volume module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#volume-module) + * [SDS module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#sds-module) + * [Device Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#device-module) + * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#protection-domain-module) + * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/Product%20Guide.md#mdm-cluster-module) + +## Installation and execution of Ansible modules for Dell PowerFlex +The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/INSTALLATION.md). + +## Releasing, Maintenance and Deprecation + +Ansible Modules for Dell Technologies PowerFlex follows [Semantic Versioning](https://semver.org/). + +New versions will be released regularly if significant changes (bug fix or new feature) are made in the collection. 
+ +Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/BRANCHING.md). + +Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). \ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..733ca5d8 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml @@ -0,0 +1,67 @@ +objects: + role: {} +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + device: + description: Manage device on Dell PowerFlex + name: device + namespace: '' + version_added: 1.1.0 + info: + description: Gathering information about Dell PowerFlex + name: info + namespace: '' + version_added: 1.0.0 + mdm_cluster: + description: Manage MDM cluster on Dell PowerFlex + name: mdm_cluster + namespace: '' + version_added: 1.3.0 + protection_domain: + description: Manage Protection Domain on Dell PowerFlex + name: protection_domain + namespace: '' + version_added: 1.2.0 + replication_consistency_group: + description: Manage replication consistency groups on Dell PowerFlex + name: replication_consistency_group + namespace: '' + version_added: 1.5.0 + sdc: + description: Manage SDCs on Dell PowerFlex + name: sdc + namespace: '' + version_added: 1.0.0 + sds: + description: Manage SDS on Dell PowerFlex + name: sds + namespace: '' + version_added: 1.1.0 + snapshot: + description: Manage Snapshots on Dell PowerFlex + name: snapshot + namespace: '' + version_added: 1.0.0 + storagepool: + description: Managing Dell 
PowerFlex storage pool + name: storagepool + namespace: '' + version_added: 1.0.0 + volume: + description: Manage volumes on Dell PowerFlex + name: volume + namespace: '' + version_added: 1.0.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 1.5.0 diff --git a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml new file mode 100644 index 00000000..80c2934c --- /dev/null +++ b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml @@ -0,0 +1,82 @@ +ancestor: null +releases: + 1.0.0: + modules: + - description: Gathering information about Dell PowerFlex + name: info + namespace: '' + - description: Manage SDCs on Dell PowerFlex + name: sdc + namespace: '' + - description: Manage Snapshots on Dell PowerFlex + name: snapshot + namespace: '' + - description: Managing Dell PowerFlex storage pool + name: storagepool + namespace: '' + - description: Manage volumes on Dell PowerFlex + name: volume + namespace: '' + release_date: '2021-03-24' + 1.1.0: + changes: + minor_changes: + - Added dual licensing. + - Gatherfacts module is enhanced to list devices. + modules: + - description: Manage device on Dell PowerFlex + name: device + namespace: '' + - description: Manage SDS on Dell PowerFlex + name: sds + namespace: '' + release_date: '2021-09-28' + 1.1.1: + changes: + deprecated_features: + - The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info + trivial: + - Product Guide, Release Notes and ReadMe updated as per community guidelines. + release_date: '2021-12-16' + 1.2.0: + changes: + minor_changes: + - Names of previously released modules have been changed from dellemc_powerflex_\ to \. 
+ modules: + - description: Manage Protection Domain on Dell PowerFlex + name: protection_domain + namespace: '' + release_date: '2022-03-25' + 1.3.0: + changes: + minor_changes: + - Added execution environment manifest file to support building an execution + environment with ansible-builder. + - Enabled the check_mode support for info module + modules: + - description: Manage MDM cluster on Dell PowerFlex + name: mdm_cluster + namespace: '' + release_date: '2022-06-28' + 1.4.0: + changes: + minor_changes: + - Added support for 4.0.x release of PowerFlex OS. + - Info module is enhanced to support the listing volumes and storage pools with + statistics data. + - Storage pool module is enhanced to get the details with statistics data. + - Volume module is enhanced to get the details with statistics data. + release_date: '2022-09-27' + 1.5.0: + changes: + minor_changes: + - Info module is enhanced to support the listing replication consistency groups. + - Renamed gateway_host to hostname + - Renamed verifycert to validate_certs. + - Updated modules to adhere with ansible community guidelines. 
+ modules: + - description: Manage replication consistency groups on Dell PowerFlex + name: replication_consistency_group + namespace: '' + release_date: '2022-12-22' diff --git a/ansible_collections/dellemc/powerflex/changelogs/config.yaml b/ansible_collections/dellemc/powerflex/changelogs/config.yaml new file mode 100644 index 00000000..636258e1 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/changelogs/config.yaml @@ -0,0 +1,33 @@ +--- +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues +title: Dellemc.PowerFlex +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md b/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md new file mode 100644 index 00000000..826b5cd7 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/ADOPTERS.md @@ -0,0 +1,11 @@ + + +# List of adopters diff --git a/ansible_collections/dellemc/powerflex/docs/BRANCHING.md b/ansible_collections/dellemc/powerflex/docs/BRANCHING.md new file mode 100644 index 00000000..e244df70 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/BRANCHING.md @@ -0,0 +1,32 @@ + + +# Branching strategy + +Ansible modules for Dell PowerFlex follows a scaled trunk branching strategy where short-lived branches are created off of the 
main branch. When coding is complete, the branch is merged back into main after being approved in a pull request code review. + +## Branch naming convention + +| Branch Type | Example | Comment | +|--------------|-----------------------------------|-------------------------------------------| +| main | main | | +| Release | release-1.0 | hotfix: release-1.1 patch: release-1.0.1 | +| Feature | feature-9-vol-support | "9" referring to GitHub issue ID | +| Bug Fix | bugfix-110-fix-duplicates-issue | "110" referring to GitHub issue ID | + + +## Steps for working on a release branch + +1. Fork the repository. +2. Create a branch off of the main branch. The branch name should follow [branch naming convention](#branch-naming-convention). +3. Make your changes and commit them to your branch. +4. If other code changes have merged into the upstream main branch, perform a rebase of those changes into your branch. +5. Open a [pull request](https://github.com/dell/ansible-powerflex/pulls) between your branch and the upstream main branch. +6. Once your pull request has merged, your branch can be deleted. diff --git a/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md b/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..cfc4993c --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,137 @@ + + +# Code of conduct - contributor covenant + +## Our pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at ansible.team@dell.com +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary ban + +**Community impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent ban + +**Community impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. \ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md new file mode 100644 index 00000000..8af0752e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/COMMITTER_GUIDE.md @@ -0,0 +1,49 @@ + + +# Committer guidelines + +These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and not necessarily employees of Dell. + +These guidelines apply to everyone and as Committers you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse it. + +If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to not commit or lose these privileges all together. + +## General rules + +### Don't + +* Break the build. +* Commit directly. 
+ +* Compromise backward compatibility. +* Disrespect your Community Team members. Help them grow. +* Think it is someone else's job to test your code. Write tests for all the code you produce. +* Forget to keep things simple. +* Create technical debt. Fix-in-place and make it the highest priority above everything else. + +### Do + +* Keep it simple. +* Good work, your best every time. +* Keep the design of your software clean and maintainable. +* Squash your commits, avoid merges. +* Be active. Committers that are not active may have their permissions suspended. +* Write tests for all your deliverables. +* Automate everything. +* Maintain a high code coverage. +* Keep an open communication with other Committers. +* Ask questions. +* Document your contributions and remember to keep it simple. + +## People + +| Name | GitHub ID | Nickname | +|-------|-------------|------------| +| | | | diff --git a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md new file mode 100644 index 00000000..b01639f3 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md @@ -0,0 +1,173 @@ + + +# How to contribute + +Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/CODE_OF_CONDUCT.md). 
+ +## Table of contents + +* [Become a contributor](#Become-a-contributor) +* [Submitting issues](#Submitting-issues) +* [Triage issues](#Triage-issues) +* [Your first contribution](#Your-first-contribution) +* [Branching](#Branching) +* [Signing your commits](#Signing-your-commits) +* [Pull requests](#Pull-requests) +* [Code reviews](#Code-reviews) +* [TODOs in the code](#TODOs-in-the-code) + +## Become a contributor + +You can contribute to this project in several ways. Here are some examples: + +* Contribute to the Ansible modules for Dell PowerFlex documentation and codebase. +* Report and triage bugs. +* Feature requests. +* Write technical documentation and blog posts, for users and contributors. +* Help others by answering questions about this project. + +## Submitting issues + +All issues related to Ansible modules for Dell PowerFlex, regardless of the service/repository the issue belongs to (see table above), should be submitted [here](https://github.com/dell/ansible-powerflex/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted. + +### Report bugs + +We aim to track and document everything related to Ansible modules for Dell PowerFlex via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process. + +Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/ansible-powerflex/issues) for similar issues. + +Report a bug by submitting a [bug report](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug. + +When opening a Bug please include this information to help with debugging: + +1. 
Version of relevant software: this software, Ansible, Python, SDK, etc. +2. Details of the issue explaining the problem: what, when, where +3. The expected outcome that was not met (if any) +4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__ + +An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue. + +### Feature request + +If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A). + +### Answering questions + +If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A) + +We'd love your help answering questions being asked by other Ansible modules for Dell PowerFlex users. + +## Triage issues + +Triage helps ensure that issues resolve quickly by: + +* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +* Giving a contributor the information they need before they commit to resolving an issue. +* Lowering the issue count by preventing duplicate issues. +* Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell PowerFlex community will thank you for saving them time by spending some of yours. + +Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/1.5.0/ISSUE_TRIAGE.md). 
+ +## Your first contribution + +Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`. + +* [Beginner-friendly](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete. +* [Help wanted](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity. + +When you're ready to contribute, it's time to create a pull request. + +## Branching + +* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/1.5.0/BRANCHING.md) + +## Signing your commits + +We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions. + +GitHub will prevent a pull request from being merged if there are any unsigned commits. + +### Signing a commit + +GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key. + +Make sure you have your user name and e-mail set. This will be required for your signed commit to be properly verified. 
Check these references:
As such, we strive to make the title as informative as possible. + +Make sure that the title for your pull request uses the same format as the subject line in the commit message. + +### Quality gates for pull requests + +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that get checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/1.5.0/SUPPORT.md). + +#### Code sanitization + +[GitHub action](https://github.com/dell/ansible-powerflex/actions/workflows/ansible-test.yml) that analyzes source code to flag ansible sanity errors and runs Unit tests. + +## Code reviews + +All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. + +A pull request must satisfy following for it to be merged: + +* A pull request will require at least 2 maintainer approvals. +* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document. +* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again. + +## Code style + +Ensure the added code has the required documenation, examples and unit tests. + +### Sanity + +Run ansible-test sanity --docker default on your code to ensure sanity. Ensure the code does not have any Andersson script violations and not break any existing unit test workflows. + +### TODOs in the code + +We don't like TODOs in the code or documentation. 
It is really best if you sort out all issues you can see with the changes before we check the changes in. diff --git a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md new file mode 100644 index 00000000..7cf4da17 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md @@ -0,0 +1,106 @@ + + +# Installation and execution of Ansible modules for Dell PowerFlex + +## Installation of SDK +* Install the python SDK named [PyPowerFlex](https://pypi.org/project/PyPowerFlex/). It can be installed using pip, based on appropriate python version. Execute this command: + + pip install PyPowerFlex +* Alternatively, Clone the repo "https://github.com/dell/python-powerflex" + using command: + + git clone https://github.com/dell/python-powerflex.git + * Go to the root directory of setup. + * Execute this command: + + pip install . + +## Building collections + * Use this command to build the collection from source code: + + ansible-galaxy collection build + + For more details on how to build a tar ball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball) + +## Installing collections + +#### Online installation of collections + * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/powerflex): + + ansible-galaxy collection install dellemc.powerflex -p + +#### Offline installation of collections + + * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system: + + ansible-galaxy collection install dellemc-powerflex-1.5.0.tar.gz -p + + * Set the environment variable: + + 
export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS: + +## Using collections + + * In order to use any Ansible module, ensure that the importing of proper FQCN (Fully Qualified Collection Name) must be embedded in the playbook. + This example can be referred to: + + collections: + - dellemc.powerflex + + * In order to use installed collection in a specific task use a proper FQCN (Fully Qualified Collection Name). Refer to this example: + + tasks: + - name: Get Volume details + dellemc.powerflex.volume + + * For generating Ansible documentation for a specific module, embed the FQCN before the module name. Refer to this example: + + ansible-doc dellemc.powerflex.volume + + +## Ansible modules execution + +The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. + +## SSL certificate validation + +* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means. +* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command: + + export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<> +* Import the SSL certificate to host using the command: + + update-ca-trust extract +* If "TLS CA certificate bundle error" occurs, then follow these steps: + + cd /etc/pki/tls/certs/ + openssl x509 -in ca-bundle.crt -text -noout + +## Results +Each module returns the updated state and details of the entity, For example, if you are using the Volume module, all calls will return the updated details of the volume. Sample result is shown in each module's documentation. 
+ +## Ansible execution environment +Ansible can also be installed in a container environment. Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments. +* Install the ansible builder package using: + + pip3 install ansible-builder +* Ensure the execution-environment.yml is at the root of collection and create the execution environment using: + + ansible-builder build --tag --container-runtime docker +* After the image is built, run the container using: + + docker run -it /bin/bash +* Verify collection installation using command: + + ansible-galaxy collection list +* The playbook can be run on the container using: + + docker run --rm -v $(pwd):/runner ansible-playbook info_test.yml diff --git a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md new file mode 100644 index 00000000..f764df38 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md @@ -0,0 +1,306 @@ + + +# Triage issues + +The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it. + +> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic. + +The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project. + +Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. 
+- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +## 1. Find issues that need triage + +The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label. + +## 2. Ensure the issue contains basic information + +Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors to provide standard information that must be included for each type of template or type of issue. + +### Standard issue information that must be included + +This section describes the various issue templates and the expected content. + +#### Bug reports + +Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or maybe related to the reported problem: + + - Ansible Version: [e.g. 2.14] + - Python Version [e.g. 3.11] + - Ansible modules for Dell PowerFlex Version: [e.g. 1.5.0] + - PowerFlex SDK version: [e.g. PyPowerFlex 1.6.0] + - Any other additional information... + +#### Feature requests + +Should explain what feature that the author wants to be added and why that is needed. + +#### Ask a question requests + +In general, if the issue description and title is perceived as a question no more information is needed. 
+ +### Good practices + +To make it easier for everyone to understand and find issues they're searching for it's suggested as a general rule of thumbs to: + +- Make sure that issue titles are named to explain the subject of the issue, has a correct spelling and doesn't include irrelevant information and/or sensitive information. +- Make sure that issue descriptions doesn't include irrelevant information. +- Make sure that issues do not contain sensitive information. +- Make sure that issues have all relevant fields filled in. +- Do your best effort to change title and description or request suggested changes by adding a comment. + +> **Note:** Above rules are applicable to both new and existing issues. + +### Dealing with missing information + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label issue with `triage/needs-information`. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label issue with `triage/needs-information`. + +If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +If you receive a notification with additional information provided but you are not anymore on issue triage and you feel you do not have time to handle it, you should delegate it to the current person on issue triage. + +## 3. Categorizing an issue + +### Duplicate issues + +Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. 
If you think you know there is an existing issue, but can't find it, please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue: + +1. Add a comment `duplicate of #` +2. Add the `triage/duplicate` label + +### Bug reports + +If it's not perfectly clear that it's an actual bug, quickly try to reproduce it. + +**It's a bug/it can be reproduced:** + +1. Add a comment describing detailed steps for how to reproduce it, if applicable. +2. If you know that maintainers wont be able to put any resources into it for some time then label the issue with `help wanted` and optionally `beginner friendly` together with pointers on which code to update to fix the bug. This should signal to the community that we would appreciate any help we can get to resolve this. +3. Move on to [prioritizing the issue](#4-prioritization-of-issues). + +**It can't be reproduced:** + +1. Either [ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate it more thoroughly. Provide details in a comment. +2. Either [delegate further investigations](#investigation-of-issues) to someone else. Provide details in a comment. + +**It works as intended/by design:** + +1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue. +2. Label the issue `triage/works-as-intended`. +3. Remove the `needs-triage` label. + +**It does not work as intended/by design:** + +### Feature requests + +1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label and close the issue +2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label. + +## 4. Prioritization of issues + +In general bugs and feature request issues should be labeled with a priority. 
+ +Adding priority levels can be difficult. Ensure you have the knowledge, context, and the experience before prioritizing any issue. + +If you have any uncertainty as to which priority level to assign, please ask the maintainers for help. + +| Label | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority immediately. | +| `priority/high` | Must be worked on soon, ideally in time for the next release. | +| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. | + +### Critical priority + +1. If an issue has been categorized and any of this criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority immediately. + + - Results in any data loss + - Critical security or performance issues + - Problem that makes a feature unusable + - Multiple users experience a severe problem affecting their business, users etc. + +2. Label the issue `priority/critical`. +3. Escalate the problem to the maintainers. +4. Assign or ask a maintainer for help assigning someone to make this issue their top priority immediately. +5. Add the issue to the next upcoming release milestone. + +### High priority + +1. Label the issue `priority/high`. +2. Add the issue to the next upcoming release milestone. +3. Prioritize it or assign someone to work on it now or very soon. +4. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +### Low priority + +1. If the issue is deemed possibly useful but a low priority label the issue `priority/low`. +2. The amount of interest in the issue will determine if the priority elevated. +3. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +## 5. 
Requesting help from the community + +Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that help from community is appreciated and needed in case an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged. + +In many cases the issue author or community as a whole is more suitable to contribute changes since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and made an effort to get it to work and/or reached out to the community to get the missing information. + +1. Kindly and politely add a comment to alert update subscribers. + - Explain the issue and need for resolution. Be sure and detail that the issue has not been prioritized and that the issue has not been scheduled for work by the maintainers. + - If possible or applicable, add pointers and references to the code/files that need to be revised. Provide any idea as to the solution. This will help the maintainers get started on resolving the issue. +2. Label the issue with `help wanted`. +3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on. + +## Investigation of issues + +When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. Depending on the perceived severity and/or number of [upvotes](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments), the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it. 
+ +Even if you don't have the time or knowledge to investigate an issue we highly recommend that you [upvote](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments) the issue if you happen to have the same problem. If you have further details that may help investigating the issue please provide as much information as possible. + +## External pull requests + +Part of issue triage should also be triaging of external PRs. Main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten. + +1. Check new external PRs which do not have a reviewer. +1. Check if there is a link to an existing issue. +1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one. +1. Assign a reviewer based on who was handling the linked issue or what code or feature does the PR touches (look at who was the last to make changes there if all else fails). + +## GitHub issue management workflow + +This section describes the triage workflow for new GitGHub issues that get created. + +### GitHub Issue: Bug + +This workflow starts off with a GitHub issue of type bug being created. + +1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template +2. By default a bug will be created with the `type/bug` and `needs-triage` labels + +The following flow chart outlines the triage process for bugs. + + +``` + +--------------------------+ + | New bug issue opened/more| + | information added | + +-------------|------------+ + | + | + +----------------------------------+ NO +--------------|-------------+ + | label: triage/needs-information --------- All required information | + | | | contained in issue? | + +-----------------------------|----+ +--------------|-------------+ + | | YES + | | + +--------------------------+ | +---------------------+ YES +---------------------------------------+ + |label: | | | Dupicate Issue? 
------- Comment `Duplicate of #` + |triage/needs-investigation| | NO | | | Remove needs-triage label | + +------|-------------------+ | +----------|----------+ | label: triage/duplicate | + | | | NO +-----------------|---------------------+ + YES | | | | + | +---------------|----+ NO +------------|------------+ | + | |Needs investigation?|---------- Can it be reproduced? | | + |------- | +------------|------------+ | + +--------------------+ | YES | + | +----------|----------+ + +-------------------------+ +------------|------------+ | Close Issue | + | Add release-found label |------------------ Works as intended? | | | + | label: release-found/* | NO | | +----------|----------+ + +------------|------------+ +------------|------------+ | + | | | + | | YES | + +-----------------------------+ +----------------|----------------+ | + | Add area label | | Add comment | | + | label: area/* | | Remove needs-triage label ------------------| + +------------|----------------+ | label: triage/works-as-intended | + | +---------------------------------+ + | + +------------|-------------+ +----------+ + | Add priority label | | Done ---------------------------------------- + | label: priority/* | +----|-----+ | + +------------|-------------+ |NO | + | | +------------------|------------------+ + +------------|-------------+ +----|----------------+ YES | Add details to issue | + | ------------ Signal Community? ---------- label: help wanted | + |Remove needs-triage label | | | | label: beginner friendly (optional)| + +--------------------------+ +---------------------+ +-------------------------------------+ + +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +### GitHub issue: feature request + +This workflow starts off with a GitHub issue of type feature request being created. + +1. 
Collaborator or maintainer creates a GitHub feature request using the appropriate GitHub issue template +2. By default a feature request will be created with the `type/feature-request` and `needs-triage` labels + +This flow chart outlines the triage process for feature requests. + + +``` + +---------------------------------+ + |New feature request issue opened/| + |more information added | + +----------------|----------------+ + | + | + +---------------------------------+ NO +-------------|------------+ + | label: triage/needs-information ---------- All required information | + | | | contained in issue? | + +---------------------------------+ +-------------|------------+ + | + | + +---------------------------------------+ | + |Comment `Duplicate of #` | YES +----------|----------+ + |Remove needs-triage label ------- Duplicate issue? | + |label: triage/duplicate | | | + +-----|---------------------------------+ +-----------|---------+ + | |NO + | +-------------------------+ NO +-----------------------------+ + | |Add comment |-------- Does feature request align | + | |Remove needs-triage label| | with product vision? 
| + | +------|------------------+ +--------------|--------------+ + | | | YES + | | +-----------------|----------------+ + | | |Change feature-request to feature | + | | |Remove label: type/feature-request| + | | |Add label: type/feature | + | | +-----------------|----------------+ + | | | + | | +--------------|--------------+ + | | | Add area label | + | | | label: area/* | + | | +--------------|--------------+ + | | | + +-|---------|---+ +--------+ +--------------|--------------+ + | Close issue | | Done --------- Add priority label | + | | | | | label: priority/* | + +---------------+ +--------+ +-----------------------------+ +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +In some cases you may receive a request you do not wish to accept. Perhaps the request doesn't align with the project scope or vision. It is important to tactfully handle contributions that don't meet the project standards. + +1. Acknowledge the person behind the contribution and thank them for their interest and contribution +2. Explain why it didn't fit into the scope of the project or vision +3. Don't leave an unwanted contributions open. 
Immediately close the contribution you do not wish to accept diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md new file mode 100644 index 00000000..24ab255d --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINERS.md @@ -0,0 +1,19 @@ + + +# Maintainers + +* Ananthu Kuttattu (kuttattz) +* Bhavneet Sharma (Bhavneet-Sharma) +* Jennifer John (Jennifer-John) +* Meenakshi Dembi (meenakshidembi691) +* Pavan Mudunuri (Pavan-Mudunuri) +* Previnkumar G (Previnkumar-G) +* Trisha Datta (trisha-dell) diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md new file mode 100644 index 00000000..b99e5b22 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md @@ -0,0 +1,38 @@ + + +# Maintainer guidelines + +As a Maintainer of this project you have the responsibility of keeping true to the vision of the project with a high-degree quality. Being part of this group is a privilege that requires dedication and time to attend to the daily activities that are associated with the maintenance of this project. + +## Becoming a maintainer + +Most Maintainers started as Contributors that have demonstrated their commitment to the success of the project. Contributors wishing to become Maintainers, must demonstrate commitment to the success of the project by contributing code, reviewing others' work, and triaging issues on a regular basis for at least three months. + +The contributions alone don't make you a Maintainer. You need to earn the trust of the current Maintainers and other project Contributors, that your decisions and actions are in the best interest of the project. + +Periodically, the existing Maintainers curate a list of Contributors who have shown regular activity on the project over the prior months. It is from this list that Maintainer candidates are selected. 
+ +After a candidate is selected, the existing Maintainers discuss the candidate over the next 5 business days, provide feedback, and vote. At least 75% of the current Maintainers must vote in the affirmative for a candidate to be moved to the role of Maintainer. + +If a candidate is approved, a Maintainer contacts the candidate to invite them to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a Maintainer once the pull request is merged. + +## Maintainer policies + +* Lead by example +* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.5.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/1.5.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/1.5.0/COMMITTER_GUIDE.md) guides +* Promote a friendly and collaborative environment within our community +* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests +* Criticize code, not people. Ideally, tell the contributor a better way to do what they need. +* Clearly mark optional suggestions as such. Best practice, start your comment with *At your option: …* + +## Project decision making + +All project decisions should contribute to successfully executing on the project roadmap. Project milestones are established for each release. \ No newline at end of file diff --git a/ansible_collections/dellemc/powerflex/docs/Product Guide.md b/ansible_collections/dellemc/powerflex/docs/Product Guide.md new file mode 100644 index 00000000..b255917e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/Product Guide.md @@ -0,0 +1,5437 @@ +# Ansible Modules for Dell Technologies PowerFlex +## Product Guide 1.5.0 +© 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell, and other trademarks are trademarks of Dell Inc. or its subsidiaries. Other trademarks may be trademarks of their respective owners. 
+ +-------------- +## Contents +* [Device Module](#device-module) + * [Synopsis](#synopsis) + * [Parameters](#parameters) + * [Notes](#notes) + * [Examples](#examples) + * [Return Values](#return-values) + * [Authors](#authors) +* [Info Module](#info-module) + * [Synopsis](#synopsis-1) + * [Parameters](#parameters-1) + * [Notes](#notes-1) + * [Examples](#examples-1) + * [Return Values](#return-values-1) + * [Authors](#authors-1) +* [MDM Cluster Module](#mdm-cluster-module) + * [Synopsis](#synopsis-2) + * [Parameters](#parameters-2) + * [Notes](#notes-2) + * [Examples](#examples-2) + * [Return Values](#return-values-2) + * [Authors](#authors-2) +* [Protection Domain Module](#protection-domain-module) + * [Synopsis](#synopsis-3) + * [Parameters](#parameters-3) + * [Notes](#notes-3) + * [Examples](#examples-3) + * [Return Values](#return-values-3) + * [Authors](#authors-3) +* [Replication Consistency Group Module](#replication-consistency-group-module) + * [Synopsis](#synopsis-4) + * [Parameters](#parameters-4) + * [Notes](#notes-4) + * [Examples](#examples-4) + * [Return Values](#return-values-4) + * [Authors](#authors-4) +* [SDC Module](#sdc-module) + * [Synopsis](#synopsis-5) + * [Parameters](#parameters-5) + * [Notes](#notes-5) + * [Examples](#examples-5) + * [Return Values](#return-values-5) + * [Authors](#authors-5) +* [SDS Module](#sds-module) + * [Synopsis](#synopsis-6) + * [Parameters](#parameters-6) + * [Notes](#notes-6) + * [Examples](#examples-6) + * [Return Values](#return-values-6) + * [Authors](#authors-6) +* [Storage Pool Module](#storage-pool-module) + * [Synopsis](#synopsis-7) + * [Parameters](#parameters-7) + * [Notes](#notes-7) + * [Examples](#examples-7) + * [Return Values](#return-values-7) + * [Authors](#authors-7) +* [Volume Module](#volume-module) + * [Synopsis](#synopsis-8) + * [Parameters](#parameters-8) + * [Notes](#notes-8) + * [Examples](#examples-8) + * [Return Values](#return-values-8) + * [Authors](#authors-8) + +-------------- + +# 
Device Module + +Manage device on Dell PowerFlex + +### Synopsis + Managing device on PowerFlex storage system includes adding new device, getting details of device, and removing a device. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
current_pathname str
Full path of the device to be added.
Required while adding a device.
device_name str
Device name.
Mutually exclusive with device_id.
device_id str
Device ID.
Mutually exclusive with device_name.
sds_name str
The name of the SDS.
Required while adding a device.
Mutually exclusive with sds_id.
sds_id str
The ID of the SDS.
Required while adding a device.
Mutually exclusive with sds_name.
storage_pool_name str
Storage Pool name.
Used while adding a storage device.
Mutually exclusive with storage_pool_id, acceleration_pool_id and acceleration_pool_name.
storage_pool_id str
Storage Pool ID.
Used while adding a storage device.
Media types supported are SSD and HDD.
Mutually exclusive with storage_pool_name, acceleration_pool_id and acceleration_pool_name.
acceleration_pool_name str
Acceleration Pool Name.
Used while adding an acceleration device.
Media types supported are SSD and NVDIMM.
Mutually exclusive with storage_pool_id, storage_pool_name and acceleration_pool_id.
acceleration_pool_id str
Acceleration Pool ID.
Used while adding an acceleration device.
Media types supported are SSD and NVDIMM.
Mutually exclusive with acceleration_pool_name, storage_pool_name and storage_pool_id.
protection_domain_name str
Protection domain name.
Used while identifying a storage pool along with storage_pool_name.
Mutually exclusive with protection_domain_id.
protection_domain_id str
Protection domain ID.
Used while identifying a storage pool along with storage_pool_name.
Mutually exclusive with protection_domain_name.
external_acceleration_type str
  • Invalid
  • None
  • Read
  • Write
  • ReadAndWrite

Device external acceleration types.
Used while adding a device.
media_type str
  • HDD
  • SSD
  • NVDIMM

Device media types.
Required while adding a device.
state str True
  • present
  • absent

State of the device.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The value for device_id is generated only after successful addition of the device. +* To uniquely identify a device, either device_id can be passed or one of current_pathname or device_name must be passed with sds_id or sds_name. +* It is recommended to install Rfcache driver for SSD device on SDS in order to add it to an acceleration pool. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Add a device + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + media_type: "HDD" + device_name: "device2" + storage_pool_name: "pool1" + protection_domain_name: "domain1" + external_acceleration_type: "ReadAndWrite" + state: "present" +- name: Get device details using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "d7fe088900000000" + state: "present" +- name: Get device details using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node0" + state: "present" +- name: Get device details using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_id: "5717d71800000000" + state: "present" +- name: Remove a device using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: 
"{{validate_certs}}" + port: "{{port}}" + device_id: "76eb7e2f00010000" + state: "absent" +- name: Remove a device using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
device_details dict When device exists Details of the device.
  accelerationPoolId str success Acceleration pool ID.
  accelerationPoolName str success Acceleration pool name.
  accelerationProps str success Indicates acceleration props.
  aggregatedState str success Indicates aggregated state.
  ataSecurityActive bool success Indicates ATA security active state.
  autoDetectMediaType str success Indicates auto detection of media type.
  cacheLookAheadActive bool success Indicates cache look ahead active state.
  capacity int success Device capacity.
  capacityLimitInKb int success Device capacity limit in KB.
  deviceCurrentPathName str success Device current path name.
  deviceOriginalPathName str success Device original path name.
  deviceState str success Indicates device state.
  deviceType str success Indicates device type.
  errorState str success Indicates error state.
  externalAccelerationType str success Indicates external acceleration type.
  fglNvdimmMetadataAmortizationX100 int success Indicates FGL NVDIMM meta data amortization value.
  fglNvdimmWriteCacheSize int success Indicates FGL NVDIMM write cache size.
  firmwareVersion str success Indicates firmware version.
  id str success Device ID.
  ledSetting str success Indicates LED setting.
  links list success Device links.
   href str success Device instance URL.
   rel str success Relationship of device with different entities.
  logicalSectorSizeInBytes int success Logical sector size in bytes.
  longSuccessfulIos list success Indicates long successful IOs.
  maxCapacityInKb int success Maximum device capacity limit in KB.
  mediaFailing bool success Indicates media failing.
  mediaType str success Indicates media type.
  modelName str success Indicates model name.
  name str success Device name.
  persistentChecksumState str success Indicates persistent checksum state.
  physicalSectorSizeInBytes int success Physical sector size in bytes.
  protectionDomainId str success Protection domain ID.
  protectionDomainName str success Protection domain name.
  raidControllerSerialNumber str success RAID controller serial number.
  rfcacheErrorDeviceDoesNotExist bool success Indicates RF cache error device does not exist.
  rfcacheProps str success RF cache props.
  sdsId str success SDS ID.
  sdsName str success SDS name.
  serialNumber str success Indicates Serial number.
  spSdsId str success Indicates SPs SDS ID.
  ssdEndOfLifeState str success Indicates SSD end of life state.
  storagePoolId str success Storage Pool ID.
  storagePoolName str success Storage Pool name.
  storageProps list success Storage props.
  temperatureState str success Indicates temperature state.
  vendorName str success Indicates vendor name.
  writeCacheActive bool success Indicates write cache active.
+ +### Authors +* Rajshree Khare (@khareRajshree) + +-------------------------------- +# Info Module + +Gathering information about Dell PowerFlex + +### Synopsis + Gathering information about Dell PowerFlex storage system includes getting the api details, list of volumes, SDSs, SDCs, storage pools, protection domains, snapshot policies, and devices. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
gather_subset list
elements: str
  • vol
  • storage_pool
  • protection_domain
  • sdc
  • sds
  • snapshot_policy
  • device
  • rcg

List of string variables to specify the Powerflex storage system entities for which information is required.
Volumes - vol.
Storage pools - storage_pool.
Protection domains - protection_domain.
SDCs - sdc.
SDSs - sds.
Snapshot policies - snapshot_policy.
Devices - device.
Replication consistency groups - rcg.
filters list
elements: dict

List of filters to support filtered output for storage entities.
Each filter is a list of filter_key, filter_operator, filter_value.
Supports passing of multiple filters.
  filter_key str True
Name identifier of the filter.
  filter_operator str True
  • equal

Operation to be performed on filter key.
  filter_value str True
Value of the filter key.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The check_mode is supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Get detailed list of PowerFlex entities + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + - storage_pool + - protection_domain + - sdc + - sds + - snapshot_policy + - device + - rcg + +- name: Get a subset list of PowerFlex volumes + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + filters: + - filter_key: "name" + filter_operator: "equal" + filter_value: "ansible_test" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
API_Version str always API version of PowerFlex API Gateway.
Array_Details dict always System entities of PowerFlex storage array.
  addressSpaceUsage str success Address space usage.
  authenticationMethod str success Authentication method.
  capacityAlertCriticalThresholdPercent int success Capacity alert critical threshold percentage.
  capacityAlertHighThresholdPercent int success Capacity alert high threshold percentage.
  capacityTimeLeftInDays str success Capacity time left in days.
  cliPasswordAllowed bool success CLI password allowed.
  daysInstalled int success Days installed.
  defragmentationEnabled bool success Defragmentation enabled.
  enterpriseFeaturesEnabled bool success Enterprise features enabled.
  id str success The ID of the system.
  installId str success installation Id.
  isInitialLicense bool success Initial license.
  lastUpgradeTime int success Last upgrade time.
  managementClientSecureCommunicationEnabled bool success Management client secure communication enabled.
  maxCapacityInGb dict success Maximum capacity in GB.
  mdmCluster dict success MDM cluster details.
  mdmExternalPort int success MDM external port.
  mdmManagementPort int success MDM management port.
  mdmSecurityPolicy str success MDM security policy.
  showGuid bool success Show guid.
  swid str success SWID.
  systemVersionName str success System version and name.
  tlsVersion str success TLS version.
  upgradeState str success Upgrade state.
Devices list always Details of devices.
  id str success device id.
  name str success device name.
Protection_Domains list always Details of all protection domains.
  id str success protection domain id.
  name str success protection domain name.
Replication_Consistency_Groups list always Details of rcgs.
  abstractState str success The abstract state of the replication consistency group.
  activeLocal bool success Whether the local replication consistency group is active.
  activeRemote bool success Whether the remote replication consistency group is active.
  currConsistMode str success The current consistency mode of the replication consistency group.
  disasterRecoveryState str success The state of disaster recovery of the local replication consistency group.
  error int success The error code of the replication consistency group.
  failoverState str success The state of failover of the replication consistency group.
  failoverType str success The type of failover of the replication consistency group.
  freezeState str success The freeze state of the replication consistency group.
  id str success The ID of the replication consistency group.
  inactiveReason int success The reason for the inactivity of the replication consistency group.
  lastSnapCreationRc int success The return code of the last snapshot of the replication consistency group.
  lastSnapGroupId str success ID of the last snapshot of the replication consistency group.
  lifetimeState str success The Lifetime state of the replication consistency group.
  localActivityState str success The state of activity of the local replication consistency group.
  name str success The name of the replication consistency group.
  pauseMode str success The pause mode of the replication consistency group.
  peerMdmId str success The ID of the peer MDM of the replication consistency group.
  protectionDomainId str success The Protection Domain ID of the replication consistency group.
  remoteActivityState str success The state of activity of the remote replication consistency group.
  remoteDisasterRecoveryState str success The state of disaster recovery of the remote replication consistency group.
  remoteId str success The ID of the remote replication consistency group.
  remoteMdmId str success The ID of the remote MDM of the replication consistency group.
  remoteProtectionDomainId str success The ID of the remote Protection Domain.
  remoteProtectionDomainName str success The Name of the remote Protection Domain.
  replicationDirection str success The direction of the replication of the replication consistency group.
  rpoInSeconds int success The RPO value of the replication consistency group in seconds.
  snapCreationInProgress bool success Whether the process of snapshot creation of the replication consistency group is in progress or not.
  targetVolumeAccessMode str success The access mode of the target volume of the replication consistency group.
  type str success The type of the replication consistency group.
SDCs list always Details of storage data clients.
  id str success storage data client id.
  name str success storage data client name.
SDSs list always Details of storage data servers.
  id str success storage data server id.
  name str success storage data server name.
Snapshot_Policies list always Details of snapshot policies.
  id str success snapshot policy id.
  name str success snapshot policy name.
Storage_Pools list always Details of storage pools.
  id str success ID of the storage pool under protection domain.
  mediaType str success Type of devices in the storage pool.
  name str success Name of the storage pool under protection domain.
  protectionDomainId str success ID of the protection domain in which pool resides.
  protectionDomainName str success Name of the protection domain in which pool resides.
  statistics dict success Statistics details of the storage pool.
   capacityInUseInKb str success Total capacity of the storage pool.
   deviceIds list success Device Ids of the storage pool.
   unusedCapacityInKb str success Unused capacity of the storage pool.
  useRfcache bool success Enable/Disable RFcache on a specific storage pool.
  useRmcache bool success Enable/Disable RMcache on a specific storage pool.
Volumes list always Details of volumes.
  id str success The ID of the volume.
  mappedSdcInfo dict success The details of the mapped SDC.
   accessMode str success mapping access mode for the specified volume.
   limitBwInMbps int success Bandwidth limit for the SDC.
   limitIops int success IOPS limit for the SDC.
   sdcId str success ID of the SDC.
   sdcIp str success IP of the SDC.
   sdcName str success Name of the SDC.
  name str success Name of the volume.
  protectionDomainId str success ID of the protection domain in which volume resides.
  protectionDomainName str success Name of the protection domain in which volume resides.
  sizeInGb int success Size of the volume in Gb.
  sizeInKb int success Size of the volume in Kb.
  snapshotPolicyId str success ID of the snapshot policy associated with volume.
  snapshotPolicyName str success Name of the snapshot policy associated with volume.
  snapshotsList str success List of snapshots associated with the volume.
  statistics dict success Statistics details of the volume.
   numOfChildVolumes int success Number of child volumes.
   numOfMappedSdcs int success Number of mapped Sdcs of the volume.
  storagePoolId str success ID of the storage pool in which volume resides.
  storagePoolName str success Name of the storage pool in which volume resides.
changed bool always Whether or not the resource has changed.
+ +### Authors +* Arindam Datta (@dattaarindam) + +-------------------------------- +# MDM Cluster Module + +Manage MDM cluster on Dell PowerFlex + +### Synopsis + Managing MDM cluster and MDMs on PowerFlex storage system includes adding/removing standby MDM, modify MDM name and virtual interface. + It also includes getting details of MDM cluster, modify MDM cluster ownership, cluster mode, and performance profile. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
mdm_name str
The name of the MDM. It is unique across the PowerFlex array.
Mutually exclusive with mdm_id.
If mdm_name is passed in the add standby operation, then the same name will be assigned to the new standby mdm.
mdm_id str
The ID of the MDM.
Mutually exclusive with mdm_name.
mdm_new_name str
To rename the MDM.
standby_mdm dict
Specifies add standby MDM parameters.
  mdm_ips list
elements: str
True
List of MDM IPs that will be assigned to new MDM. It can contain IPv4 addresses.
  role str True
  • Manager
  • TieBreaker

Role of new MDM.
  management_ips list
elements: str

List of management IPs to manage MDM. It can contain IPv4 addresses.
  port int
Specifies the port of new MDM.
  allow_multiple_ips bool
Allow the added node to have different number of IPs from the primary node.
  virtual_interfaces list
elements: str

List of NIC interfaces that will be used for virtual IP addresses.
is_primary bool
Set is_primary as True to change MDM cluster ownership from the current master MDM to different MDM.
Set is_primary as False, will return MDM cluster details.
New owner MDM must be an MDM with a manager role.
cluster_mode str
  • OneNode
  • ThreeNodes
  • FiveNodes

Mode of the cluster.
mdm list
elements: dict

Specifies parameters to add/remove MDMs to/from the MDM cluster.
  mdm_id str
ID of MDM that will be added/removed to/from the cluster.
  mdm_name str
Name of MDM that will be added/removed to/from the cluster.
  mdm_type str True
  • Secondary
  • TieBreaker

Type of the MDM.
Either mdm_id or mdm_name must be passed with mdm_type.
mdm_state str
  • present-in-cluster
  • absent-in-cluster

Mapping state of MDM.
virtual_ip_interfaces list
elements: str

List of interfaces to be used for virtual IPs.
The order of interfaces must be matched with virtual IPs assigned to the cluster.
Interfaces of the primary and secondary type MDMs are allowed to modify.
The virtual_ip_interfaces is mutually exclusive with clear_interfaces.
clear_interfaces bool
Clear all virtual IP interfaces.
The clear_interfaces is mutually exclusive with virtual_ip_interfaces.
performance_profile str
  • Compact
  • HighPerformance

Apply performance profile to cluster MDMs.
state str True
  • present
  • absent

State of the MDM cluster.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* Parameters mdm_name or mdm_id are mandatory for rename and modify virtual IP interfaces. +* Parameters mdm_name or mdm_id are not required while modifying performance profile. +* For change MDM cluster ownership operation, only changed as True will be returned and for idempotency case MDM cluster details will be returned. +* Reinstall all SDC after changing ownership to some newly added MDM. +* To add manager standby MDM, MDM package must be installed with manager role. +* The check_mode is supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Add a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + standby_mdm: + mdm_ips: + - "10.x.x.x" + role: "TieBreaker" + management_ips: + - "10.x.y.z" + state: "present" + +- name: Remove a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + state: "absent" + +- name: Switch cluster mode from 3 node to 5 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "FiveNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "present-in-cluster" + state: "present" + +- name: Switch cluster mode from 5 node to 3 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "ThreeNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: 
"Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "absent-in-cluster" + state: "present" + +- name: Get the details of the MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + state: "present" + +- name: Change ownership of MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_2" + is_primary: True + state: "present" + +- name: Modify performance profile + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + performance_profile: "HighPerformance" + state: "present" + +- name: Rename the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + mdm_new_name: "new_mdm_1" + state: "present" + +- name: Modify virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + virtual_ip_interface: + - "ens224" + state: "present" + +- name: Clear virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + clear_interfaces: True + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
mdm_cluster_details dict When MDM cluster exists Details of the MDM cluster.
  clusterMode str success Mode of the MDM cluster.
  clusterState str success State of the MDM cluster.
  goodNodesNum int success Number of Nodes in MDM cluster.
  goodReplicasNum int success Number of nodes for Replication.
  id str success The ID of the MDM cluster.
  master dict success The details of the master MDM.
   id str success ID of the MDM.
   ips list success List of IPs for master MDM.
   managementIPs list success List of management IPs for master MDM.
   name str success Name of the MDM.
   opensslVersion str success OpenSSL version.
   port str success Port of the MDM.
   role str success Role of MDM.
   status str success Status of MDM.
   versionInfo str success Version of MDM.
   virtualInterfaces list success List of virtual interfaces
  name str success Name of MDM cluster.
  slaves list success The list of the secondary MDMs.
   id str success ID of the MDM.
   ips list success List of IPs for secondary MDM.
   managementIPs list success List of management IPs for secondary MDM.
   name str success Name of the MDM.
   opensslVersion str success OpenSSL version.
   port str success Port of the MDM.
   role str success Role of MDM.
   status str success Status of MDM.
   versionInfo str success Version of MDM.
   virtualInterfaces list success List of virtual interfaces
  standbyMDMs list success The list of the standby MDMs.
   id str success ID of the MDM.
   ips list success List of IPs for MDM.
   managementIPs list success List of management IPs for MDM.
   name str success Name of the MDM.
   opensslVersion str success OpenSSL version.
   port str success Port of the MDM.
   role str success Role of MDM.
   status str success Status of MDM.
   versionInfo str success Version of MDM.
   virtualInterfaces list success List of virtual interfaces.
  tieBreakers list success The list of the TieBreaker MDMs.
   id str success ID of the MDM.
   ips list success List of IPs for tie-breaker MDM.
   managementIPs list success List of management IPs for tie-breaker MDM.
   name str success Name of the MDM.
   opensslVersion str success OpenSSL version.
   port str success Port of the MDM.
   role str success Role of MDM.
   status str success Status of MDM.
   versionInfo str success Version of MDM.
  virtualIps list success List of virtual IPs.
+ +### Authors +* Bhavneet Sharma (@sharmb5) + +-------------------------------- +# Protection Domain Module + +Manage Protection Domain on Dell PowerFlex + +### Synopsis + Managing Protection Domain on PowerFlex storage system includes creating, modifying attributes, deleting and getting details of Protection Domain. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
protection_domain_name str
The name of the protection domain.
Mandatory for create operation.
It is unique across the PowerFlex array.
Mutually exclusive with protection_domain_id.
protection_domain_id str
The ID of the protection domain.
Except for create operation, all other operations can be performed using protection_domain_id.
Mutually exclusive with protection_domain_name.
protection_domain_new_name str
Used to rename the protection domain.
is_active bool
Used to activate or deactivate the protection domain.
network_limits dict
Network bandwidth limit used by all SDS in protection domain.
  rebuild_limit int
Limit the network bandwidth for rebuild.
  rebalance_limit int
Limit the network bandwidth for rebalance.
  vtree_migration_limit int
Limit the network bandwidth for vtree migration.
  overall_limit int
Limit the overall network bandwidth.
  bandwidth_unit str KBps
  • KBps
  • MBps
  • GBps

Unit for network bandwidth limits.
rf_cache_limits dict
Used to set the RFcache parameters of the protection domain.
  is_enabled bool
Used to enable or disable RFcache in the protection domain.
  page_size int
Used to set the cache page size in KB.
  max_io_limit int
Used to set cache maximum I/O limit in KB.
  pass_through_mode str
  • None
  • Read
  • Write
  • ReadAndWrite
  • WriteMiss

Used to set the cache mode.
state str True
  • present
  • absent

State of the protection domain.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The protection domain can only be deleted if all its related objects have been dissociated from the protection domain. +* If the protection domain set to inactive, then no operation can be performed on protection domain. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Create protection domain + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Create protection domain with all parameters + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + is_active: true + network_limits: + rebuild_limit: 10 + rebalance_limit: 17 + vtree_migration_limit: 14 + overall_limit: 20 + bandwidth_unit: "MBps" + rf_cache_limits: + is_enabled: true + page_size: 16 + max_io_limit: 128 + pass_through_mode: "Read" + state: "present" + +- name: Get protection domain details using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Get protection domain details using ID + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_id: "5718253c00000004" + state: "present" + +- name: Modify protection domain attributes + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: 
"{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + protection_domain_new_name: "domain1_new" + network_limits: + rebuild_limit: 14 + rebalance_limit: 20 + overall_limit: 25 + bandwidth_unit: "MBps" + rf_cache_limits: + page_size: 64 + pass_through_mode: "WriteMiss" + state: "present" + +- name: Delete protection domain using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1_new" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
protection_domain_details dict When protection domain exists Details of the protection domain.
  fglDefaultMetadataCacheSize int success FGL metadata cache size.
  fglDefaultNumConcurrentWrites str success FGL concurrent writes.
  fglMetadataCacheEnabled bool success Whether FGL cache enabled.
  id str success Protection domain ID.
  links list success Protection domain links.
   href str success Protection domain instance URL.
   rel str success Protection domain's relationship with different entities.
  mdmSdsNetworkDisconnectionsCounterParameters dict success MDM's SDS counter parameter.
   longWindow int success Long window for Counter Parameters.
   mediumWindow int success Medium window for Counter Parameters.
   shortWindow int success Short window for Counter Parameters.
  name str success Name of the protection domain.
  overallIoNetworkThrottlingEnabled bool success Whether overall network throttling enabled.
  overallIoNetworkThrottlingInKbps int success Overall network throttling in KBps.
  protectedMaintenanceModeNetworkThrottlingEnabled bool success Whether protected maintenance mode network throttling enabled.
  protectedMaintenanceModeNetworkThrottlingInKbps int success Protected maintenance mode network throttling in KBps.
  protectionDomainState int success State of protection domain.
  rebalanceNetworkThrottlingEnabled int success Whether rebalance network throttling enabled.
  rebalanceNetworkThrottlingInKbps int success Rebalance network throttling in KBps.
  rebuildNetworkThrottlingEnabled int success Whether rebuild network throttling enabled.
  rebuildNetworkThrottlingInKbps int success Rebuild network throttling in KBps.
  rfcacheAccpId str success Id of RF cache acceleration pool.
  rfcacheEnabled bool success Whether RF cache is enabled or not.
  rfcacheMaxIoSizeKb int success RF cache maximum I/O size in KB.
  rfcacheOpertionalMode str success RF cache operational mode.
  rfcachePageSizeKb bool success RF cache page size in KB.
  sdrSdsConnectivityInfo dict success Connectivity info of SDR and SDS.
   clientServerConnStatus str success Connectivity status of client and server.
   disconnectedClientId str success Disconnected client ID.
   disconnectedClientName str success Disconnected client name.
   disconnectedServerId str success Disconnected server ID.
   disconnectedServerIp str success Disconnected server IP.
   disconnectedServerName str success Disconnected server name.
  sdsSdsNetworkDisconnectionsCounterParameters dict success Counter parameter for SDS-SDS network.
   longWindow int success Long window for Counter Parameters.
   mediumWindow int success Medium window for Counter Parameters.
   shortWindow int success Short window for Counter Parameters.
  storagePool list success List of storage pools.
  systemId str success ID of system.
  vtreeMigrationNetworkThrottlingEnabled bool success Whether V-Tree migration network throttling enabled.
  vtreeMigrationNetworkThrottlingInKbps int success V-Tree migration network throttling in KBps.
+ +### Authors +* Bhavneet Sharma (@sharmb5) + +-------------------------------- +# Replication Consistency Group Module + +Manage replication consistency groups on Dell PowerFlex + +### Synopsis + Managing replication consistency groups on PowerFlex storage system includes getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze, activate, inactivate and deleting a replication consistency group. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
rcg_name str
The name of the replication consistency group.
It is unique across the PowerFlex array.
Mutually exclusive with rcg_id.
rcg_id str
The ID of the replication consistency group.
Mutually exclusive with rcg_name.
create_snapshot bool
Whether to create the snapshot of the replication consistency group.
rpo int
Desired RPO in seconds.
protection_domain_id str
Protection domain id.
Mutually exclusive with protection_domain_name.
protection_domain_name str
Protection domain name.
Mutually exclusive with protection_domain_id.
activity_mode str
  • Active
  • Inactive

Activity mode of RCG.
This parameter is supported for version 3.6 and above.
pause bool
Pause or resume the RCG.
freeze bool
Freeze or unfreeze the RCG.
pause_mode str
  • StopDataTransfer
  • OnlyTrackChanges

Pause mode.
It is required if pause is set as True.
target_volume_access_mode str
  • ReadOnly
  • NoAccess

Target volume access mode.
is_consistent bool
Consistency of RCG.
new_rcg_name str
Name of RCG to rename to.
remote_peer dict
Remote peer system.
  hostname str True
IP or FQDN of the remote peer host.
  username str True
The username of the remote peer host.
  password str True
The password of the remote peer host.
  validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
  port int 443
Port number through which communication happens with remote peer host.
  timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
  protection_domain_id str
Remote protection domain id.
Mutually exclusive with protection_domain_name.
  protection_domain_name str
Remote protection domain name.
Mutually exclusive with protection_domain_id.
state str present
  • present
  • absent

State of the replication consistency group.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The check_mode is supported. +* Idempotency is not supported for create snapshot operation. +* There is a delay in reflection of final state of RCG after few update operations on RCG. +* In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Get RCG details + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "{{rcg_name}}" + +- name: Create a snapshot of the RCG + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_id: "{{rcg_id}}" + create_snapshot: True + state: "present" + +- name: Create a replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + protection_domain_name: "domain1" + activity_mode: "active" + remote_peer: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + +- name: Modify replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + target_volume_access_mode: "ReadOnly" + activity_mode: "Inactive" + 
is_consistent: True + +- name: Rename replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + new_rcg_name: "rcg_test_rename" + +- name: Pause replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "pause" + pause_mode: "StopDataTransfer" + +- name: Resume replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "resume" + +- name: Freeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "freeze" + +- name: UnFreeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "unfreeze" + +- name: Delete replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
replication_consistency_group_details dict When replication consistency group exists Details of the replication consistency group.
  abstractState str success The abstract state of the replication consistency group.
  activeLocal bool success Whether the local replication consistency group is active.
  activeRemote bool success Whether the remote replication consistency group is active
  currConsistMode str success The current consistency mode of the replication consistency group.
  disasterRecoveryState str success The state of disaster recovery of the local replication consistency group.
  error int success The error code of the replication consistency group.
  failoverState str success The state of failover of the replication consistency group.
  failoverType str success The type of failover of the replication consistency group.
  freezeState str success The freeze state of the replication consistency group.
  id str success The ID of the replication consistency group.
  inactiveReason int success The reason for the inactivity of the replication consistency group.
  lastSnapCreationRc int success The return code of the last snapshot of the replication consistency group.
  lastSnapGroupId str success ID of the last snapshot of the replication consistency group.
  lifetimeState str success The Lifetime state of the replication consistency group.
  localActivityState str success The state of activity of the local replication consistency group.
  name str success The name of the replication consistency group.
  pauseMode str success The Lifetime state of the replication consistency group.
  peerMdmId str success The ID of the peer MDM of the replication consistency group.
  protectionDomainId str success The Protection Domain ID of the replication consistency group.
  remoteActivityState str success The state of activity of the remote replication consistency group..
  remoteDisasterRecoveryState str success The state of disaster recovery of the remote replication consistency group.
  remoteId str success The ID of the remote replication consistency group.
  remoteMdmId str success The ID of the remote MDM of the replication consistency group.
  remoteProtectionDomainId str success The ID of the remote Protection Domain.
  remoteProtectionDomainName str success The Name of the remote Protection Domain.
  replicationDirection str success The direction of the replication of the replication consistency group.
  rpoInSeconds int success The RPO value of the replication consistency group in seconds.
  snapCreationInProgress bool success Whether the process of snapshot creation of the replication consistency group is in progress or not.
  targetVolumeAccessMode str success The access mode of the target volume of the replication consistency group.
  type str success The type of the replication consistency group.
+ +### Authors +* Trisha Datta (@Trisha-Datta) +* Jennifer John (@Jennifer-John) + +-------------------------------- +# SDC Module + +Manage SDCs on Dell PowerFlex + +### Synopsis + Managing SDCs on PowerFlex storage system includes getting details of SDC and renaming SDC. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
sdc_name str
Name of the SDC.
Specify either sdc_name, sdc_id or sdc_ip for get/rename operation.
Mutually exclusive with sdc_id and sdc_ip.
sdc_id str
ID of the SDC.
Specify either sdc_name, sdc_id or sdc_ip for get/rename operation.
Mutually exclusive with sdc_name and sdc_ip.
sdc_ip str
IP of the SDC.
Specify either sdc_name, sdc_id or sdc_ip for get/rename operation.
Mutually exclusive with sdc_id and sdc_name.
sdc_new_name str
New name of the SDC. Used to rename the SDC.
state str True
  • present
  • absent

State of the SDC.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Get SDC details using SDC ip + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_ip: "{{sdc_ip}}" + state: "present" + +- name: Rename SDC using SDC name + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_name: "centos_sdc" + sdc_new_name: "centos_sdc_renamed" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
sdc_details dict When SDC exists Details of the SDC.
  id str success The ID of the SDC.
  mapped_volumes list success The details of the mapped volumes.
   id str success The ID of the volume.
   name str success The name of the volume.
   volumeType str success Type of the volume.
  name str success Name of the SDC.
  osType str success OS type of the SDC.
  sdcApproved bool success Indicates whether an SDC has approved access to the system.
  sdcIp str success IP of the SDC.
+ +### Authors +* Akash Shendge (@shenda1) + +-------------------------------- +# SDS Module + +Manage SDS on Dell PowerFlex + +### Synopsis + Managing SDS on PowerFlex storage system includes creating new SDS, getting details of SDS, adding/removing IP to/from SDS, modifying attributes of SDS, and deleting SDS. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
sds_name str
The name of the SDS.
Mandatory for create operation.
It is unique across the PowerFlex array.
Mutually exclusive with sds_id.
sds_id str
The ID of the SDS.
Except create operation, all other operations can be performed using sds_id.
Mutually exclusive with sds_name.
protection_domain_name str
The name of the protection domain.
Mutually exclusive with protection_domain_id.
protection_domain_id str
The ID of the protection domain.
Mutually exclusive with protection_domain_name.
sds_ip_list list
elements: dict

Dictionary of IPs and their roles for the SDS.
At least one IP-role is mandatory while creating a SDS.
IP-roles can be updated as well.
  ip str True
IP address of the SDS.
  role str True
  • sdsOnly
  • sdcOnly
  • all

Role assigned to the SDS IP address.
sds_ip_state str
  • present-in-sds
  • absent-in-sds

State of IP with respect to the SDS.
rfcache_enabled bool
Whether to enable the Read Flash cache.
rmcache_enabled bool
Whether to enable the Read RAM cache.
rmcache_size int
Read RAM cache size (in MB).
Minimum size is 128 MB.
Maximum size is 3911 MB.
sds_new_name str
SDS new name.
performance_profile str
  • Compact
  • HighPerformance

Performance profile to apply to the SDS.
The HighPerformance profile configures a predefined set of parameters for very high performance use cases.
Default value by API is HighPerformance.
state str True
  • present
  • absent

State of the SDS.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The maximum limit for the IPs that can be associated with an SDS is 8. +* There needs to be at least 1 IP for SDS communication and 1 for SDC communication. +* If only 1 IP exists, it must be with role 'all'; else 1 IP can be with role 'all'and other IPs with role 'sdcOnly'; or 1 IP must be with role 'sdsOnly' and others with role 'sdcOnly'. +* There can be 1 or more IPs with role 'sdcOnly'. +* There must be only 1 IP with SDS role (either with role 'all' or 'sdsOnly'). +* SDS can be created with RF cache disabled, but, be aware that the RF cache is not always updated. In this case, the user should re-try the operation. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Create SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "all" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Create SDS with all parameters + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node1" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + rmcache_enabled: true + rmcache_size: 128 + performance_profile: "HighPerformance" + state: "present" + +- name: Get SDS details using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "present" + +- name: Get SDS details using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: 
"{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "present" + +- name: Modify SDS attributes using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Modify SDS attributes using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Add IP and role to an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Remove IP and role from an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "absent-in-sds" + state: "present" + +- name: Delete SDS using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "absent" + +- name: Delete SDS using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + 
sds_id: "5718253c00000004" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
sds_details dict When SDS exists Details of the SDS.
  authenticationError str success Indicates authentication error.
  certificateInfo str success Information about certificate.
  configuredDrlMode str success Configured DRL mode.
  drlMode str success DRL mode.
  faultSetId str success Fault set ID.
  fglMetadataCacheSize int success FGL metadata cache size.
  fglMetadataCacheState str success FGL metadata cache state.
  fglNumConcurrentWrites int success FGL concurrent writes.
  id str success SDS ID.
  ipList list success SDS IP list.
   ip str success IP present in the SDS.
   role str success Role of the SDS IP.
  lastUpgradeTime str success Last time SDS was upgraded.
  links list success SDS links.
   href str success SDS instance URL.
   rel str success SDS's relationship with different entities.
  maintenanceState str success Maintenance state.
  maintenanceType str success Maintenance type.
  mdmConnectionState str success MDM connection state.
  membershipState str success Membership state.
  name str success Name of the SDS.
  numOfIoBuffers int success Number of IO buffers.
  numRestarts int success Number of restarts.
  onVmWare bool success Presence on VMware.
  perfProfile str success Performance profile.
  port int success SDS port.
  protectionDomainId str success Protection Domain ID.
  protectionDomainName str success Protection Domain Name.
  raidControllers int success Number of RAID controllers.
  rfcacheEnabled bool success Whether RF cache is enabled or not.
  rfcacheErrorApiVersionMismatch bool success RF cache error for API version mismatch.
  rfcacheErrorDeviceDoesNotExist bool success RF cache error for device does not exist.
  rfcacheErrorInconsistentCacheConfiguration bool success RF cache error for inconsistent cache configuration.
  rfcacheErrorInconsistentSourceConfiguration bool success RF cache error for inconsistent source configuration.
  rfcacheErrorInvalidDriverPath bool success RF cache error for invalid driver path.
  rfcacheErrorLowResources bool success RF cache error for low resources.
  rmcacheEnabled bool success Whether Read RAM cache is enabled or not.
  rmcacheFrozen bool success RM cache frozen.
  rmcacheMemoryAllocationState bool success RM cache memory allocation state.
  rmcacheSizeInKb int success RM cache size in KB.
  rmcacheSizeInMb int success RM cache size in MB.
  sdsConfigurationFailure str success SDS configuration failure.
  sdsDecoupled str success SDS decoupled.
  sdsReceiveBufferAllocationFailures str success SDS receive buffer allocation failures.
  sdsState str success SDS state.
  softwareVersionInfo str success SDS software version information.
+ +### Authors +* Rajshree Khare (@khareRajshree) + +-------------------------------- +# Storage Pool Module + +Managing Dell PowerFlex storage pool + +### Synopsis + Dell PowerFlex storage pool module includes getting the details of storage pool, creating a new storage pool, and modifying the attribute of a storage pool. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
storage_pool_name str
The name of the storage pool.
If more than one storage pool is found with the same name then protection domain id/name is required to perform the task.
Mutually exclusive with storage_pool_id.
storage_pool_id str
The id of the storage pool.
It is auto generated, hence should not be provided during creation of a storage pool.
Mutually exclusive with storage_pool_name.
protection_domain_name str
The name of the protection domain.
During creation of a pool, either protection domain name or id must be mentioned.
Mutually exclusive with protection_domain_id.
protection_domain_id str
The id of the protection domain.
During creation of a pool, either protection domain name or id must be mentioned.
Mutually exclusive with protection_domain_name.
media_type str
  • HDD
  • SSD
  • TRANSITIONAL

Type of devices in the storage pool.
storage_pool_new_name str
New name for the storage pool can be provided.
This parameter is used for renaming the storage pool.
use_rfcache bool
Enable/Disable RFcache on a specific storage pool.
use_rmcache bool
Enable/Disable RMcache on a specific storage pool.
state str True
  • present
  • absent

State of the storage pool.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* TRANSITIONAL media type is supported only during modification. +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Get the details of storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "sample_pool_name" + protection_domain_name: "sample_protection_domain" + state: "present" + +- name: Get the details of storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + state: "present" + +- name: Create a new storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + media_type: "HDD" + state: "present" + +- name: Modify a storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + use_rmcache: True + use_rfcache: True + state: "present" + +- name: Rename storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + storage_pool_new_name: "new_ansible_pool" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
storage_pool_details dict When storage pool exists Details of the storage pool.
  id str success ID of the storage pool under protection domain.
  mediaType str success Type of devices in the storage pool.
  name str success Name of the storage pool under protection domain.
  protectionDomainId str success ID of the protection domain in which pool resides.
  protectionDomainName str success Name of the protection domain in which pool resides.
  statistics dict success Statistics details of the storage pool.
   capacityInUseInKb str success Capacity of the storage pool in use.
   deviceIds list success Device IDs of the storage pool.
   unusedCapacityInKb str success Unused capacity of the storage pool.
  useRfcache bool success Enable/Disable RFcache on a specific storage pool.
  useRmcache bool success Enable/Disable RMcache on a specific storage pool.
+ +### Authors +* Arindam Datta (@dattaarindam) +* P Srinivas Rao (@srinivas-rao5) + +-------------------------------- +# Volume Module + +Manage volumes on Dell PowerFlex + +### Synopsis + Managing volumes on PowerFlex storage system includes creating, getting details, modifying attributes and deleting volume. + It also includes adding/removing snapshot policy, mapping/unmapping volume to/from SDC and listing associated snapshots. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
vol_name str
The name of the volume.
Mandatory for create operation.
It is unique across the PowerFlex array.
Mutually exclusive with vol_id.
vol_id str
The ID of the volume.
Except create operation, all other operations can be performed using vol_id.
Mutually exclusive with vol_name.
storage_pool_name str
The name of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
During creation, if storage pool name is provided then either protection domain name or id must be mentioned along with it.
Mutually exclusive with storage_pool_id.
storage_pool_id str
The ID of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
Mutually exclusive with storage_pool_name.
protection_domain_name str
The name of the protection domain.
During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it.
Mutually exclusive with protection_domain_id.
protection_domain_id str
The ID of the protection domain.
During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it.
Mutually exclusive with protection_domain_name.
vol_type str
  • THICK_PROVISIONED
  • THIN_PROVISIONED

Type of volume provisioning.
compression_type str
  • NORMAL
  • NONE

Type of the compression method.
use_rmcache bool
Whether to use RM Cache or not.
snapshot_policy_name str
Name of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type.
snapshot_policy_id str
ID of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type.
auto_snap_remove_type str
  • remove
  • detach

Whether to remove or detach the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type.
If the snapshot policy name/id is passed empty then auto_snap_remove_type is defaulted to detach.
size int
The size of the volume.
Size of the volume will be assigned as higher multiple of 8 GB.
cap_unit str
  • GB
  • TB

The unit of the volume size. It defaults to 'GB'.
vol_new_name str
New name of the volume. Used to rename the volume.
allow_multiple_mappings bool
Specifies whether to allow or not allow multiple mappings.
If the volume is mapped to one SDC then for every new mapping allow_multiple_mappings has to be passed as True.
sdc list
elements: dict

Specifies SDC parameters.
  sdc_name str
Name of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_id and sdc_ip.
  sdc_id str
ID of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_name and sdc_ip.
  sdc_ip str
IP of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
 Mutually exclusive with sdc_id and sdc_name.
  access_mode str
  • READ_WRITE
  • READ_ONLY
  • NO_ACCESS

Define the access mode for all mappings of the volume.
  bandwidth_limit int
Limit of volume network bandwidth.
Need to mention in multiple of 1024 Kbps.
To set no limit, 0 is to be passed.
  iops_limit int
Limit of volume IOPS.
Minimum IOPS limit is 11 and specify 0 for unlimited iops.
sdc_state str
  • mapped
  • unmapped

Mapping state of the SDC.
delete_snapshots bool
If True, the volume and all its dependent snapshots will be deleted.
If False, only the volume will be deleted.
It can be specified only when the state is absent.
It defaults to False, if not specified.
state str True
  • present
  • absent

State of the volume.
hostname str True
IP or FQDN of the PowerFlex host.
username str True
The username of the PowerFlex host.
password str True
The password of the PowerFlex host.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with PowerFlex host.
timeout int 120
Time after which connection will get terminated.
It is to be mentioned in seconds.
+ +### Notes +* The check_mode is not supported. +* The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform. + +### Examples +``` +- name: Create a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + protection_domain_name: "pd_1" + vol_type: "THICK_PROVISIONED" + compression_type: "NORMAL" + use_rmcache: True + size: 16 + state: "present" + +- name: Map a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + allow_multiple_mappings: True + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + sdc_state: "mapped" + state: "present" + +- name: Unmap a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + sdc_state: "unmapped" + state: "present" + +- name: Map multiple SDCs to a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + protection_domain_name: "pd_1" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + bandwidth_limit: 2048 + iops_limit: 20 + - sdc_ip: "198.10.xxx.xxx" + access_mode: "READ_ONLY" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Get the details of the volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: 
"{{validate_certs}}" + port: "{{port}}" + vol_id: "fe6c8b7100000005" + state: "present" + +- name: Modify the details of the Volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + new_vol_name: "new_sample_volume" + size: 64 + state: "present" + +- name: Delete the Volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: False + state: "absent" + +- name: Delete the Volume and all its dependent snapshots + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: True + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
volume_details dict When volume exists Details of the volume.
  id str success The ID of the volume.
  mappedSdcInfo dict success The details of the mapped SDC.
   accessMode str success Mapping access mode for the specified volume.
   limitBwInMbps int success Bandwidth limit for the SDC.
   limitIops int success IOPS limit for the SDC.
   sdcId str success ID of the SDC.
   sdcIp str success IP of the SDC.
   sdcName str success Name of the SDC.
  name str success Name of the volume.
  protectionDomainId str success ID of the protection domain in which volume resides.
  protectionDomainName str success Name of the protection domain in which volume resides.
  sizeInGb int success Size of the volume in GB.
  sizeInKb int success Size of the volume in KB.
  snapshotPolicyId str success ID of the snapshot policy associated with volume.
  snapshotPolicyName str success Name of the snapshot policy associated with volume.
  snapshotsList str success List of snapshots associated with the volume.
  statistics dict success Statistics details of the volume.
   numOfChildVolumes int success Number of child volumes.
   numOfMappedSdcs int success Number of mapped SDCs of the volume.
  storagePoolId str success ID of the storage pool in which volume resides.
  storagePoolName str success Name of the storage pool in which volume resides.
+ +### Authors +* P Srinivas Rao (@srinivas-rao5) + +-------------------------------- diff --git a/ansible_collections/dellemc/powerflex/docs/Release Notes.md b/ansible_collections/dellemc/powerflex/docs/Release Notes.md new file mode 100644 index 00000000..562937fb --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/Release Notes.md @@ -0,0 +1,73 @@ +**Ansible Modules for Dell Technologies PowerFlex** +========================================= +### Release notes 1.5.0 + +> © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell +> and other trademarks are trademarks of Dell Inc. or its +> subsidiaries. Other trademarks may be trademarks of their respective +> owners. + +Contents +------- +These release notes contain supplemental information about Ansible +Modules for Dell Technologies (Dell) PowerFlex. + +- [Revision History](#revision-history) +- [Product Description](#product-description) +- [New Features](#new-features-and-enhancements) +- [Known issues](#known-issues) +- [Limitations](#limitations) +- [Distribution](#distribution) +- [Documentation](#documentation) + +Revision history +---------------- +The table in this section lists the revision history of this document. + +Table 1. Revision history + +| Revision | Date | Description | +|----------|----------------|-------------------------------------------------------------| +| 01 | December 2022 | Current release of Ansible Modules for Dell PowerFlex 1.5.0 | + +Product description +------------------- + +The Ansible modules for Dell PowerFlex are used to automate and orchestrate +the deployment, configuration, and management of Dell PowerFlex storage +systems. The capabilities of Ansible modules are managing volumes, +storage pools, SDCs, snapshots, SDSs, replication consistency groups, devices, protection domain and MDM +cluster, and obtaining high-level information about a PowerFlex system information. 
+The modules use playbooks to list, show, create, delete, and modify +each of the entities. + +New features and enhancements +----------------------------- +Along with the previous release deliverables, this release supports following features - +- Info module is enhanced to support the listing replication consistency groups. +- Added New module for replication consistency groups. +- Updated modules to adhere with ansible community guidelines. +- Renamed gateway_host to hostname +- Renamed verifycert to validate_certs. + +Known issues +------------ +- Setting the RF cache and performance profile of the SDS during its creation fails intermittently on PowerFlex version 3.5 + +Limitations +----------- +- The API is accepting a negative integer value for overall_limit in the network_limits for a specific protection domain. + +Distribution +------------ +The software package is available for download from the [Ansible Modules +for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.5.0) page. + +Documentation +------------- +The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.5.0/docs) +page. It includes the following: + + - README + - Release Notes (this document) + - Product Guide diff --git a/ansible_collections/dellemc/powerflex/docs/SECURITY.md b/ansible_collections/dellemc/powerflex/docs/SECURITY.md new file mode 100644 index 00000000..d8bf879f --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/SECURITY.md @@ -0,0 +1,22 @@ + + +# Security policy + +The Ansible modules for Dell PowerFlex repository are inspected for security vulnerabilities via blackduck scans and static code analysis. + +In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/1.5.0/docs/CONTRIBUTING.md#Pull-requests) for more information. 
+ +## Reporting a vulnerability + +Have you discovered a security vulnerability in this project? +We ask you to alert the maintainers by sending an email, describing the issue, impact, and fix - if applicable. + +You can reach the Ansible modules for Dell PowerFlex maintainers at ansible.team@dell.com. diff --git a/ansible_collections/dellemc/powerflex/docs/SUPPORT.md b/ansible_collections/dellemc/powerflex/docs/SUPPORT.md new file mode 100644 index 00000000..26e6f159 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/docs/SUPPORT.md @@ -0,0 +1,12 @@ + + +## Support +For all your support needs you can interact with us on [GitHub](https://github.com/dell/ansible-powerflex) by creating a [GitHub Issue](https://github.com/dell/ansible-powerflex/issues) or through the [Ansible Community](https://www.dell.com/community/Automation/bd-p/Automation). diff --git a/ansible_collections/dellemc/powerflex/meta/execution-environment.yml b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml new file mode 100644 index 00000000..d2c0a3ea --- /dev/null +++ b/ansible_collections/dellemc/powerflex/meta/execution-environment.yml @@ -0,0 +1,5 @@ +--- +version: 1 +dependencies: + galaxy: requirements.yml #Absolute/relative path of requirements.yml + python: requirements.txt #Absolute/relative path of requirements.txt diff --git a/ansible_collections/dellemc/powerflex/meta/runtime.yml b/ansible_collections/dellemc/powerflex/meta/runtime.yml new file mode 100644 index 00000000..0e8263ed --- /dev/null +++ b/ansible_collections/dellemc/powerflex/meta/runtime.yml @@ -0,0 +1,39 @@ +--- +requires_ansible: ">=2.12" +plugin_routing: + modules: + dellemc_powerflex_gatherfacts: + redirect: dellemc.powerflex.info + deprecation: + removal_date: "2024-03-31" + warning_text: Use info module instead. + dellemc_powerflex_device: + redirect: dellemc.powerflex.device + deprecation: + removal_date: "2024-03-31" + warning_text: Use device module instead. 
+ dellemc_powerflex_sdc: + redirect: dellemc.powerflex.sdc + deprecation: + removal_date: "2024-03-31" + warning_text: Use sdc module instead. + dellemc_powerflex_sds: + redirect: dellemc.powerflex.sds + deprecation: + removal_date: "2024-03-31" + warning_text: Use sds module instead. + dellemc_powerflex_snapshot: + redirect: dellemc.powerflex.snapshot + deprecation: + removal_date: "2024-03-31" + warning_text: Use snapshot module instead. + dellemc_powerflex_storagepool: + redirect: dellemc.powerflex.storagepool + deprecation: + removal_date: "2024-03-31" + warning_text: Use storagepool module instead. + dellemc_powerflex_volume: + redirect: dellemc.powerflex.volume + deprecation: + removal_date: "2024-03-31" + warning_text: Use volume module instead. diff --git a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py new file mode 100644 index 00000000..34968034 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py @@ -0,0 +1,61 @@ +# Copyright: (c) 2020, Dell Technologies. +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + # Documentation fragment for PowerFlex + DOCUMENTATION = r''' + options: + hostname: + required: true + description: + - IP or FQDN of the PowerFlex host. + type: str + aliases: + - gateway_host + username: + type: str + required: true + description: + - The username of the PowerFlex host. + password: + type: str + required: true + description: + - The password of the PowerFlex host. + validate_certs: + type: bool + default: true + aliases: + - verifycert + description: + - Boolean variable to specify whether or not to validate SSL + certificate. + - C(true) - Indicates that the SSL certificate should be verified. 
+ - C(false) - Indicates that the SSL certificate should not be + verified. + port: + description: + - Port number through which communication happens with PowerFlex + host. + type: int + default: 443 + timeout: + description: + - Time after which connection will get terminated. + - It is to be mentioned in seconds. + type: int + required: False + default: 120 + requirements: + - A Dell PowerFlex storage system version 3.5 or later. + - Ansible-core 2.12 or later. + - PyPowerFlex 1.6.0. + - Python 3.9, 3.10 or 3.11. + notes: + - The modules present in the collection named as 'dellemc.powerflex' + are built to support the Dell PowerFlex storage platform. +''' diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py new file mode 100644 index 00000000..7436cbb1 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/logging_handler.py @@ -0,0 +1,24 @@ +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Custom rotating file handler for PowerFlex""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from datetime import datetime +from logging.handlers import RotatingFileHandler + + +class CustomRotatingFileHandler(RotatingFileHandler): + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + :param default_name: The default name of the log file. 
+ """ + src_file_name = default_name.split('.') + dest_file_name = "{0}_{1}.{2}.{3}".format( + src_file_name[0], '{0:%Y%m%d}'.format(datetime.now()), + src_file_name[1], src_file_name[2] + ) + return dest_file_name diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py new file mode 100644 index 00000000..8503aeb0 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py @@ -0,0 +1,186 @@ +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import math +import re +from decimal import Decimal +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.logging_handler \ + import CustomRotatingFileHandler +import traceback +from ansible.module_utils.basic import missing_required_lib + +"""import PyPowerFlex lib""" +try: + from PyPowerFlex import PowerFlexClient + from PyPowerFlex.objects.sds import Sds + from PyPowerFlex.objects import protection_domain + from PyPowerFlex.objects import storage_pool + from PyPowerFlex.objects import sdc + from PyPowerFlex.objects import volume + from PyPowerFlex.objects import system + from PyPowerFlex.objects.system import SnapshotDef + + HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = True, None +except ImportError: + HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = False, traceback.format_exc() + +"""importing pkg_resources""" +try: + from pkg_resources import parse_version + import pkg_resources + + PKG_RSRC_IMPORTED, PKG_RSRC_IMP_ERR = True, None +except ImportError: + PKG_RSRC_IMPORTED, PKG_RSRC_IMP_ERR = False, traceback.format_exc() + +"""importing dateutil""" +try: + import dateutil.relativedelta + HAS_DATEUTIL, DATEUTIL_IMP_ERR = True, None +except 
ImportError: + HAS_DATEUTIL, DATEUTIL_IMP_ERR = False, traceback.format_exc() + + +def get_powerflex_gateway_host_parameters(): + """Provides common access parameters required for the + ansible modules on PowerFlex Storage System""" + + return dict( + hostname=dict(type='str', aliases=['gateway_host'], required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', aliases=['verifycert'], required=False, default=True), + port=dict(type='int', required=False, default=443), + timeout=dict(type='int', required=False, default=120) + ) + + +def get_powerflex_gateway_host_connection(module_params): + """Establishes connection with PowerFlex storage system""" + + if HAS_POWERFLEX_SDK: + conn = PowerFlexClient( + gateway_address=module_params['hostname'], + gateway_port=module_params['port'], + verify_certificate=module_params['validate_certs'], + username=module_params['username'], + password=module_params['password'], + timeout=module_params['timeout']) + conn.initialize() + return conn + + +def ensure_required_libs(module): + """Check required libraries""" + + if not HAS_DATEUTIL: + module.fail_json(msg=missing_required_lib("python-dateutil"), + exception=DATEUTIL_IMP_ERR) + + if not PKG_RSRC_IMPORTED: + module.fail_json(msg=missing_required_lib("pkg_resources"), + exception=PKG_RSRC_IMP_ERR) + + if not HAS_POWERFLEX_SDK: + module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.6.0 or above"), + exception=POWERFLEX_SDK_IMP_ERR) + + min_ver = '1.6.0' + try: + curr_version = pkg_resources.require("PyPowerFlex")[0].version + supported_version = (parse_version(curr_version) >= parse_version(min_ver)) + if not supported_version: + module.fail_json(msg="PyPowerFlex {0} is not supported. 
" + "Required minimum version is " + "{1}".format(curr_version, min_ver)) + except Exception as e: + module.fail_json(msg="Getting PyPowerFlex SDK version, failed with " + "Error {0}".format(str(e))) + + +def get_logger(module_name, log_file_name='ansible_powerflex.log', log_devel=logging.INFO): + """ + Initialize logger and return the logger object. + :param module_name: Name of module to be part of log message + :param log_file_name: Name of file in which the log messages get appended + :param log_devel: Log level + :return LOG object + """ + FORMAT = '%(asctime)-15s %(filename)s %(levelname)s : %(message)s' + max_bytes = 5 * 1024 * 1024 + logging.basicConfig(filename=log_file_name, format=FORMAT) + LOG = logging.getLogger(module_name) + LOG.setLevel(log_devel) + handler = CustomRotatingFileHandler(log_file_name, maxBytes=max_bytes, backupCount=5) + formatter = logging.Formatter(FORMAT) + handler.setFormatter(formatter) + LOG.addHandler(handler) + LOG.propagate = False + return LOG + + +KB_IN_BYTES = 1024 +MB_IN_BYTES = 1024 * 1024 +GB_IN_BYTES = 1024 * 1024 * 1024 +TB_IN_BYTES = 1024 * 1024 * 1024 * 1024 + + +def get_size_bytes(size, cap_units): + """Convert the given size to bytes""" + + if size is not None and size > 0: + if cap_units in ('kb', 'KB'): + return size * KB_IN_BYTES + elif cap_units in ('mb', 'MB'): + return size * MB_IN_BYTES + elif cap_units in ('gb', 'GB'): + return size * GB_IN_BYTES + elif cap_units in ('tb', 'TB'): + return size * TB_IN_BYTES + else: + return size + else: + return 0 + + +def convert_size_with_unit(size_bytes): + """Convert size in byte with actual unit like KB,MB,GB,TB,PB etc.""" + + if not isinstance(size_bytes, int): + raise ValueError('This method takes Integer type argument only') + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, 
size_name[i]) + + +def get_size_in_gb(size, cap_units): + """Convert the given size to size in GB, size is restricted to 2 decimal places""" + + size_in_bytes = get_size_bytes(size, cap_units) + size = Decimal(size_in_bytes / GB_IN_BYTES) + size_in_gb = round(size) + return size_in_gb + + +def is_version_less_than_3_6(version): + """Verifies if powerflex version is less than 3.6""" + version = re.search(r'R\s*([\d.]+)', version.replace('_', '.')).group(1) + return \ + pkg_resources.parse_version(version) < pkg_resources.parse_version('3.6') + + +def is_invalid_name(name): + """Validates string against regex pattern""" + if name is not None: + regexp = re.compile(r'^[a-zA-Z0-9!@#$%^~*_-]*$') + if not regexp.search(name): + return True diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/device.py b/ansible_collections/dellemc/powerflex/plugins/modules/device.py new file mode 100644 index 00000000..a321315e --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/device.py @@ -0,0 +1,1105 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing device on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: device +version_added: '1.1.0' +short_description: Manage device on Dell PowerFlex +description: +- Managing device on PowerFlex storage system includes + adding new device, getting details of device, and removing a device. +author: +- Rajshree Khare (@khareRajshree) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + current_pathname: + description: + - Full path of the device to be added. + - Required while adding a device. + type: str + device_name: + description: + - Device name. + - Mutually exclusive with I(device_id). 
+ type: str
+ device_id:
+ description:
+ - Device ID.
+ - Mutually exclusive with I(device_name).
+ type: str
+ sds_name:
+ description:
+ - The name of the SDS.
+ - Required while adding a device.
+ - Mutually exclusive with I(sds_id).
+ type: str
+ sds_id:
+ description:
+ - The ID of the SDS.
+ - Required while adding a device.
+ - Mutually exclusive with I(sds_name).
+ type: str
+ storage_pool_name:
+ description:
+ - Storage Pool name.
+ - Used while adding a storage device.
+ - Mutually exclusive with I(storage_pool_id), I(acceleration_pool_id) and
+ I(acceleration_pool_name).
+ type: str
+ storage_pool_id:
+ description:
+ - Storage Pool ID.
+ - Used while adding a storage device.
+ - Media type supported are C(SSD) and C(HDD).
+ - Mutually exclusive with I(storage_pool_name), I(acceleration_pool_id) and
+ I(acceleration_pool_name).
+ type: str
+ acceleration_pool_name:
+ description:
+ - Acceleration Pool Name.
+ - Used while adding an acceleration device.
+ - Media type supported are C(SSD) and C(NVDIMM).
+ - Mutually exclusive with I(storage_pool_id), I(storage_pool_name) and
+ I(acceleration_pool_id).
+ type: str
+ acceleration_pool_id:
+ description:
+ - Acceleration Pool ID.
+ - Used while adding an acceleration device.
+ - Media type supported are C(SSD) and C(NVDIMM).
+ - Mutually exclusive with I(acceleration_pool_name), I(storage_pool_name) and
+ I(storage_pool_id).
+ type: str
+ protection_domain_name:
+ description:
+ - Protection domain name.
+ - Used while identifying a storage pool along with I(storage_pool_name).
+ - Mutually exclusive with I(protection_domain_id).
+ type: str
+ protection_domain_id:
+ description:
+ - Protection domain ID.
+ - Used while identifying a storage pool along with I(storage_pool_name).
+ - Mutually exclusive with I(protection_domain_name).
+ type: str
+ external_acceleration_type:
+ description:
+ - Device external acceleration types.
+ - Used while adding a device.
+ type: str + choices: ['Invalid', 'None', 'Read', 'Write', 'ReadAndWrite'] + media_type: + description: + - Device media types. + - Required while adding a device. + type: str + choices: ['HDD', 'SSD', 'NVDIMM'] + state: + description: + - State of the device. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The value for device_id is generated only after successful addition of the + device. + - To uniquely identify a device, either I(device_id) can be passed or one of + I(current_pathname) or I(device_name) must be passed with I(sds_id) or I(sds_name). + - It is recommended to install Rfcache driver for SSD device on SDS in + order to add it to an acceleration pool. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Add a device + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + media_type: "HDD" + device_name: "device2" + storage_pool_name: "pool1" + protection_domain_name: "domain1" + external_acceleration_type: "ReadAndWrite" + state: "present" +- name: Get device details using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "d7fe088900000000" + state: "present" +- name: Get device details using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node0" + state: "present" +- name: Get device details using (current_pathname, sds_id) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: 
"/dev/sdb" + sds_id: "5717d71800000000" + state: "present" +- name: Remove a device using device_id + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + device_id: "76eb7e2f00010000" + state: "absent" +- name: Remove a device using (current_pathname, sds_name) + dellemc.powerflex.device: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + current_pathname: "/dev/sdb" + sds_name: "node1" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +device_details: + description: Details of the device. + returned: When device exists + type: dict + contains: + accelerationPoolId: + description: Acceleration pool ID. + type: str + accelerationPoolName: + description: Acceleration pool name. + type: str + accelerationProps: + description: Indicates acceleration props. + type: str + aggregatedState: + description: Indicates aggregated state. + type: str + ataSecurityActive: + description: Indicates ATA security active state. + type: bool + autoDetectMediaType: + description: Indicates auto detection of media type. + type: str + cacheLookAheadActive: + description: Indicates cache look ahead active state. + type: bool + capacity: + description: Device capacity. + type: int + capacityLimitInKb: + description: Device capacity limit in KB. + type: int + deviceCurrentPathName: + description: Device current path name. + type: str + deviceOriginalPathName: + description: Device original path name. + type: str + deviceState: + description: Indicates device state. + type: str + deviceType: + description: Indicates device type. + type: str + errorState: + description: Indicates error state. + type: str + externalAccelerationType: + description: Indicates external acceleration type.
+ type: str + fglNvdimmMetadataAmortizationX100: + description: Indicates FGL NVDIMM meta data amortization value. + type: int + fglNvdimmWriteCacheSize: + description: Indicates FGL NVDIMM write cache size. + type: int + firmwareVersion: + description: Indicates firmware version. + type: str + id: + description: Device ID. + type: str + ledSetting: + description: Indicates LED setting. + type: str + links: + description: Device links. + type: list + contains: + href: + description: Device instance URL. + type: str + rel: + description: Relationship of device with different + entities. + type: str + logicalSectorSizeInBytes: + description: Logical sector size in bytes. + type: int + longSuccessfulIos: + description: Indicates long successful IOs. + type: list + maxCapacityInKb: + description: Maximum device capacity limit in KB. + type: int + mediaFailing: + description: Indicates media failing. + type: bool + mediaType: + description: Indicates media type. + type: str + modelName: + description: Indicates model name. + type: str + name: + description: Device name. + type: str + persistentChecksumState: + description: Indicates persistent checksum state. + type: str + physicalSectorSizeInBytes: + description: Physical sector size in bytes. + type: int + protectionDomainId: + description: Protection domain ID. + type: str + protectionDomainName: + description: Protection domain name. + type: str + raidControllerSerialNumber: + description: RAID controller serial number. + type: str + rfcacheErrorDeviceDoesNotExist: + description: Indicates RF cache error device does not exist. + type: bool + rfcacheProps: + description: RF cache props. + type: str + sdsId: + description: SDS ID. + type: str + sdsName: + description: SDS name. + type: str + serialNumber: + description: Indicates Serial number. + type: str + spSdsId: + description: Indicates SPs SDS ID. + type: str + ssdEndOfLifeState: + description: Indicates SSD end of life state. 
+ type: str + storagePoolId: + description: Storage Pool ID. + type: str + storagePoolName: + description: Storage Pool name. + type: str + storageProps: + description: Storage props. + type: list + temperatureState: + description: Indicates temperature state. + type: str + vendorName: + description: Indicates vendor name. + type: str + writeCacheActive: + description: Indicates write cache active. + type: bool + sample: { + "accelerationPoolId": null, + "accelerationProps": null, + "aggregatedState": "NeverFailed", + "ataSecurityActive": false, + "autoDetectMediaType": "SSD", + "cacheLookAheadActive": false, + "capacity": 0, + "capacityLimitInKb": 365772800, + "deviceCurrentPathName": "/dev/sdb", + "deviceOriginalPathName": "/dev/sdb", + "deviceState": "Normal", + "deviceType": "Unknown", + "errorState": "None", + "externalAccelerationType": "None", + "fglNvdimmMetadataAmortizationX100": 150, + "fglNvdimmWriteCacheSize": 16, + "firmwareVersion": null, + "id": "b6efa59900000000", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::b6efa59900000000", + "rel": "self" + }, + { + "href": "/api/instances/Device::b6efa59900000000/relationships + /Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::8f3bb0ce00000000", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + }, + { + "href": "/api/instances/SpSds::fedf6f2000000000", + "rel": "/api/parent/relationship/spSdsId" + } + ], + "logicalSectorSizeInBytes": 0, + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 365772800, + "mediaFailing": false, + "mediaType": "HDD", + "modelName": null, + "name": "device230", + "persistentChecksumState": "Protected", + "physicalSectorSizeInBytes": 0, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + 
"raidControllerSerialNumber": null, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheProps": null, + "sdsId": "8f3bb0ce00000000", + "sdsName": "node1", + "serialNumber": null, + "slotNumber": null, + "spSdsId": "fedf6f2000000000", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "e0d8f6c900000000", + "storagePoolName": "pool1", + "storageProps": { + "destFglAccDeviceId": null, + "destFglNvdimmSizeMb": 0, + "fglAccDeviceId": null, + "fglNvdimmSizeMb": 0 + }, + "temperatureState": "NeverFailed", + "vendorName": null, + "writeCacheActive": false + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils + +LOG = utils.get_logger('device') + + +class PowerFlexDevice(object): + """Class with device operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_device_parameters()) + + mut_ex_args = [['sds_name', 'sds_id'], + ['device_name', 'device_id'], + ['protection_domain_name', + 'protection_domain_id'], + ['storage_pool_name', 'storage_pool_id'], + ['acceleration_pool_name', 'acceleration_pool_id'], + ['acceleration_pool_id', 'storage_pool_id'], + ['acceleration_pool_name', 'storage_pool_name'], + ['device_id', 'sds_name'], + ['device_id', 'sds_id'], + ['device_id', 'current_pathname']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_device_details(self, current_pathname=None, sds_id=None, + 
device_name=None, device_id=None): + """Get device details + :param current_pathname: Device path name + :type current_pathname: str + :param sds_id: ID of the SDS + :type sds_id: str + :param device_name: Name of the device + :type device_name: str + :param device_id: ID of the device + :type device_id: str + :return: Details of device if it exist + :rtype: dict + """ + + try: + if current_pathname and sds_id: + device_details = self.powerflex_conn.device.get( + filter_fields={'deviceCurrentPathName': current_pathname, + 'sdsId': sds_id}) + elif device_name and sds_id: + device_details = self.powerflex_conn.device.get( + filter_fields={'name': device_name, + 'sdsId': sds_id}) + else: + device_details = self.powerflex_conn.device.get( + filter_fields={'id': device_id}) + + if len(device_details) == 0: + msg = "Device not found" + LOG.info(msg) + return None + + return device_details[0] + + except Exception as e: + error_msg = "Failed to get the device with error '%s'" % str(e) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_sds(self, sds_name=None, sds_id=None): + """Get SDS details + :param sds_name: Name of the SDS + :param sds_id: ID of the SDS + :return: SDS details + :rtype: dict + """ + name_or_id = sds_id if sds_id else sds_name + try: + sds_details = None + if sds_id: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'id': sds_id}) + + if sds_name: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'name': sds_name}) + + if not sds_details: + error_msg = "Unable to find the SDS with '%s'. Please " \ + "enter a valid SDS name/id." 
% name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return sds_details[0] + + except Exception as e: + error_msg = "Failed to get the SDS '%s' with error '%s'" \ + % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain details + :rtype: dict + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = None + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if not pd_details: + error_msg = "Unable to find the protection domain with " \ + "'%s'. Please enter a valid protection domain " \ + "name/id." 
% name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return pd_details[0] + + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_storage_pool(self, storage_pool_name=None, + storage_pool_id=None, + protection_domain_id=None): + """Get storage pool details + :param storage_pool_name: Name of the storage pool + :param storage_pool_id: ID of the storage pool + :param protection_domain_id: ID of the protection domain + :return: Storage pool details + :rtype: dict + """ + name_or_id = storage_pool_id if storage_pool_id else storage_pool_name + try: + storage_pool_details = None + if storage_pool_id: + storage_pool_details = self.powerflex_conn.storage_pool.get( + filter_fields={'id': storage_pool_id}) + + if storage_pool_name: + storage_pool_details = self.powerflex_conn.storage_pool.get( + filter_fields={'name': storage_pool_name, + 'protectionDomainId': protection_domain_id} + ) + + if not storage_pool_details: + error_msg = "Unable to find the storage pool with " \ + "'%s'. Please enter a valid storage pool " \ + "name/id." 
% name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return storage_pool_details[0] + + except Exception as e: + error_msg = "Failed to get the storage_pool '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_acceleration_pool(self, acceleration_pool_name=None, + acceleration_pool_id=None, + protection_domain_id=None): + """Get acceleration pool details + :param acceleration_pool_name: Name of the acceleration pool + :param acceleration_pool_id: ID of the acceleration pool + :param protection_domain_id: ID of the protection domain + :return: Acceleration pool details + :rtype: dict + """ + name_or_id = acceleration_pool_id \ + if acceleration_pool_id else acceleration_pool_name + try: + acceleration_pool_details = None + if acceleration_pool_id: + acceleration_pool_details = self.powerflex_conn.\ + acceleration_pool.get(filter_fields={ + 'id': acceleration_pool_id}) + + if acceleration_pool_name: + acceleration_pool_details = self.powerflex_conn.\ + acceleration_pool.get(filter_fields={ + 'name': acceleration_pool_name, + 'protectionDomainId': protection_domain_id}) + + if not acceleration_pool_details: + error_msg = "Unable to find the acceleration pool with " \ + "'%s'. Please enter a valid acceleration pool " \ + "name/id." 
% name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return acceleration_pool_details[0] + + except Exception as e: + error_msg = "Failed to get the acceleration pool '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def add_device(self, device_name, current_pathname, sds_id, + storage_pool_id, media_type, acceleration_pool_id, + external_acceleration_type): + """Add device + :param device_name: Device name + :type device_name: str + :param current_pathname: Current pathname of device + :type current_pathname: str + :param sds_id: SDS ID + :type sds_id: str + :param storage_pool_id: Storage Pool ID + :type storage_pool_id: str + :param media_type: Media type of device + :type media_type: str + :param acceleration_pool_id: Acceleration pool ID + :type acceleration_pool_id: str + :param external_acceleration_type: External acceleration type + :type external_acceleration_type: str + return: Boolean indicating if add device operation is successful + """ + try: + if device_name is None or len(device_name.strip()) == 0: + error_msg = "Please provide valid device_name value for " \ + "adding a device." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if current_pathname is None or len(current_pathname.strip()) == 0: + error_msg = "Current pathname of device is a mandatory " \ + "parameter for adding a device. Please enter a " \ + "valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_id is None or len(sds_id.strip()) == 0: + error_msg = "Please provide valid sds_id value " \ + "for adding a device." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if storage_pool_id is None and acceleration_pool_id is None: + error_msg = "Please provide either storage pool name/ID " \ + "or acceleration pool name/ID for adding a " \ + "device." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + add_params = ("current_pathname: %s, " + "sds_id: %s, " + "acceleration_pool_id: %s," + "external_acceleration_type: %s," + "media_type: %s," + "device_name: %s," + "storage_pool_id: %s," + % (current_pathname, sds_id, + acceleration_pool_id, + external_acceleration_type, + media_type, + device_name, + storage_pool_id)) + LOG.info("Adding device with params: %s", add_params) + + self.powerflex_conn.device.create( + current_pathname=current_pathname, + sds_id=sds_id, + acceleration_pool_id=acceleration_pool_id, + external_acceleration_type=external_acceleration_type, + media_type=media_type, + name=device_name, + storage_pool_id=storage_pool_id) + return True + except Exception as e: + error_msg = "Adding device %s operation failed with " \ + "error '%s'" % (device_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def remove_device(self, device_id): + """Remove device + :param device_id: Device ID + :type device_id: str + return: Boolean indicating if remove device operation is + successful + """ + try: + LOG.info("Device to be removed: %s", device_id) + self.powerflex_conn.device.delete(device_id=device_id) + return True + except Exception as e: + error_msg = "Remove device '%s' operation failed with " \ + "error '%s'" % (device_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_input_parameters(self, device_name=None, device_id=None, + current_pathname=None, sds_name=None, + sds_id=None): + """Validate the input parameters""" + + # Unique ways to identify a device: + # (current_pathname , sds_id) + # (current_pathname , sds_name) + # (device_name , sds_name) + # (device_name , sds_id) + # device_id. + + if current_pathname: + if (sds_name is None or len(sds_name.strip()) == 0) \ + and (sds_id is None or len(sds_id.strip()) == 0): + error_msg = "sds_name or sds_id is mandatory along with " \ + "current_pathname. 
Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + elif current_pathname is not None \ + and len(current_pathname.strip()) == 0: + error_msg = "Please enter a valid value for current_pathname." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if device_name: + if (sds_name is None or len(sds_name.strip()) == 0) \ + and (sds_id is None or len(sds_id.strip()) == 0): + error_msg = "sds_name or sds_id is mandatory along with " \ + "device_name. Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + elif device_name is not None and len(device_name.strip()) == 0: + error_msg = "Please enter a valid value for device_name." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_name: + if (current_pathname is None + or len(current_pathname.strip()) == 0) \ + and (device_name is None + or len(device_name.strip()) == 0): + error_msg = "current_pathname or device_name is mandatory " \ + "along with sds_name. Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + elif sds_name is not None and len(sds_name.strip()) == 0: + error_msg = "Please enter a valid value for sds_name." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_id: + if (current_pathname is None + or len(current_pathname.strip()) == 0) \ + and (device_name is None + or len(device_name.strip()) == 0): + error_msg = "current_pathname or device_name is mandatory " \ + "along with sds_id. Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + elif sds_id is not None and len(sds_id.strip()) == 0: + error_msg = "Please enter a valid value for sds_id." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if device_id is not None and len(device_id.strip()) == 0: + error_msg = "Please provide valid device_id value to identify " \ + "a device." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if current_pathname is None and device_name is None \ + and device_id is None: + error_msg = "Please specify a valid parameter combination to " \ + "identify a device." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_add_parameters(self, device_id=None, + external_acceleration_type=None, + storage_pool_id=None, + storage_pool_name=None, + acceleration_pool_id=None, + acceleration_pool_name=None): + """Validate the add device parameters""" + + if device_id: + error_msg = "Addition of device is allowed using " \ + "device_name only, device_id given." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + if external_acceleration_type and storage_pool_id is None \ + and storage_pool_name is None \ + and acceleration_pool_id is None \ + and acceleration_pool_name is None: + error_msg = "Storage Pool ID/name or Acceleration Pool " \ + "ID/name is mandatory along with " \ + "external_acceleration_type." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on device based on parameters passed in + the playbook + """ + current_pathname = self.module.params['current_pathname'] + device_name = self.module.params['device_name'] + device_id = self.module.params['device_id'] + sds_name = self.module.params['sds_name'] + sds_id = self.module.params['sds_id'] + storage_pool_name = self.module.params['storage_pool_name'] + storage_pool_id = self.module.params['storage_pool_id'] + acceleration_pool_id = self.module.params['acceleration_pool_id'] + acceleration_pool_name = self.module.params['acceleration_pool_name'] + protection_domain_name = self.module.params['protection_domain_name'] + protection_domain_id = self.module.params['protection_domain_id'] + external_acceleration_type = self.module.params[ + 'external_acceleration_type'] + media_type = self.module.params['media_type'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and device details + changed = False + result = dict( + changed=False, + device_details={} + ) + + # validate input parameters + self.validate_input_parameters(device_name, device_id, + current_pathname, sds_name, sds_id) + + # get SDS ID from name + if sds_name: + sds_details = self.get_sds(sds_name) + if sds_details: + sds_id = sds_details['id'] + msg = "Fetched the SDS details with id '%s', name '%s'" \ + % (sds_id, sds_name) + LOG.info(msg) + + # get device details + device_details = self.get_device_details(current_pathname, + sds_id, device_name, + device_id) + + if device_details: + device_id = device_details['id'] + msg = "Fetched the device details %s" % (str(device_details)) + LOG.info(msg) + + # add operation + add_changed = False + if state == 'present' and not device_details: + # get Protection Domain ID from name + # it is needed to uniquely identify a storage pool or acceleration + # pool using name + if 
protection_domain_name \ + and (storage_pool_name or acceleration_pool_name): + pd_details = self.get_protection_domain( + protection_domain_name) + if pd_details: + protection_domain_id = pd_details['id'] + msg = "Fetched the protection domain details with id " \ + "'%s', name '%s'" % (protection_domain_id, + protection_domain_name) + LOG.info(msg) + + # get storage pool ID from name + if storage_pool_name: + if protection_domain_id: + storage_pool_details = self.get_storage_pool( + storage_pool_name=storage_pool_name, + protection_domain_id=protection_domain_id) + if storage_pool_details: + storage_pool_id = storage_pool_details['id'] + msg = "Fetched the storage pool details with id '%s', " \ + "name '%s'" % (storage_pool_id, storage_pool_name) + LOG.info(msg) + else: + error_msg = "Protection domain name/id is required to " \ + "uniquely identify a storage pool, only " \ + "storage_pool_name is given." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + # get acceleration pool ID from name + if acceleration_pool_name: + if protection_domain_id: + acceleration_pool_details = self.get_acceleration_pool( + acceleration_pool_name=acceleration_pool_name, + protection_domain_id=protection_domain_id) + if acceleration_pool_details: + acceleration_pool_id = acceleration_pool_details['id'] + msg = "Fetched the acceleration pool details with id " \ + "'%s', name '%s'" % (acceleration_pool_id, + acceleration_pool_name) + LOG.info(msg) + else: + error_msg = "Protection domain name/id is required to " \ + "uniquely identify a acceleration pool, " \ + "only acceleration_pool_name is given." 
+ LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + # validate input parameters + self.validate_add_parameters(device_id, + external_acceleration_type, + storage_pool_id, + storage_pool_name, + acceleration_pool_id, + acceleration_pool_name) + add_changed = self.add_device(device_name, current_pathname, + sds_id, storage_pool_id, media_type, + acceleration_pool_id, + external_acceleration_type) + if add_changed: + device_details = self.get_device_details( + device_name=device_name, sds_id=sds_id) + device_id = device_details['id'] + msg = "Device created successfully, fetched device details " \ + "%s" % (str(device_details)) + LOG.info(msg) + + # remove operation + remove_changed = False + if state == 'absent' and device_details: + remove_changed = self.remove_device(device_id) + + if add_changed or remove_changed: + changed = True + + # modify operation + if device_details and state == 'present': + modify_dict = to_modify(device_details, media_type, + external_acceleration_type) + if modify_dict: + error_msg = "Modification of device attributes is " \ + "currently not supported by Ansible modules." 
+ LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + # Returning the updated device details + if state == 'present': + device_details = self.show_output(device_id) + result['device_details'] = device_details + result['changed'] = changed + self.module.exit_json(**result) + + def show_output(self, device_id): + """Show device details + :param device_id: ID of the device + :type device_id: str + :return: Details of device + :rtype: dict + """ + + try: + device_details = self.powerflex_conn.device.get( + filter_fields={'id': device_id}) + + if len(device_details) == 0: + msg = "Device with identifier '%s' not found" % device_id + LOG.error(msg) + return None + + # Append SDS name + if 'sdsId' in device_details[0] and device_details[0]['sdsId']: + sds_details = self.get_sds(sds_id=device_details[0]['sdsId']) + device_details[0]['sdsName'] = sds_details['name'] + + # Append storage pool name and its protection domain name and ID + if 'storagePoolId' in device_details[0] \ + and device_details[0]['storagePoolId']: + sp_details = self.get_storage_pool( + storage_pool_id=device_details[0]['storagePoolId']) + device_details[0]['storagePoolName'] = sp_details['name'] + pd_id = sp_details['protectionDomainId'] + device_details[0]['protectionDomainId'] = pd_id + pd_details = self.get_protection_domain( + protection_domain_id=pd_id) + device_details[0]['protectionDomainName'] = pd_details['name'] + + # Append acceleration pool name and its protection domain name + # and ID + if 'accelerationPoolId' in device_details[0] \ + and device_details[0]['accelerationPoolId']: + ap_details = self.get_acceleration_pool( + acceleration_pool_id=device_details[0][ + 'accelerationPoolId']) + device_details[0]['accelerationPoolName'] = ap_details['name'] + pd_id = ap_details['protectionDomainId'] + device_details[0]['protectionDomainId'] = pd_id + pd_details = self.get_protection_domain( + protection_domain_id=pd_id) + device_details[0]['protectionDomainName'] = pd_details['name'] 
+ + return device_details[0] + + except Exception as e: + error_msg = "Failed to get the device '%s' with error '%s'"\ + % (device_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + +def to_modify(device_details, media_type, external_acceleration_type): + """Identify device attributes to be modified""" + + modify_dict = {} + + if media_type is not None and \ + device_details['mediaType'] != media_type: + modify_dict['mediaType'] = media_type + + if external_acceleration_type is not None and \ + device_details['externalAccelerationType'] \ + != external_acceleration_type: + modify_dict['externalAccelerationType'] \ + = external_acceleration_type + + if len(modify_dict) != 0: + LOG.info("Attributes to be modified: %s", modify_dict) + return modify_dict + + +def get_powerflex_device_parameters(): + """This method provide parameter required for the device module on + PowerFlex""" + return dict( + current_pathname=dict(), + device_name=dict(), + device_id=dict(), + sds_name=dict(), + sds_id=dict(), + storage_pool_name=dict(), + storage_pool_id=dict(), + acceleration_pool_id=dict(), + acceleration_pool_name=dict(), + protection_domain_name=dict(), + protection_domain_id=dict(), + external_acceleration_type=dict(choices=['Invalid', 'None', 'Read', + 'Write', 'ReadAndWrite']), + media_type=dict(choices=['HDD', 'SSD', 'NVDIMM']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex device object and perform actions on it + based on user input from playbook""" + obj = PowerFlexDevice() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/info.py b/ansible_collections/dellemc/powerflex/plugins/modules/info.py new file mode 100644 index 00000000..40bdfd92 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/info.py @@ -0,0 +1,1495 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell 
Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for Gathering information about Dell Technologies (Dell) PowerFlex""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: info + +version_added: '1.0.0' + +short_description: Gathering information about Dell PowerFlex + +description: +- Gathering information about Dell PowerFlex storage system includes + getting the api details, list of volumes, SDSs, SDCs, storage pools, + protection domains, snapshot policies, and devices. + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +author: +- Arindam Datta (@dattaarindam) + +options: + gather_subset: + description: + - List of string variables to specify the Powerflex storage system + entities for which information is required. + - Volumes - C(vol). + - Storage pools - C(storage_pool). + - Protection domains - C(protection_domain). + - SDCs - C(sdc). + - SDSs - C(sds). + - Snapshot policies - C(snapshot_policy). + - Devices - C(device). + - Replication consistency groups - C(rcg). + choices: [vol, storage_pool, protection_domain, sdc, sds, + snapshot_policy, device, rcg] + type: list + elements: str + filters: + description: + - List of filters to support filtered output for storage entities. + - Each filter is a list of I(filter_key), I(filter_operator), I(filter_value). + - Supports passing of multiple filters. + type: list + elements: dict + suboptions: + filter_key: + description: + - Name identifier of the filter. + type: str + required: true + filter_operator: + description: + - Operation to be performed on filter key. + type: str + choices: [equal] + required: true + filter_value: + description: + - Value of the filter key. + type: str + required: true +notes: + - The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' +- name: Get detailed list of PowerFlex entities + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + - storage_pool + - protection_domain + - sdc + - sds + - snapshot_policy + - device + - rcg + +- name: Get a subset list of PowerFlex volumes + dellemc.powerflex.info: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + filters: + - filter_key: "name" + filter_operator: "equal" + filter_value: "ansible_test" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +Array_Details: + description: System entities of PowerFlex storage array. + returned: always + type: dict + contains: + addressSpaceUsage: + description: Address space usage. + type: str + authenticationMethod: + description: Authentication method. + type: str + capacityAlertCriticalThresholdPercent: + description: Capacity alert critical threshold percentage. + type: int + capacityAlertHighThresholdPercent: + description: Capacity alert high threshold percentage. + type: int + capacityTimeLeftInDays: + description: Capacity time left in days. + type: str + cliPasswordAllowed: + description: CLI password allowed. + type: bool + daysInstalled: + description: Days installed. + type: int + defragmentationEnabled: + description: Defragmentation enabled. + type: bool + enterpriseFeaturesEnabled: + description: Enterprise features enabled. + type: bool + id: + description: The ID of the system. + type: str + installId: + description: installation Id. + type: str + isInitialLicense: + description: Initial license. + type: bool + lastUpgradeTime: + description: Last upgrade time. + type: int + managementClientSecureCommunicationEnabled: + description: Management client secure communication enabled. 
+ type: bool + maxCapacityInGb: + description: Maximum capacity in GB. + type: dict + mdmCluster: + description: MDM cluster details. + type: dict + mdmExternalPort: + description: MDM external port. + type: int + mdmManagementPort: + description: MDM management port. + type: int + mdmSecurityPolicy: + description: MDM security policy. + type: str + showGuid: + description: Show guid. + type: bool + swid: + description: SWID. + type: str + systemVersionName: + description: System version and name. + type: str + tlsVersion: + description: TLS version. + type: str + upgradeState: + description: Upgrade state. + type: str + sample: { + "addressSpaceUsage": "Normal", + "authenticationMethod": "Native", + "capacityAlertCriticalThresholdPercent": 90, + "capacityAlertHighThresholdPercent": 80, + "capacityTimeLeftInDays": "24", + "cliPasswordAllowed": true, + "daysInstalled": 66, + "defragmentationEnabled": true, + "enterpriseFeaturesEnabled": true, + "id": "4a54a8ba6df0690f", + "installId": "38622771228e56db", + "isInitialLicense": true, + "lastUpgradeTime": 0, + "managementClientSecureCommunicationEnabled": true, + "maxCapacityInGb": "Unlimited", + "mdmCluster": { + "clusterMode": "ThreeNodes", + "clusterState": "ClusteredNormal", + "goodNodesNum": 3, + "goodReplicasNum": 2, + "id": "5356091375512217871", + "master": { + "id": "6101582c2ca8db00", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "name": "node0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "port": 9011, + "role": "Manager", + "status": "Normal", + "versionInfo": "R3_6.0.0", + "virtualInterfaces": [ + "ens160" + ] + }, + "slaves": [ + { + "id": "23fb724015661901", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "port": 9011, + "role": "Manager", + "status": "Normal", + "versionInfo": "R3_6.0.0", + "virtualInterfaces": [ + "ens160" + ] + } + ], + "tieBreakers": [ + { + "id": 
"6ef27eb20d0c1202", + "ips": [ + "10.47.xxx.xxx" + ], + "managementIPs": [ + "10.47.xxx.xxx" + ], + "opensslVersion": "N/A", + "port": 9011, + "role": "TieBreaker", + "status": "Normal", + "versionInfo": "R3_6.0.0" + } + ] + }, + "mdmExternalPort": 7611, + "mdmManagementPort": 6611, + "mdmSecurityPolicy": "None", + "showGuid": true, + "swid": "", + "systemVersionName": "DellEMC PowerFlex Version: R3_6.0.354", + "tlsVersion": "TLSv1.2", + "upgradeState": "NoUpgrade" + } +API_Version: + description: API version of PowerFlex API Gateway. + returned: always + type: str + sample: "3.5" +Protection_Domains: + description: Details of all protection domains. + returned: always + type: list + contains: + id: + description: protection domain id. + type: str + name: + description: protection domain name. + type: str + sample: [ + { + "id": "9300e90900000001", + "name": "domain2" + }, + { + "id": "9300c1f900000000", + "name": "domain1" + } + ] +SDCs: + description: Details of storage data clients. + returned: always + type: list + contains: + id: + description: storage data client id. + type: str + name: + description: storage data client name. + type: str + sample: [ + { + "id": "07335d3d00000006", + "name": "LGLAP203" + }, + { + "id": "07335d3c00000005", + "name": "LGLAP178" + }, + { + "id": "0733844a00000003" + } + ] +SDSs: + description: Details of storage data servers. + returned: always + type: list + contains: + id: + description: storage data server id. + type: str + name: + description: storage data server name. + type: str + sample: [ + { + "id": "8f3bb0cc00000002", + "name": "node0" + }, + { + "id": "8f3bb0ce00000000", + "name": "node1" + }, + { + "id": "8f3bb15300000001", + "name": "node22" + } + ] +Snapshot_Policies: + description: Details of snapshot policies. + returned: always + type: list + contains: + id: + description: snapshot policy id. + type: str + name: + description: snapshot policy name. 
+ type: str + sample: [ + { + "id": "2b380c5c00000000", + "name": "sample_snap_policy" + }, + { + "id": "2b380c5d00000001", + "name": "sample_snap_policy_1" + } + ] +Storage_Pools: + description: Details of storage pools. + returned: always + type: list + contains: + mediaType: + description: Type of devices in the storage pool. + type: str + useRfcache: + description: Enable/Disable RFcache on a specific storage pool. + type: bool + useRmcache: + description: Enable/Disable RMcache on a specific storage pool. + type: bool + id: + description: ID of the storage pool under protection domain. + type: str + name: + description: Name of the storage pool under protection domain. + type: str + protectionDomainId: + description: ID of the protection domain in which pool resides. + type: str + protectionDomainName: + description: Name of the protection domain in which pool resides. + type: str + statistics: + description: Statistics details of the storage pool. + type: dict + contains: + capacityInUseInKb: + description: Total capacity of the storage pool. + type: str + unusedCapacityInKb: + description: Unused capacity of the storage pool. + type: str + deviceIds: + description: Device Ids of the storage pool. 
+ type: list + sample: [ + { + "addressSpaceUsage": "Normal", + "addressSpaceUsageType": "DeviceCapacityLimit", + "backgroundScannerBWLimitKBps": 3072, + "backgroundScannerMode": "DataComparison", + "bgScannerCompareErrorAction": "ReportAndFix", + "bgScannerReadErrorAction": "ReportAndFix", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "capacityUsageState": "Normal", + "capacityUsageType": "NetCapacity", + "checksumEnabled": false, + "compressionMethod": "Invalid", + "dataLayout": "MediumGranularity", + "externalAccelerationType": "None", + "fglAccpId": null, + "fglExtraCapacity": null, + "fglMaxCompressionRatio": null, + "fglMetadataSizeXx100": null, + "fglNvdimmMetadataAmortizationX100": null, + "fglNvdimmWriteCacheSizeInMb": null, + "fglOverProvisioningFactor": null, + "fglPerfProfile": null, + "fglWriteAtomicitySize": null, + "fragmentationEnabled": true, + "id": "e0d8f6c900000000", + "links": [ + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/SpSds", + "rel": "/api/StoragePool/relationship/SpSds" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + 
"activeEnterProtectedMaintenanceModeCapacityInKb": 0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 
3, + "numOfIncomingVtreeMigrations": 0, + "numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + 
"semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, 
+ "volumeIds": [ + "456afc7900XXXXXXXX" + ], + "vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, + "mediaType": "HDD", + "name": "pool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "persistentChecksumBuilderLimitKb": 3072, + "persistentChecksumEnabled": true, + "persistentChecksumState": "Protected", + "persistentChecksumValidateOnRead": false, + "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, + "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, + "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, + "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, + "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", + "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "replicationCapacityMaxRatio": 32, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, + "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, + "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, + "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, + 
"vtreeMigrationIoPriorityPolicy": "favorAppIos", + "vtreeMigrationIoPriorityQuietPeriodInMsec": null, + "zeroPaddingEnabled": true + } + ] +Volumes: + description: Details of volumes. + returned: always + type: list + contains: + id: + description: The ID of the volume. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + accessMode: + description: mapping access mode for the specified volume. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the volume. + type: str + sizeInKb: + description: Size of the volume in Kb. + type: int + sizeInGb: + description: Size of the volume in Gb. + type: int + storagePoolId: + description: ID of the storage pool in which volume resides. + type: str + storagePoolName: + description: Name of the storage pool in which volume resides. + type: str + protectionDomainId: + description: ID of the protection domain in which volume resides. + type: str + protectionDomainName: + description: Name of the protection domain in which volume resides. + type: str + snapshotPolicyId: + description: ID of the snapshot policy associated with volume. + type: str + snapshotPolicyName: + description: Name of the snapshot policy associated with volume. + type: str + snapshotsList: + description: List of snapshots associated with the volume. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. 
+ type: int + sample: [ + { + "accessModeLimit": "ReadWrite", + "ancestorVolumeId": null, + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": null, + "creationTime": 1661234220, + "dataLayout": "MediumGranularity", + "id": "456afd7XXXXXXX", + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": [ + { + "accessMode": "ReadWrite", + "isDirectBufferMapping": false, + "limitBwInMbps": 0, + "limitIops": 0, + "sdcId": "c42425cbXXXXX", + "sdcIp": "10.XXX.XX.XX", + "sdcName": null + } + ], + "name": "vol-1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [ + ], + "secureSnapshotExpTime": 0, + "sizeInKb": 8388608, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, + "storagePoolId": "7630a248XXXXXXX", + "timeStampIsAccurate": false, + 
"useRmcache": false,
+            "volumeReplicationState": "UnmarkedForReplication",
+            "volumeType": "ThinProvisioned",
+            "vtreeId": "32b168bXXXXXX"
+        }
+    ]
+Devices:
+    description: Details of devices.
+    returned: always
+    type: list
+    contains:
+        id:
+            description: device id.
+            type: str
+        name:
+            description: device name.
+            type: str
+    sample: [
+        {
+            "id": "b6efa59900000000",
+            "name": "device230"
+        },
+        {
+            "id": "b6efa5fa00020000",
+            "name": "device_node0"
+        },
+        {
+            "id": "b7f3a60900010000",
+            "name": "device22"
+        }
+    ]
+Replication_Consistency_Groups:
+    description: Details of rcgs.
+    returned: always
+    type: list
+    contains:
+        id:
+            description: The ID of the replication consistency group.
+            type: str
+        name:
+            description: The name of the replication consistency group.
+            type: str
+        protectionDomainId:
+            description: The Protection Domain ID of the replication consistency group.
+            type: str
+        peerMdmId:
+            description: The ID of the peer MDM of the replication consistency group.
+            type: str
+        remoteId:
+            description: The ID of the remote replication consistency group.
+            type: str
+        remoteMdmId:
+            description: The ID of the remote MDM of the replication consistency group.
+            type: str
+        currConsistMode:
+            description: The current consistency mode of the replication consistency group.
+            type: str
+        freezeState:
+            description: The freeze state of the replication consistency group.
+            type: str
+        lifetimeState:
+            description: The Lifetime state of the replication consistency group.
+            type: str
+        pauseMode:
+            description: The pause mode of the replication consistency group.
+            type: str
+        snapCreationInProgress:
+            description: Whether the process of snapshot creation of the replication consistency group is in progress or not.
+            type: bool
+        lastSnapGroupId:
+            description: ID of the last snapshot of the replication consistency group.
+            type: str
+        lastSnapCreationRc:
+            description: The return code of the last snapshot of the replication consistency group.
+            type: int
+        targetVolumeAccessMode:
+            description: The access mode of the target volume of the replication consistency group.
+            type: str
+        remoteProtectionDomainId:
+            description: The ID of the remote Protection Domain.
+            type: str
+        remoteProtectionDomainName:
+            description: The Name of the remote Protection Domain.
+            type: str
+        failoverType:
+            description: The type of failover of the replication consistency group.
+            type: str
+        failoverState:
+            description: The state of failover of the replication consistency group.
+            type: str
+        activeLocal:
+            description: Whether the local replication consistency group is active.
+            type: bool
+        activeRemote:
+            description: Whether the remote replication consistency group is active.
+            type: bool
+        abstractState:
+            description: The abstract state of the replication consistency group.
+            type: str
+        localActivityState:
+            description: The state of activity of the local replication consistency group.
+            type: str
+        remoteActivityState:
+            description: The state of activity of the remote replication consistency group.
+            type: str
+        inactiveReason:
+            description: The reason for the inactivity of the replication consistency group.
+            type: int
+        rpoInSeconds:
+            description: The RPO value of the replication consistency group in seconds.
+            type: int
+        replicationDirection:
+            description: The direction of the replication of the replication consistency group.
+            type: str
+        disasterRecoveryState:
+            description: The state of disaster recovery of the local replication consistency group.
+            type: str
+        remoteDisasterRecoveryState:
+            description: The state of disaster recovery of the remote replication consistency group.
+            type: str
+        error:
+            description: The error code of the replication consistency group.
+            type: int
+        type:
+            description: The type of the replication consistency group.
+ type: str + sample: { + "protectionDomainId": "b969400500000000", + "peerMdmId": "6c3d94f600000000", + "remoteId": "2130961a00000000", + "remoteMdmId": "0e7a082862fedf0f", + "currConsistMode": "Consistent", + "freezeState": "Unfrozen", + "lifetimeState": "Normal", + "pauseMode": "None", + "snapCreationInProgress": false, + "lastSnapGroupId": "e58280b300000001", + "lastSnapCreationRc": "SUCCESS", + "targetVolumeAccessMode": "NoAccess", + "remoteProtectionDomainId": "4eeb304600000000", + "remoteProtectionDomainName": "domain1", + "failoverType": "None", + "failoverState": "None", + "activeLocal": true, + "activeRemote": true, + "abstractState": "Ok", + "localActivityState": "Active", + "remoteActivityState": "Active", + "inactiveReason": 11, + "rpoInSeconds": 30, + "replicationDirection": "LocalToRemote", + "disasterRecoveryState": "None", + "remoteDisasterRecoveryState": "None", + "error": 65, + "name": "test_rcg", + "type": "User", + "id": "aadc17d500000000" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('info') + + +class PowerFlexInfo(object): + """Class with Info operations""" + + filter_mapping = {'equal': 'eq.'} + + def __init__(self): + """ Define all parameters required by this module""" + + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_info_parameters()) + + self.filter_keys = sorted( + [k for k in self.module_params['filters']['options'].keys() + if 'filter' in k]) + + """ initialize the ansible module """ + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=True) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info('Got the PowerFlex system connection object instance') + LOG.info('The check_mode flag %s', 
self.module.check_mode) + + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_api_details(self): + """ Get api details of the array """ + try: + LOG.info('Getting API details ') + api_version = self.powerflex_conn.system.api_version() + return api_version + + except Exception as e: + msg = 'Get API details from Powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_array_details(self): + """ Get system details of a powerflex array """ + + try: + LOG.info('Getting array details ') + entity_list = ['addressSpaceUsage', 'authenticationMethod', + 'capacityAlertCriticalThresholdPercent', + 'capacityAlertHighThresholdPercent', + 'capacityTimeLeftInDays', 'cliPasswordAllowed', + 'daysInstalled', 'defragmentationEnabled', + 'enterpriseFeaturesEnabled', 'id', 'installId', + 'isInitialLicense', 'lastUpgradeTime', + 'managementClientSecureCommunicationEnabled', + 'maxCapacityInGb', 'mdmCluster', + 'mdmExternalPort', 'mdmManagementPort', + 'mdmSecurityPolicy', 'showGuid', 'swid', + 'systemVersionName', 'tlsVersion', 'upgradeState'] + + sys_list = self.powerflex_conn.system.get() + sys_details_list = [] + for sys in sys_list: + sys_details = {} + for entity in entity_list: + if entity in sys.keys(): + sys_details.update({entity: sys[entity]}) + if sys_details: + sys_details_list.append(sys_details) + + return sys_details_list + + except Exception as e: + msg = 'Get array details from Powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_sdc_list(self, filter_dict=None): + """ Get the list of sdcs on a given PowerFlex storage system """ + + try: + LOG.info('Getting SDC list ') + if filter_dict: + sdc = self.powerflex_conn.sdc.get(filter_fields=filter_dict) + else: + sdc = self.powerflex_conn.sdc.get() + return result_list(sdc) + + except Exception as e: + msg = 'Get SDC list from powerflex array failed with' \ + ' 
error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_sds_list(self, filter_dict=None): + """ Get the list of sdses on a given PowerFlex storage system """ + + try: + LOG.info('Getting SDS list ') + if filter_dict: + sds = self.powerflex_conn.sds.get(filter_fields=filter_dict) + else: + sds = self.powerflex_conn.sds.get() + return result_list(sds) + + except Exception as e: + msg = 'Get sds list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_pd_list(self, filter_dict=None): + """ Get the list of Protection Domains on a given PowerFlex + storage system """ + + try: + LOG.info('Getting protection domain list ') + + if filter_dict: + pd = self.powerflex_conn.protection_domain.get(filter_fields=filter_dict) + else: + pd = self.powerflex_conn.protection_domain.get() + return result_list(pd) + + except Exception as e: + msg = 'Get protection domain list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_storage_pool_list(self, filter_dict=None): + """ Get the list of storage pools on a given PowerFlex storage + system """ + + try: + LOG.info('Getting storage pool list ') + if filter_dict: + pool = self.powerflex_conn.storage_pool.get(filter_fields=filter_dict) + else: + pool = self.powerflex_conn.storage_pool.get() + + if pool: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_storagepools() + list_of_pool_ids_in_statistics = statistics_map.keys() + for item in pool: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_pool_ids_in_statistics else {} + return result_list(pool) + + except Exception as e: + msg = 'Get storage pool list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_replication_consistency_group_list(self, filter_dict=None): + """ Get the list of replication 
consistency group on a given PowerFlex storage + system """ + + try: + LOG.info('Getting replication consistency group list ') + if filter_dict: + rcgs = self.powerflex_conn.replication_consistency_group.get(filter_fields=filter_dict) + else: + rcgs = self.powerflex_conn.replication_consistency_group.get() + if rcgs: + api_version = self.powerflex_conn.system.get()[0]['mdmCluster']['master']['versionInfo'] + statistics_map = \ + self.powerflex_conn.replication_consistency_group.get_all_statistics(utils.is_version_less_than_3_6(api_version)) + list_of_rcg_ids_in_statistics = statistics_map.keys() + for rcg in rcgs: + rcg.pop('links', None) + rcg['statistics'] = statistics_map[rcg['id']] if rcg['id'] in list_of_rcg_ids_in_statistics else {} + return result_list(rcgs) + + except Exception as e: + msg = 'Get replication consistency group list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_volumes_list(self, filter_dict=None): + """ Get the list of volumes on a given PowerFlex storage + system """ + + try: + LOG.info('Getting volumes list ') + if filter_dict: + volumes = self.powerflex_conn.volume.get(filter_fields=filter_dict) + else: + volumes = self.powerflex_conn.volume.get() + + if volumes: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_volumes() + list_of_vol_ids_in_statistics = statistics_map.keys() + for item in volumes: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_vol_ids_in_statistics else {} + return result_list(volumes) + + except Exception as e: + msg = 'Get volumes list from powerflex array failed with' \ + ' error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_snapshot_policy_list(self, filter_dict=None): + """ Get the list of snapshot schedules on a given PowerFlex storage + system """ + + try: + LOG.info('Getting snapshot schedules list ') + if filter_dict: + snapshot_schedules = \ + 
self.powerflex_conn.snapshot_policy.get( + filter_fields=filter_dict) + else: + snapshot_schedules = \ + self.powerflex_conn.snapshot_policy.get() + + return result_list(snapshot_schedules) + + except Exception as e: + msg = 'Get snapshot schedules list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_devices_list(self, filter_dict=None): + """ Get the list of devices on a given PowerFlex storage + system """ + + try: + LOG.info('Getting device list ') + if filter_dict: + devices = self.powerflex_conn.device.get(filter_fields=filter_dict) + else: + devices = self.powerflex_conn.device.get() + + return result_list(devices) + + except Exception as e: + msg = 'Get device list from powerflex array failed ' \ + 'with error %s' % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_filter(self, filter_dict): + """ Validate given filter_dict """ + + is_invalid_filter = self.filter_keys != sorted(list(filter_dict)) + if is_invalid_filter: + msg = "Filter should have all keys: '{0}'".format( + ", ".join(self.filter_keys)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + is_invalid_filter = [filter_dict[i] is None for i in filter_dict] + if True in is_invalid_filter: + msg = "Filter keys: '{0}' cannot be None".format(self.filter_keys) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_filters(self, filters): + """Get the filters to be applied""" + + filter_dict = {} + for item in filters: + self.validate_filter(item) + f_op = item['filter_operator'] + if self.filter_mapping.get(f_op): + f_key = item['filter_key'] + f_val = item['filter_value'] + if f_key in filter_dict: + # multiple filters on same key + if isinstance(filter_dict[f_key], list): + # prev_val is list, so append new f_val + filter_dict[f_key].append(f_val) + else: + # prev_val is not list, + # so create list with prev_val & f_val + filter_dict[f_key] = [filter_dict[f_key], f_val] + else: + 
filter_dict[f_key] = f_val + else: + msg = "Given filter operator '{0}' is not supported." \ + "supported operators are : '{1}'".format( + f_op, + list(self.filter_mapping.keys())) + LOG.error(msg) + self.module.fail_json(msg=msg) + return filter_dict + + def perform_module_operation(self): + """ Perform different actions on info based on user input + in the playbook """ + + filters = self.module.params['filters'] + filter_dict = {} + if filters: + filter_dict = self.get_filters(filters) + LOG.info('filters: %s', filter_dict) + + api_version = self.get_api_details() + array_details = self.get_array_details() + sdc = [] + sds = [] + storage_pool = [] + vol = [] + snapshot_policy = [] + protection_domain = [] + device = [] + rcgs = [] + + subset = self.module.params['gather_subset'] + if subset is not None: + if 'sdc' in subset: + sdc = self.get_sdc_list(filter_dict=filter_dict) + if 'sds' in subset: + sds = self.get_sds_list(filter_dict=filter_dict) + if 'protection_domain' in subset: + protection_domain = self.get_pd_list(filter_dict=filter_dict) + if 'storage_pool' in subset: + storage_pool = self.get_storage_pool_list(filter_dict=filter_dict) + if 'vol' in subset: + vol = self.get_volumes_list(filter_dict=filter_dict) + if 'snapshot_policy' in subset: + snapshot_policy = self.get_snapshot_policy_list(filter_dict=filter_dict) + if 'device' in subset: + device = self.get_devices_list(filter_dict=filter_dict) + if 'rcg' in subset: + rcgs = self.get_replication_consistency_group_list(filter_dict=filter_dict) + + self.module.exit_json( + Array_Details=array_details, + API_Version=api_version, + SDCs=sdc, + SDSs=sds, + Storage_Pools=storage_pool, + Volumes=vol, + Snapshot_Policies=snapshot_policy, + Protection_Domains=protection_domain, + Devices=device, + Replication_Consistency_Groups=rcgs + ) + + +def result_list(entity): + """ Get the name and id associated with the PowerFlex entities """ + result = [] + if entity: + LOG.info('Successfully listed.') + for item in 
entity: + if item['name']: + result.append(item) + else: + result.append({"id": item['id']}) + return result + else: + return None + + +def get_powerflex_info_parameters(): + """This method provides parameters required for the ansible + info module on powerflex""" + return dict( + gather_subset=dict(type='list', required=False, elements='str', + choices=['vol', 'storage_pool', + 'protection_domain', 'sdc', 'sds', + 'snapshot_policy', 'device', 'rcg']), + filters=dict(type='list', required=False, elements='dict', + options=dict(filter_key=dict(type='str', required=True, no_log=False), + filter_operator=dict( + type='str', required=True, + choices=['equal']), + filter_value=dict(type='str', required=True) + ))) + + +def main(): + """ Create PowerFlex info object and perform action on it + based on user input from playbook""" + obj = PowerFlexInfo() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py new file mode 100644 index 00000000..25c5058a --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py @@ -0,0 +1,1333 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing MDM Cluster on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: mdm_cluster +version_added: '1.3.0' +short_description: Manage MDM cluster on Dell PowerFlex +description: +- Managing MDM cluster and MDMs on PowerFlex storage system includes + adding/removing standby MDM, modify MDM name and virtual interface. +- It also includes getting details of MDM cluster, modify MDM cluster + ownership, cluster mode, and performance profile. 
+author: +- Bhavneet Sharma (@sharmb5) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + mdm_name: + description: + - The name of the MDM. It is unique across the PowerFlex array. + - Mutually exclusive with I(mdm_id). + - If mdm_name passed in add standby operation, then same name will be + assigned to the new standby mdm. + type: str + mdm_id: + description: + - The ID of the MDM. + - Mutually exclusive with I(mdm_name). + type: str + mdm_new_name: + description: + - To rename the MDM. + type: str + standby_mdm: + description: + - Specifies add standby MDM parameters. + type: dict + suboptions: + mdm_ips: + description: + - List of MDM IPs that will be assigned to new MDM. It can contain + IPv4 addresses. + required: true + type: list + elements: str + role: + description: + - Role of new MDM. + required: true + choices: ['Manager', 'TieBreaker'] + type: str + management_ips: + description: + - List of management IPs to manage MDM. It can contain IPv4 + addresses. + type: list + elements: str + port: + description: + - Specifies the port of new MDM. + type: int + allow_multiple_ips: + description: + - Allow the added node to have different number of IPs from the + primary node. + type: bool + virtual_interfaces: + description: + - List of NIC interfaces that will be used for virtual IP addresses. + type: list + elements: str + is_primary: + description: + - Set I(is_primary) as C(true) to change MDM cluster ownership from the current + master MDM to different MDM. + - Set I(is_primary) as C(false), will return MDM cluster details. + - New owner MDM must be an MDM with a manager role. + type: bool + cluster_mode: + description: + - Mode of the cluster. + choices: ['OneNode', 'ThreeNodes', 'FiveNodes'] + type: str + mdm: + description: + - Specifies parameters to add/remove MDMs to/from the MDM cluster. + type: list + elements: dict + suboptions: + mdm_id: + description: + - ID of MDM that will be added/removed to/from the cluster. 
+ type: str + mdm_name: + description: + - Name of MDM that will be added/removed to/from the cluster. + type: str + mdm_type: + description: + - Type of the MDM. + - Either I(mdm_id) or I(mdm_name) must be passed with mdm_type. + required: true + choices: ['Secondary', 'TieBreaker'] + type: str + mdm_state: + description: + - Mapping state of MDM. + choices: ['present-in-cluster', 'absent-in-cluster'] + type: str + virtual_ip_interfaces: + description: + - List of interfaces to be used for virtual IPs. + - The order of interfaces must be matched with virtual IPs assigned to the + cluster. + - Interfaces of the primary and secondary type MDMs are allowed to modify. + - The I(virtual_ip_interfaces) is mutually exclusive with I(clear_interfaces). + type: list + elements: str + clear_interfaces: + description: + - Clear all virtual IP interfaces. + - The I(clear_interfaces) is mutually exclusive with I(virtual_ip_interfaces). + type: bool + performance_profile: + description: + - Apply performance profile to cluster MDMs. + choices: ['Compact', 'HighPerformance'] + type: str + state: + description: + - State of the MDM cluster. + choices: ['present', 'absent'] + required: true + type: str +notes: + - Parameters I(mdm_name) or I(mdm_id) are mandatory for rename and modify virtual IP + interfaces. + - Parameters I(mdm_name) or I(mdm_id) are not required while modifying performance + profile. + - For change MDM cluster ownership operation, only changed as True will be + returned and for idempotency case MDM cluster details will be returned. + - Reinstall all SDC after changing ownership to some newly added MDM. + - To add manager standby MDM, MDM package must be installed with manager + role. + - The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' +- name: Add a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + standby_mdm: + mdm_ips: + - "10.x.x.x" + role: "TieBreaker" + management_ips: + - "10.x.y.z" + state: "present" + +- name: Remove a standby MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + state: "absent" + +- name: Switch cluster mode from 3 node to 5 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "FiveNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "present-in-cluster" + state: "present" + +- name: Switch cluster mode from 5 node to 3 node MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cluster_mode: "ThreeNodes" + mdm: + - mdm_id: "5f091a8a013f1100" + mdm_type: "Secondary" + - mdm_name: "mdm_1" + mdm_type: "TieBreaker" + sdc_state: "absent-in-cluster" + state: "present" + +- name: Get the details of the MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + state: "present" + +- name: Change ownership of MDM cluster + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_2" + is_primary: True + state: "present" + +- name: Modify performance 
profile + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + performance_profile: "HighPerformance" + state: "present" + +- name: Rename the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + mdm_new_name: "new_mdm_1" + state: "present" + +- name: Modify virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + virtual_ip_interface: + - "ens224" + state: "present" + +- name: Clear virtual IP interface of the MDM + dellemc.powerflex.mdm_cluster: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + mdm_name: "mdm_1" + clear_interfaces: True + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +mdm_cluster_details: + description: Details of the MDM cluster. + returned: When MDM cluster exists + type: dict + contains: + id: + description: The ID of the MDM cluster. + type: str + name: + description: Name of MDM cluster. + type: str + clusterMode: + description: Mode of the MDM cluster. + type: str + master: + description: The details of the master MDM. + type: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for master MDM. + type: list + managementIPs: + description: List of management IPs for master MDM. + type: list + role: + description: Role of MDM. 
+ type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces + type: list + opensslVersion: + description: OpenSSL version. + type: str + slaves: + description: The list of the secondary MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for secondary MDM. + type: list + managementIPs: + description: List of management IPs for secondary MDM. + type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces + type: list + opensslVersion: + description: OpenSSL version. + type: str + tieBreakers: + description: The list of the TieBreaker MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for tie-breaker MDM. + type: list + managementIPs: + description: List of management IPs for tie-breaker MDM. + type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + opensslVersion: + description: OpenSSL version. + type: str + standbyMDMs: + description: The list of the standby MDMs. + type: list + elements: dict + contains: + id: + description: ID of the MDM. + type: str + name: + description: Name of the MDM. + type: str + port: + description: Port of the MDM. + type: str + ips: + description: List of IPs for MDM. + type: list + managementIPs: + description: List of management IPs for MDM. 
+ type: list + role: + description: Role of MDM. + type: str + status: + description: Status of MDM. + type: str + versionInfo: + description: Version of MDM. + type: str + virtualInterfaces: + description: List of virtual interfaces. + type: list + opensslVersion: + description: OpenSSL version. + type: str + clusterState: + description: State of the MDM cluster. + type: str + goodNodesNum: + description: Number of Nodes in MDM cluster. + type: int + goodReplicasNum: + description: Number of nodes for Replication. + type: int + virtualIps: + description: List of virtual IPs. + type: list + sample: { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + "10.x.y.z" + ], + "ips": [ + "10.x.y.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + "10.x.x.z" + ], + "ips": [ + "10.x.x.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "OpenSSL 1.0.2k-fips 26 Jan 2017", + "role": "Manager", + "status": "Normal", + "name": "sample_mdm1", + "id": "5908d328581d1401", + "port": 9011 + } + ], + "tieBreakers": [ + { + "virtualInterfaces": [], + "managementIPs": [], + "ips": [ + "10.x.y.y" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "virtualInterfaces": [], + "managementIPs": [ + "10.x.z.z" + ], + "ips": [ + "10.x.z.z" + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1403", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } +''' + +from 
ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils +import copy + +LOG = utils.get_logger('mdm_cluster') + + +class PowerFlexMdmCluster(object): + """Class with MDM cluster operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_mdm_cluster_parameters()) + + mut_ex_args = [['mdm_name', 'mdm_id'], + ['virtual_ip_interfaces', 'clear_interfaces']] + + required_together_args = [['cluster_mode', 'mdm', 'mdm_state']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mut_ex_args, + required_together=required_together_args) + + utils.ensure_required_libs(self.module) + + self.not_exist_msg = "MDM {0} does not exists in MDM cluster." + self.exist_msg = "MDM already exists in the MDM cluster" + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + LOG.info('Check Mode Flag %s', self.module.check_mode) + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def set_mdm_virtual_interface(self, mdm_id=None, mdm_name=None, + virtual_ip_interfaces=None, + clear_interfaces=None, + mdm_cluster_details=None): + """Modify the MDM virtual IP interface. + :param mdm_id: ID of MDM + :param mdm_name: Name of MDM + :param virtual_ip_interfaces: List of virtual IP interfaces + :param clear_interfaces: clear virtual IP interfaces of MDM. 
+ :param mdm_cluster_details: Details of MDM cluster + :return: True if modification of virtual interface or clear operation + successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_name is None and mdm_id is None: + err_msg = "Please provide mdm_name/mdm_id to modify virtual IP" \ + " interfaces the MDM." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=mdm_cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + modify_list = [] + modify_list, clear = is_modify_mdm_virtual_interface( + virtual_ip_interfaces, clear_interfaces, mdm_details) + + if modify_list is None and not clear: + LOG.info("No change required in MDM virtual IP interfaces.") + return False + + try: + log_msg = "Modifying MDM virtual interfaces to %s " \ + "or %s" % (str(modify_list), clear) + LOG.info(log_msg) + if not self.module.check_mode: + self.powerflex_conn.system.modify_virtual_ip_interface( + mdm_id=mdm_id, virtual_ip_interfaces=modify_list, + clear_interfaces=clear) + return True + except Exception as e: + error_msg = "Failed to modify the virtual IP interfaces of MDM " \ + "{0} with error {1}".format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def set_performance_profile(self, performance_profile=None, + cluster_details=None): + """ Set the performance profile of Cluster MDMs + :param performance_profile: Specifies the performance profile of MDMs + :param cluster_details: Details of MDM cluster + :return: True if updated successfully + """ + + if self.module.params['state'] == 'present' and performance_profile: + if cluster_details['perfProfile'] != performance_profile: + try: + if not self.module.check_mode: + self.powerflex_conn.system.\ + 
set_cluster_mdm_performance_profile(performance_profile=performance_profile) + return True + except Exception as e: + error_msg = "Failed to update performance profile to {0} " \ + "with error {1}.".format(performance_profile, + str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + return False + return False + + def rename_mdm(self, mdm_name=None, mdm_id=None, mdm_new_name=None, + cluster_details=None): + """Rename the MDM + :param mdm_name: Name of the MDM. + :param mdm_id: ID of the MDM. + :param mdm_new_name: New name of the MDM. + :param cluster_details: Details of the MDM cluster. + :return: True if successfully renamed. + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_name is None and mdm_id is None: + err_msg = "Please provide mdm_name/mdm_id to rename the MDM." + self.module.fail_json(msg=err_msg) + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + try: + if ('name' in mdm_details and + mdm_new_name != mdm_details['name']) or \ + 'name' not in mdm_details: + log_msg = "Modifying the MDM name from %s to " \ + "%s." 
% (mdm_name, mdm_new_name) + LOG.info(log_msg) + if not self.module.check_mode: + self.powerflex_conn.system.rename_mdm( + mdm_id=mdm_id, mdm_new_name=mdm_new_name) + return True + except Exception as e: + error_msg = "Failed to rename the MDM {0} with error {1}.".\ + format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def is_none_name_id_in_switch_cluster_mode(self, mdm): + """ Check whether mdm dict have mdm_name and mdm_id or not""" + + for node in mdm: + if node['mdm_id'] and node['mdm_name']: + msg = "parameters are mutually exclusive: mdm_name|mdm_id" + self.module.fail_json(msg=msg) + + def change_cluster_mode(self, cluster_mode, mdm, cluster_details): + """change the MDM cluster mode. + :param cluster_mode: specifies the mode of MDM cluster + :param mdm: A dict containing parameters to change MDM cluster mode + :param cluster_details: Details of MDM cluster + :return: True if mode changed successfully + """ + + self.is_none_name_id_in_switch_cluster_mode(mdm=mdm) + + if cluster_mode == cluster_details['clusterMode']: + LOG.info("MDM cluster is already in required mode.") + return False + + add_secondary = [] + add_tb = [] + remove_secondary = [] + remove_tb = [] + if self.module.params['state'] == 'present' and \ + self.module.params['mdm_state'] == 'present-in-cluster': + add_secondary, add_tb = self.cluster_expand_list(mdm, cluster_details) + elif self.module.params['state'] == 'present' and \ + self.module.params['mdm_state'] == 'absent-in-cluster': + remove_secondary, remove_tb = self.\ + cluster_reduce_list(mdm, cluster_details) + try: + if not self.module.check_mode: + self.powerflex_conn.system.switch_cluster_mode( + cluster_mode=cluster_mode, add_secondary=add_secondary, + remove_secondary=remove_secondary, add_tb=add_tb, + remove_tb=remove_tb) + return True + except Exception as e: + err_msg = "Failed to change the MDM cluster mode with error " \ + "{0}".format(str(e)) + LOG.error(err_msg) + 
self.module.fail_json(msg=err_msg) + + def gather_secondarys_ids(self, mdm, cluster_details): + """ Prepare a list of secondary MDMs for switch cluster mode + operation""" + + secondarys = [] + + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'Secondary' and node['mdm_id'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_id=node['mdm_id'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + secondarys.append(node['mdm_id']) + + elif node['mdm_type'] == 'Secondary' and node['mdm_name'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + secondarys.append(mdm_details['id']) + return secondarys + + def cluster_expand_list(self, mdm, cluster_details): + """Whether MDM cluster expansion is required or not. + """ + add_secondary = [] + add_tb = [] + + if 'standbyMDMs' not in cluster_details: + err_msg = "No Standby MDMs found. To expand cluster size, " \ + "first add standby MDMs." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + add_secondary = self.gather_secondarys_ids(mdm, cluster_details) + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'TieBreaker' and \ + node['mdm_id'] is not None: + add_tb.append(node['mdm_id']) + + elif node['mdm_type'] == 'TieBreaker' and \ + node['mdm_name'] is not None: + mdm_details = self. 
\ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + add_tb.append(mdm_details['id']) + + log_msg = "expand List are: %s, %s" % (add_secondary, add_tb) + LOG.info(log_msg) + return add_secondary, add_tb + + def cluster_reduce_list(self, mdm, cluster_details): + """Whether MDM cluster reduction is required or not. + """ + remove_secondary = [] + remove_tb = [] + + remove_secondary = self.gather_secondarys_ids(mdm, cluster_details) + for node in mdm: + name_or_id = node['mdm_name'] if node['mdm_name'] else \ + node['mdm_id'] + + if node['mdm_type'] == 'TieBreaker' and \ + node['mdm_id'] is not None: + mdm_details = self. \ + is_mdm_name_id_exists(mdm_id=node['mdm_id'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + remove_tb.append(mdm_details['id']) + + elif node['mdm_type'] == 'TieBreaker' and \ + node['mdm_name'] is not None: + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=node['mdm_name'], + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + remove_tb.append(mdm_details['id']) + + log_msg = "Reduce List are: %s, %s." 
% (remove_secondary, remove_tb) + LOG.info(log_msg) + return remove_secondary, remove_tb + + def perform_add_standby(self, mdm_name, standby_payload): + """ Perform SDK call to add a standby MDM + + :param mdm_name: Name of new standby MDM + :param standby_payload: Parameters dict to add a standby MDM + :return: True if standby MDM added successfully + """ + try: + if not self.module.check_mode: + self.powerflex_conn.system.add_standby_mdm( + mdm_ips=standby_payload['mdm_ips'], + role=standby_payload['role'], + management_ips=standby_payload['management_ips'], + mdm_name=mdm_name, port=standby_payload['port'], + allow_multiple_ips=standby_payload['allow_multiple_ips'], + virtual_interface=standby_payload['virtual_interfaces']) + return True + except Exception as e: + err_msg = "Failed to Add a standby MDM with error {0}.".format( + str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def is_id_new_name_in_add_mdm(self): + """ Check whether mdm_id or mdm_new_name present in Add standby MDM""" + + if self.module.params['mdm_id'] or self.module.params['mdm_new_name']: + err_msg = "Parameters mdm_id/mdm_new_name are not allowed while" \ + " adding a standby MDM. Please try with valid " \ + "parameters to add a standby MDM." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def add_standby_mdm(self, mdm_name, standby_mdm, cluster_details): + """ Adding a standby MDM""" + + if self.module.params['state'] == 'present' and \ + standby_mdm is not None and \ + (self.check_mdm_exists(standby_mdm['mdm_ips'], + cluster_details)): + self.is_id_new_name_in_add_mdm() + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, + cluster_details=cluster_details) + if mdm_details: + LOG.info("Standby MDM %s exits in the system", mdm_name) + return False, cluster_details + + standby_payload = prepare_standby_payload(standby_mdm) + standby_add = self.perform_add_standby(mdm_name, standby_payload) + + if standby_add: + cluster_details = self.get_mdm_cluster_details() + msg = "Fetched the MDM cluster details {0} after adding a " \ + "standby MDM".format(str(cluster_details)) + LOG.info(msg) + return True, cluster_details + return False, cluster_details + + def remove_standby_mdm(self, mdm_name, mdm_id, cluster_details): + """ Remove the Standby MDM + :param mdm_id: ID of MDM that will become owner of MDM cluster + :param mdm_name: Name of MDM that will become owner of MDM cluster + :param cluster_details: Details of MDM cluster + :return: True if MDM removed successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_id is None and mdm_name is None: + err_msg = "Either mdm_name or mdm_id is required while removing" \ + " the standby MDM." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + mdm_details = self. 
\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + LOG.info("MDM %s not exists in MDM cluster.", name_or_id) + return False + mdm_id = mdm_details['id'] + + try: + if not self.module.check_mode: + self.powerflex_conn.system.remove_standby_mdm(mdm_id=mdm_id) + return True + except Exception as e: + error_msg = "Failed to remove the standby MDM {0} from the MDM " \ + "cluster with error {1}".format(name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def change_ownership(self, mdm_id=None, mdm_name=None, + cluster_details=None): + """ Change the ownership of MDM cluster. + :param mdm_id: ID of MDM that will become owner of MDM cluster + :param mdm_name: Name of MDM that will become owner of MDM cluster + :param cluster_details: Details of MDM cluster + :return: True if Owner of MDM cluster change successful + """ + + name_or_id = mdm_id if mdm_id else mdm_name + if mdm_id is None and mdm_name is None: + err_msg = "Either mdm_name or mdm_id is required while changing" \ + " ownership of MDM cluster." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + mdm_details = self.\ + is_mdm_name_id_exists(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details) + if mdm_details is None: + err_msg = self.not_exist_msg.format(name_or_id) + self.module.fail_json(msg=err_msg) + + mdm_id = mdm_details['id'] + + if mdm_details['id'] == cluster_details['master']['id']: + LOG.info("MDM %s is already Owner of MDM cluster.", name_or_id) + return False + else: + try: + if not self.module.check_mode: + self.powerflex_conn.system.\ + change_mdm_ownership(mdm_id=mdm_id) + return True + except Exception as e: + error_msg = "Failed to update the Owner of MDM cluster to " \ + "MDM {0} with error {1}".format(name_or_id, + str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def find_mdm_in_secondarys(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in secondary MDMs""" + for mdm in cluster_details['slaves']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in Secondarys MDM.", name_or_id) + return mdm + + def find_mdm_in_tb(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in tie-breaker MDMs""" + + for mdm in cluster_details['tieBreakers']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in tieBreakers MDM.", name_or_id) + return mdm + + def find_mdm_in_standby(self, mdm_name=None, mdm_id=None, + cluster_details=None, name_or_id=None): + """Whether MDM exists with mdm_name or id in standby MDMs""" + + if 'standbyMDMs' in cluster_details: + for mdm in cluster_details['standbyMDMs']: + if ('name' in mdm and mdm_name == mdm['name']) or \ + mdm_id == mdm['id']: + LOG.info("MDM %s found in standby MDM.", name_or_id) + return mdm + + def is_mdm_name_id_exists(self, mdm_id=None, mdm_name=None, + 
cluster_details=None): + """Whether MDM exists with mdm_name or id """ + + name_or_id = mdm_id if mdm_id else mdm_name + # check in master MDM + if ('name' in cluster_details['master'] and mdm_name == cluster_details['master']['name']) \ + or mdm_id == cluster_details['master']['id']: + LOG.info("MDM %s is master MDM.", name_or_id) + return cluster_details['master'] + + # check in secondary MDMs + secondary_mdm = [] + secondary_mdm = self.\ + find_mdm_in_secondarys(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if secondary_mdm is not None: + return secondary_mdm + + # check in tie-breaker MDMs + tb_mdm = [] + tb_mdm = self.find_mdm_in_tb(mdm_name=mdm_name, mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if tb_mdm is not None: + return tb_mdm + + # check in standby MDMs + standby_mdm = self.find_mdm_in_standby(mdm_name=mdm_name, + mdm_id=mdm_id, + cluster_details=cluster_details, + name_or_id=name_or_id) + if standby_mdm is not None: + return standby_mdm + + LOG.info("MDM %s does not exists in MDM Cluster.", name_or_id) + return None + + def get_mdm_cluster_details(self): + """Get MDM cluster details + :return: Details of MDM Cluster if existed. + """ + + try: + mdm_cluster_details = self.powerflex_conn.system.\ + get_mdm_cluster_details() + + if len(mdm_cluster_details) == 0: + msg = "MDM cluster not found" + LOG.error(msg) + self.module.fail_json(msg=msg) + + # Append Performance profile + resp = self.get_system_details() + if resp is not None: + mdm_cluster_details['perfProfile'] = resp['perfProfile'] + + return mdm_cluster_details + + except Exception as e: + error_msg = "Failed to get the MDM cluster with error {0}." 
+ error_msg = error_msg.format(str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def check_ip_in_secondarys(self, standby_ip, cluster_details): + """whether standby IPs present in secondary MDMs""" + + for secondary_mdm in cluster_details['slaves']: + current_secondary_ips = secondary_mdm['ips'] + for ips in standby_ip: + if ips in current_secondary_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_ip_in_tbs(self, standby_ip, cluster_details): + """whether standby IPs present in tie-breaker MDMs""" + + for tb_mdm in cluster_details['tieBreakers']: + current_tb_ips = tb_mdm['ips'] + for ips in standby_ip: + if ips in current_tb_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_ip_in_standby(self, standby_ip, cluster_details): + """whether standby IPs present in standby MDMs""" + + if 'standbyMDMs' in cluster_details: + for stb_mdm in cluster_details['tieBreakers']: + current_stb_ips = stb_mdm['ips'] + for ips in standby_ip: + if ips in current_stb_ips: + LOG.info(self.exist_msg) + return False + return True + + def check_mdm_exists(self, standby_ip=None, cluster_details=None): + """Check whether standby MDM exists in MDM Cluster""" + + # check in master node + current_master_ips = cluster_details['master']['ips'] + for ips in standby_ip: + if ips in current_master_ips: + LOG.info(self.exist_msg) + return False + + # check in secondary nodes + in_secondary = self.check_ip_in_secondarys(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_secondary: + return False + + # check in tie-breaker nodes + in_tbs = self.check_ip_in_tbs(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_tbs: + return False + + # check in Standby nodes + in_standby = self.check_ip_in_standby(standby_ip=standby_ip, + cluster_details=cluster_details) + if not in_standby: + return False + + LOG.info("New Standby MDM does not exists in MDM cluster") + return True + + def 
get_system_details(self): + """Get system details + :return: Details of PowerFlex system + """ + + try: + resp = self.powerflex_conn.system.get() + if len(resp) == 0: + self.module.fail_json(msg="No system exist on the given " + "host.") + if len(resp) > 1: + self.module.fail_json(msg="Multiple systems exist on the " + "given host.") + return resp[0] + except Exception as e: + msg = "Failed to get system id with error %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_parameters(self): + """Validate the input parameters""" + + name_params = ['mdm_name', 'mdm_id', 'mdm_new_name'] + msg = "Please provide the valid {0}" + + for n_item in name_params: + if self.module.params[n_item] is not None and \ + (len(self.module.params[n_item].strip()) or + self.module.params[n_item].count(" ") > 0) == 0: + err_msg = msg.format(n_item) + self.module.fail_json(msg=err_msg) + + def perform_module_operation(self): + """ + Perform different actions on MDM cluster based on parameters passed in + the playbook + """ + mdm_name = self.module.params['mdm_name'] + mdm_id = self.module.params['mdm_id'] + mdm_new_name = self.module.params['mdm_new_name'] + standby_mdm = copy.deepcopy(self.module.params['standby_mdm']) + is_primary = self.module.params['is_primary'] + cluster_mode = self.module.params['cluster_mode'] + mdm = copy.deepcopy(self.module.params['mdm']) + mdm_state = self.module.params['mdm_state'] + virtual_ip_interfaces = self.module.params['virtual_ip_interfaces'] + clear_interfaces = self.module.params['clear_interfaces'] + performance_profile = self.module.params['performance_profile'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and MDM cluster details + changed = False + result = dict( + changed=False, + mdm_cluster_details={} + ) + self.validate_parameters() + + mdm_cluster_details = self.get_mdm_cluster_details() + msg = "Fetched the MDM cluster details {0}".\ + format(str(mdm_cluster_details)) + 
LOG.info(msg) + + standby_changed = False + performance_changed = False + renamed_changed = False + interface_changed = False + remove_changed = False + mode_changed = False + owner_changed = False + + # Add standby MDM + standby_changed, mdm_cluster_details = self.\ + add_standby_mdm(mdm_name, standby_mdm, mdm_cluster_details) + + # Update performance profile + performance_changed = self.\ + set_performance_profile(performance_profile, mdm_cluster_details) + + # Rename MDM + if state == 'present' and mdm_new_name: + renamed_changed = self.rename_mdm(mdm_name, mdm_id, mdm_new_name, + mdm_cluster_details) + + # Change MDM virtual IP interfaces + if state == 'present' and (virtual_ip_interfaces or clear_interfaces): + interface_changed = self.\ + set_mdm_virtual_interface(mdm_id, mdm_name, + virtual_ip_interfaces, + clear_interfaces, + mdm_cluster_details) + # change cluster mode + if state == 'present' and cluster_mode and mdm and mdm_state: + mode_changed = self.change_cluster_mode(cluster_mode, mdm, + mdm_cluster_details) + + # Remove standby MDM + if state == 'absent': + remove_changed = self.remove_standby_mdm(mdm_name, mdm_id, + mdm_cluster_details) + + # change ownership of MDM cluster + if state == 'present' and is_primary: + owner_changed = self.change_ownership(mdm_id, mdm_name, + mdm_cluster_details) + + # Setting Changed Flag + changed = update_change_flag(standby_changed, performance_changed, + renamed_changed, interface_changed, + mode_changed, remove_changed, + owner_changed) + + # Returning the updated MDM cluster details + # Checking whether owner of MDM cluster has changed + if owner_changed: + mdm_cluster_details = {} + else: + mdm_cluster_details = self.get_mdm_cluster_details() + + result['mdm_cluster_details'] = mdm_cluster_details + result['changed'] = changed + self.module.exit_json(**result) + + +def update_change_flag(standby_changed, performance_changed, renamed_changed, + interface_changed, mode_changed, remove_changed, + owner_changed): + 
""" Update the changed flag based on the operation performed in the task""" + + if standby_changed or performance_changed or renamed_changed or \ + interface_changed or mode_changed or remove_changed or \ + owner_changed: + return True + return False + + +def prepare_standby_payload(standby_mdm): + """prepare the payload for add standby MDM""" + payload_dict = {} + for mdm_keys in standby_mdm: + if standby_mdm[mdm_keys]: + payload_dict[mdm_keys] = standby_mdm[mdm_keys] + else: + payload_dict[mdm_keys] = None + return payload_dict + + +def is_modify_mdm_virtual_interface(virtual_ip_interfaces, clear_interfaces, + mdm_details): + """Check if modification in MDM virtual IP interface required.""" + + modify_list = [] + clear = False + existing_interfaces = mdm_details['virtualInterfaces'] + + # Idempotency check for virtual IP interface + if clear_interfaces is None and \ + len(existing_interfaces) == len(virtual_ip_interfaces) and \ + set(existing_interfaces) == set(virtual_ip_interfaces): + LOG.info("No changes required for virtual IP interface.") + return None, False + + # Idempotency check for clear_interfaces + if clear_interfaces and len(mdm_details['virtualInterfaces']) == 0: + LOG.info("No change required for clear interface.") + return None, False + + # clearing all virtual IP interfaces of MDM + elif clear_interfaces and len(mdm_details['virtualInterfaces']) != 0 and \ + virtual_ip_interfaces is None: + LOG.info("Clear all interfaces of the MDM.") + clear = True + return None, clear + + if virtual_ip_interfaces and clear_interfaces is None: + for interface in virtual_ip_interfaces: + modify_list.append(interface) + return modify_list, clear + + +def get_powerflex_mdm_cluster_parameters(): + """This method provide parameter required for the MDM cluster + module on PowerFlex""" + return dict( + mdm_name=dict(), mdm_id=dict(), mdm_new_name=dict(), + virtual_ip_interfaces=dict(type='list', elements='str'), + clear_interfaces=dict(type='bool'), 
is_primary=dict(type='bool'), + standby_mdm=dict(type='dict', options=dict( + mdm_ips=dict(type='list', elements='str', required=True), + role=dict(required=True, choices=['Manager', 'TieBreaker']), + management_ips=dict(type='list', elements='str'), + port=dict(type='int'), allow_multiple_ips=dict(type='bool'), + virtual_interfaces=dict(type='list', elements='str'))), + cluster_mode=dict(choices=['OneNode', 'ThreeNodes', 'FiveNodes']), + mdm=dict(type='list', elements='dict', + options=dict(mdm_id=dict(), mdm_name=dict(), + mdm_type=dict(required=True, + choices=['Secondary', 'TieBreaker']))), + mdm_state=dict(choices=['present-in-cluster', 'absent-in-cluster']), + performance_profile=dict(choices=['Compact', 'HighPerformance']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Perform actions on MDM cluster based on user input from playbook""" + obj = PowerFlexMdmCluster() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py new file mode 100644 index 00000000..5ffdc6b6 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py @@ -0,0 +1,1122 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Protection Domain on Dell Technologies (Dell) PowerFlex""" +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: protection_domain +version_added: '1.2.0' +short_description: Manage Protection Domain on Dell PowerFlex +description: +- Managing Protection Domain on PowerFlex storage system includes creating, + modifying attributes, deleting and getting details of Protection Domain. 
+author: +- Bhavneet Sharma (@sharmb5) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + protection_domain_name: + description: + - The name of the protection domain. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - Except for create operation, all other operations can be performed + using protection_domain_id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_new_name: + description: + - Used to rename the protection domain. + type: str + is_active: + description: + - Used to activate or deactivate the protection domain. + type: bool + network_limits: + description: + - Network bandwidth limit used by all SDS in protection domain. + type: dict + suboptions: + rebuild_limit: + description: + - Limit the network bandwidth for rebuild. + type: int + rebalance_limit: + description: + - Limit the network bandwidth for rebalance. + type: int + vtree_migration_limit: + description: + - Limit the network bandwidth for vtree migration. + type: int + overall_limit: + description: + - Limit the overall network bandwidth. + type: int + bandwidth_unit: + description: + - Unit for network bandwidth limits. + type: str + choices: ['KBps', 'MBps', 'GBps'] + default: 'KBps' + rf_cache_limits: + description: + - Used to set the RFcache parameters of the protection domain. + type: dict + suboptions: + is_enabled: + description: + - Used to enable or disable RFcache in the protection domain. + type: bool + page_size: + description: + - Used to set the cache page size in KB. + type: int + max_io_limit: + description: + - Used to set cache maximum I/O limit in KB. + type: int + pass_through_mode: + description: + - Used to set the cache mode. 
+ choices: ['None', 'Read', 'Write', 'ReadAndWrite', 'WriteMiss'] + type: str + state: + description: + - State of the protection domain. + required: true + type: str + choices: ['present', 'absent'] +notes: + - The protection domain can only be deleted if all its related objects have + been dissociated from the protection domain. + - If the protection domain set to inactive, then no operation can be + performed on protection domain. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Create protection domain + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Create protection domain with all parameters + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + is_active: true + network_limits: + rebuild_limit: 10 + rebalance_limit: 17 + vtree_migration_limit: 14 + overall_limit: 20 + bandwidth_unit: "MBps" + rf_cache_limits: + is_enabled: true + page_size: 16 + max_io_limit: 128 + pass_through_mode: "Read" + state: "present" + +- name: Get protection domain details using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + state: "present" + +- name: Get protection domain details using ID + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_id: "5718253c00000004" + state: "present" + +- name: Modify protection domain attributes + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" 
+ username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + protection_domain_new_name: "domain1_new" + network_limits: + rebuild_limit: 14 + rebalance_limit: 20 + overall_limit: 25 + bandwidth_unit: "MBps" + rf_cache_limits: + page_size: 64 + pass_through_mode: "WriteMiss" + state: "present" + +- name: Delete protection domain using name + dellemc.powerflex.protection_domain: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1_new" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +protection_domain_details: + description: Details of the protection domain. + returned: When protection domain exists + type: dict + contains: + fglDefaultMetadataCacheSize: + description: FGL metadata cache size. + type: int + fglDefaultNumConcurrentWrites: + description: FGL concurrent writes. + type: str + fglMetadataCacheEnabled: + description: Whether FGL cache enabled. + type: bool + id: + description: Protection domain ID. + type: str + links: + description: Protection domain links. + type: list + contains: + href: + description: Protection domain instance URL. + type: str + rel: + description: Protection domain's relationship with + different entities. + type: str + mdmSdsNetworkDisconnectionsCounterParameters: + description: MDM's SDS counter parameter. + type: dict + contains: + longWindow: + description: Long window for Counter Parameters. + type: int + mediumWindow: + description: Medium window for Counter Parameters. + type: int + shortWindow: + description: Short window for Counter Parameters. + type: int + name: + description: Name of the protection domain. 
+ type: str + overallIoNetworkThrottlingEnabled: + description: Whether overall network throttling enabled. + type: bool + overallIoNetworkThrottlingInKbps: + description: Overall network throttling in KBps. + type: int + protectedMaintenanceModeNetworkThrottlingEnabled: + description: Whether protected maintenance mode network throttling + enabled. + type: bool + protectedMaintenanceModeNetworkThrottlingInKbps: + description: Protected maintenance mode network throttling in + KBps. + type: int + protectionDomainState: + description: State of protection domain. + type: int + rebalanceNetworkThrottlingEnabled: + description: Whether rebalance network throttling enabled. + type: int + rebalanceNetworkThrottlingInKbps: + description: Rebalance network throttling in KBps. + type: int + rebuildNetworkThrottlingEnabled: + description: Whether rebuild network throttling enabled. + type: int + rebuildNetworkThrottlingInKbps: + description: Rebuild network throttling in KBps. + type: int + rfcacheAccpId: + description: Id of RF cache acceleration pool. + type: str + rfcacheEnabled: + description: Whether RF cache is enabled or not. + type: bool + rfcacheMaxIoSizeKb: + description: RF cache maximum I/O size in KB. + type: int + rfcacheOpertionalMode: + description: RF cache operational mode. + type: str + rfcachePageSizeKb: + description: RF cache page size in KB. + type: bool + sdrSdsConnectivityInfo: + description: Connectivity info of SDR and SDS. + type: dict + contains: + clientServerConnStatus: + description: Connectivity status of client and server. + type: str + disconnectedClientId: + description: Disconnected client ID. + type: str + disconnectedClientName: + description: Disconnected client name. + type: str + disconnectedServerId: + description: Disconnected server ID. + type: str + disconnectedServerIp: + description: Disconnected server IP. + type: str + disconnectedServerName: + description: Disconnected server name. 
+ type: str + sdsSdsNetworkDisconnectionsCounterParameters: + description: Counter parameter for SDS-SDS network. + type: dict + contains: + longWindow: + description: Long window for Counter Parameters. + type: int + mediumWindow: + description: Medium window for Counter Parameters. + type: int + shortWindow: + description: Short window for Counter Parameters. + type: int + storagePool: + description: List of storage pools. + type: list + systemId: + description: ID of system. + type: str + vtreeMigrationNetworkThrottlingEnabled: + description: Whether V-Tree migration network throttling enabled. + type: bool + vtreeMigrationNetworkThrottlingInKbps: + description: V-Tree migration network throttling in KBps. + type: int + sample: { + "fglDefaultMetadataCacheSize": 0, + "fglDefaultNumConcurrentWrites": 1000, + "fglMetadataCacheEnabled": false, + "id": "7bd6457000000000", + "links": [ + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000", + "rel": "self" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Statistics", + "rel": "/api/ProtectionDomain/relationship/Statistics" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Sdr", + "rel": "/api/ProtectionDomain/relationship/Sdr" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/AccelerationPool", + "rel": "/api/ProtectionDomain/relationship/AccelerationPool" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/StoragePool", + "rel": "/api/ProtectionDomain/relationship/StoragePool" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/Sds", + "rel": "/api/ProtectionDomain/relationship/Sds" + }, + { + "href": "/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/ReplicationConsistencyGroup", + "rel": "/api/ProtectionDomain/relationship/ + ReplicationConsistencyGroup" + }, + { + "href": 
"/api/instances/ProtectionDomain::7bd6457000000000/ + relationships/FaultSet", + "rel": "/api/ProtectionDomain/relationship/FaultSet" + }, + { + "href": "/api/instances/System::0989ce79058f150f", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "name": "domain1", + "overallIoNetworkThrottlingEnabled": false, + "overallIoNetworkThrottlingInKbps": null, + "protectedMaintenanceModeNetworkThrottlingEnabled": false, + "protectedMaintenanceModeNetworkThrottlingInKbps": null, + "protectionDomainState": "Active", + "rebalanceNetworkThrottlingEnabled": false, + "rebalanceNetworkThrottlingInKbps": null, + "rebuildNetworkThrottlingEnabled": false, + "rebuildNetworkThrottlingInKbps": null, + "rfcacheAccpId": null, + "rfcacheEnabled": true, + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "WriteMiss", + "rfcachePageSizeKb": 64, + "sdrSdsConnectivityInfo": { + "clientServerConnStatus": "CLIENT_SERVER_CONN_STATUS_ALL + _CONNECTED", + "disconnectedClientId": null, + "disconnectedClientName": null, + "disconnectedServerId": null, + "disconnectedServerIp": null, + "disconnectedServerName": null + }, + "sdsConfigurationFailureCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsDecoupledCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsReceiveBufferAllocationFailuresCounterParameters": { + "longWindow": { + "threshold": 2000000, + 
"windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 200000, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 20000, + "windowSizeInSec": 60 + } + }, + "sdsSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "storagePool": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ], + "systemId": "0989ce79058f150f", + "vtreeMigrationNetworkThrottlingEnabled": false, + "vtreeMigrationNetworkThrottlingInKbps": null + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('protection_domain') + + +class PowerFlexProtectionDomain(object): + """Class with protection domain operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_protection_domain_parameters()) + + mut_ex_args = [['protection_domain_name', 'protection_domain_id']] + + required_one_of_args = [['protection_domain_name', + 'protection_domain_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def validate_input_params(self): + """Validate the input parameters""" + + name_params = ['protection_domain_name', 'protection_domain_new_name', + 
def is_id_or_new_name_in_create(self):
    """Fail the task if rename/id parameters accompany a create request.

    protection_domain_new_name and protection_domain_id only apply to an
    existing domain; creation must be requested via protection_domain_name.
    """
    if self.module.params['protection_domain_new_name'] or \
            self.module.params['protection_domain_id']:
        error_msg = "protection_domain_new_name/protection_domain_id " \
                    "are not supported during creation of protection " \
                    "domain. Please try with protection_domain_name."
        LOG.info(error_msg)
        self.module.fail_json(msg=error_msg)

def get_storage_pool(self, protection_domain_id):
    """Return the storage pools contained in a protection domain.

    :param protection_domain_id: ID of the protection domain
    :type protection_domain_id: str
    :return: list of {'id': ..., 'name': ...} dicts, one per pool
    """
    try:
        pools = self.powerflex_conn.protection_domain.get_storage_pools(
            protection_domain_id=protection_domain_id)
        # Keep only the identifying fields of each pool.
        return [{'id': pool['id'], 'name': pool['name']} for pool in pools]

    except Exception as e:
        errmsg = "Failed to get the storage pools present in protection" \
                 " domain %s with error %s" % (protection_domain_id, str(e))
        LOG.error(errmsg)
        self.module.fail_json(msg=errmsg)

def get_protection_domain(self, protection_domain_name=None,
                          protection_domain_id=None):
    """Fetch protection domain details by id (preferred) or by name.

    :param protection_domain_name: Name of the protection domain
    :param protection_domain_id: ID of the protection domain
    :return: protection domain details (with a 'storagePool' list
        appended) when found, otherwise None
    :rtype: dict
    """
    name_or_id = protection_domain_id if protection_domain_id \
        else protection_domain_name

    try:
        # Filter on id when it is supplied, else on name.
        filters = {'id': protection_domain_id} if protection_domain_id \
            else {'name': protection_domain_name}
        pd_details = self.powerflex_conn.protection_domain.get(
            filter_fields=filters)

        if not pd_details:
            error_msg = "Unable to find the protection domain with " \
                        "'%s'." % name_or_id
            LOG.info(error_msg)
            return None

        # Append storage pool list present in protection domain.
        pd_details[0]['storagePool'] = \
            self.get_storage_pool(pd_details[0]['id'])
        return pd_details[0]

    except Exception as e:
        error_msg = "Failed to get the protection domain '%s' with " \
                    "error '%s'" % (name_or_id, str(e))
        LOG.error(error_msg)
        self.module.fail_json(msg=error_msg)

def create_protection_domain(self, protection_domain_name):
    """Create a protection domain with the given name.

    :param protection_domain_name: Name of the protection domain
    :type protection_domain_name: str
    :return: True when the create call succeeds
    """
    try:
        LOG.info("Creating protection domain with name: %s ",
                 protection_domain_name)
        self.powerflex_conn.protection_domain.create(
            name=protection_domain_name)
        return True

    except Exception as e:
        error_msg = "Create protection domain '%s' operation failed" \
                    " with error '%s'" % (protection_domain_name, str(e))
        LOG.error(error_msg)
        self.module.fail_json(msg=error_msg)
def is_modify_required(self, pd_details, network_limits, rf_cache_limits,
                       protection_domain_new_name, is_active):
    """Return whether an existing protection domain has pending updates.

    A modification pass is needed only when state is 'present', the
    protection domain exists, and at least one updatable input (network
    limits, rf cache limits, new name, or activation state) was supplied.

    :param pd_details: existing protection domain details, or None
    :param network_limits: converted network bandwidth limits dict
    :param rf_cache_limits: rf cache settings dict
    :param protection_domain_new_name: requested new name
    :param is_active: requested activation state
    :return: True when a modification pass is required, else False
    :rtype: bool
    """
    # Fix: the original fell off the end and returned an implicit None on
    # the "no change" path; return an explicit bool (truthiness for
    # existing callers is unchanged).
    return bool(
        self.module.params['state'] == 'present'
        and pd_details
        and (network_limits is not None
             or rf_cache_limits is not None
             or protection_domain_new_name is not None
             or is_active is not None))
def modify_rf_limits(self, protection_domain_id, rf_modify_dict,
                     create_flag):
    """Apply RF cache setting changes to a protection domain.

    :param protection_domain_id: ID of the protection domain
    :type protection_domain_id: str
    :param rf_modify_dict: rf cache attributes which are to be updated
    :type rf_modify_dict: dict
    :param create_flag: True when this modify follows a create operation
        (only affects the failure message)
    :type create_flag: bool
    :return: True when the updates succeed
    """
    try:
        # Fix: the original message said "network modify params" -- a
        # copy-paste from modify_nw_limits; log the rf cache dict as such.
        msg = "Dict containing rf cache modify params {0}". \
            format(str(rf_modify_dict))
        LOG.info(msg)

        if 'is_enabled' in rf_modify_dict and \
                rf_modify_dict['is_enabled'] is not None:
            self.powerflex_conn.protection_domain.set_rfcache_enabled(
                protection_domain_id, rf_modify_dict['is_enabled'])
            msg = "The RFcache is enabled to '%s' successfully." \
                % rf_modify_dict['is_enabled']
            LOG.info(msg)

        if 'page_size' in rf_modify_dict or 'max_io_limit' in \
                rf_modify_dict or 'pass_through_mode' in rf_modify_dict:
            self.powerflex_conn.protection_domain.rfcache_parameters(
                protection_domain_id=protection_domain_id,
                page_size=rf_modify_dict['page_size'],
                max_io_limit=rf_modify_dict['max_io_limit'],
                pass_through_mode=rf_modify_dict['pass_through_mode'])
            # Fix: stray trailing quote and missing space in the original
            # "{0}, {1},{2}.'" log format.
            msg = "The RFcache parameters are updated to {0}, {1}, {2}." \
                .format(rf_modify_dict['page_size'],
                        rf_modify_dict['max_io_limit'],
                        rf_modify_dict['pass_through_mode'])
            LOG.info(msg)
        return True

    except Exception as e:
        if create_flag:
            err_msg = "Create protection domain is successful," \
                      " but failed to update the rf cache limits" \
                      " {0} with error {1}".format(protection_domain_id,
                                                   str(e))
        else:
            err_msg = "Failed to update the rf cache limits of " \
                      "protection domain {0} with error {1}". \
                format(protection_domain_id, str(e))
        LOG.error(err_msg)
        self.module.fail_json(msg=err_msg)
def delete_protection_domain(self, protection_domain_id):
    """Remove a protection domain from the array.

    :param protection_domain_id: ID of the protection domain to delete
    :type protection_domain_id: str
    :return: True when the deletion succeeds
    """
    try:
        self.powerflex_conn.protection_domain.delete(protection_domain_id)
    except Exception as e:
        err = ("Delete protection domain '%s' operation failed"
               " with error '%s'" % (protection_domain_id, str(e)))
        LOG.error(err)
        self.module.fail_json(msg=err)
    else:
        LOG.info("Protection domain deleted successfully.")
        return True
protection_domain_new_name = self.module.params[ + 'protection_domain_new_name'] + is_active = self.module.params['is_active'] + network_limits = self.convert_limits_in_kbps( + self.module.params['network_limits']) + rf_cache_limits = self.module.params['rf_cache_limits'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and protection domain + # details + changed = False + result = dict( + changed=False, + protection_domain_details=None + ) + + # Checking invalid value for id, name and rename + self.validate_input_params() + + # get Protection Domain details + pd_details = self.get_protection_domain(protection_domain_name, + protection_domain_id) + + if pd_details: + protection_domain_id = pd_details['id'] + msg = "Fetched the protection domain details with id '%s', name" \ + " '%s'" % (protection_domain_id, protection_domain_name) + LOG.info(msg) + + # create operation + create_changed = False + create_changed, pd_details = self.\ + perform_create_operation(state, pd_details, + protection_domain_name) + + # checking if basic protection domain parameters are modified or not + modify_dict = {} + nw_modify_dict = {} + rf_modify_dict = {} + + if self.is_modify_required(pd_details, network_limits, + rf_cache_limits, + protection_domain_new_name, is_active): + modify_dict = to_modify(pd_details, protection_domain_new_name, + is_active) + nw_modify_dict = to_nw_limit_modify(pd_details, network_limits) + rf_modify_dict = to_rf_limit_modify(pd_details, rf_cache_limits) + msg = "Parameters to be modified are as follows: %s %s, %s" \ + % (str(modify_dict), str(nw_modify_dict), + str(rf_modify_dict)) + LOG.info(msg) + + # modify operation + modify_changed = False + is_nw_limit = all(value is None for value in nw_modify_dict.values()) + is_rf_limit = all(value is None for value in rf_modify_dict.values()) + + if not is_nw_limit and state == 'present': + modify_changed = self.modify_nw_limits(pd_details['id'], + nw_modify_dict, + 
def convert_limits_in_kbps(self, network_limits):
    """Normalize playbook network bandwidth limits to KBps.

    :param network_limits: dict of limit values plus a 'bandwidth_unit'
        key ('KBps', 'MBps' or 'GBps'), or None when not supplied
    :return: dict with the four limit keys expressed in KBps, or None
        when *network_limits* is None
    """
    if network_limits is None:
        return None

    # Multiplier that converts the configured unit to KBps
    # (KBps, or any other value, maps to the identity factor).
    factor = {'MBps': 1024, 'GBps': 1024 * 1024}.get(
        network_limits.get('bandwidth_unit'), 1)

    converted = dict.fromkeys(('rebuild_limit', 'rebalance_limit',
                               'vtree_migration_limit', 'overall_limit'))
    for key in converted:
        value = network_limits.get(key)
        if value is not None:
            converted[key] = value * factor
    return converted
def to_modify(pd_details, protection_domain_new_name, is_active):
    """Determine rename/activation changes for an existing domain.

    :param pd_details: Details of the protection domain
    :type pd_details: dict
    :param protection_domain_new_name: Requested new name, if any
    :type protection_domain_new_name: str
    :param is_active: Requested activation state, if any
    :type is_active: bool
    :return: attributes of the protection domain which are to be updated
    :rtype: dict
    """
    changes = {}

    if protection_domain_new_name is not None and \
            protection_domain_new_name != pd_details['name']:
        changes['protection_domain_new_name'] = protection_domain_new_name

    if is_active is not None:
        current_state = pd_details['protectionDomainState']
        # Only flag a change when the requested state flips the current one.
        if (current_state == 'Active' and not is_active) or \
                (current_state == 'Inactive' and is_active):
            changes['is_active'] = is_active

    return changes


def to_nw_limit_modify(pd_details, network_limits):
    """Determine which network bandwidth limits differ from the array.

    :param pd_details: Details of the protection domain
    :type pd_details: dict
    :param network_limits: requested limits (already in KBps), or None
    :type network_limits: dict
    :return: limits that need updating (None entries mean "unchanged")
    :rtype: dict
    """
    changes = {}
    if network_limits is not None:
        # Playbook key -> attribute reported by the array (both in KBps).
        mapping = {
            'rebuild_limit': 'rebuildNetworkThrottlingInKbps',
            'rebalance_limit': 'rebalanceNetworkThrottlingInKbps',
            'vtree_migration_limit': 'vtreeMigrationNetworkThrottlingInKbps',
            'overall_limit': 'overallIoNetworkThrottlingInKbps',
        }
        for key, pd_key in mapping.items():
            wanted = network_limits[key]
            if wanted is not None and pd_details[pd_key] != wanted:
                changes[key] = wanted
            else:
                changes[key] = None
    return changes


def to_rf_limit_modify(pd_details, rf_cache_limits):
    """Determine which RF cache settings differ from the array.

    :param pd_details: Details of the protection domain
    :type pd_details: dict
    :param rf_cache_limits: requested RF cache settings, or None
    :type rf_cache_limits: dict
    :return: settings that need updating (None entries mean "unchanged")
    :rtype: dict
    """
    changes = {}
    if rf_cache_limits is not None:
        # Playbook key -> attribute reported by the array.
        # NOTE(review): 'rfcacheOpertionalMode' spelling matches the key
        # used by the array response in this module -- do not "correct" it.
        mapping = {
            'is_enabled': 'rfcacheEnabled',
            'page_size': 'rfcachePageSizeKb',
            'max_io_limit': 'rfcacheMaxIoSizeKb',
            'pass_through_mode': 'rfcacheOpertionalMode',
        }
        for key, pd_key in mapping.items():
            wanted = rf_cache_limits[key]
            if wanted is not None and pd_details[pd_key] != wanted:
                changes[key] = wanted
            else:
                changes[key] = None
    return changes
"""This method provides parameters required for the protection domain + module on PowerFlex""" + return dict( + protection_domain_name=dict(), + protection_domain_new_name=dict(), + protection_domain_id=dict(), + is_active=dict(type='bool'), + network_limits=dict( + type='dict', options=dict( + rebuild_limit=dict(type='int'), + rebalance_limit=dict(type='int'), + vtree_migration_limit=dict(type='int'), + overall_limit=dict(type='int'), + bandwidth_unit=dict(choices=['KBps', 'MBps', 'GBps'], + default='KBps') + ) + ), + rf_cache_limits=dict( + type='dict', options=dict( + is_enabled=dict(type='bool'), + page_size=dict(type='int'), + max_io_limit=dict(type='int'), + pass_through_mode=dict(choices=['None', 'Read', 'Write', + 'ReadAndWrite', 'WriteMiss']) + ) + ), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex protection domain object and perform actions on it + based on user input from playbook""" + obj = PowerFlexProtectionDomain() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py new file mode 100644 index 00000000..2520c143 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py @@ -0,0 +1,907 @@ +#!/usr/bin/python + +# Copyright: (c) 2022, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing replication consistency groups on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: replication_consistency_group +version_added: '1.5.0' +short_description: Manage replication consistency groups on Dell PowerFlex +description: +- Managing 
replication consistency groups on PowerFlex storage system includes + getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze, + activate, inactivate and deleting a replication consistency group. +author: +- Trisha Datta (@Trisha-Datta) +- Jennifer John (@Jennifer-John) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + rcg_name: + description: + - The name of the replication consistency group. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(rcg_id). + type: str + rcg_id: + description: + - The ID of the replication consistency group. + - Mutually exclusive with I(rcg_name). + type: str + create_snapshot: + description: + - Whether to create the snapshot of the replication consistency group. + type: bool + rpo: + description: + - Desired RPO in seconds. + type: int + protection_domain_id: + description: + - Protection domain id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_name: + description: + - Protection domain name. + - Mutually exclusive with I(protection_domain_id). + type: str + activity_mode: + description: + - Activity mode of RCG. + - This parameter is supported for version 3.6 and above. + choices: ['Active', 'Inactive'] + type: str + pause: + description: + - Pause or resume the RCG. + type: bool + freeze: + description: + - Freeze or unfreeze the RCG. + type: bool + pause_mode: + description: + - Pause mode. + - It is required if pause is set as True. + choices: ['StopDataTransfer', 'OnlyTrackChanges'] + type: str + target_volume_access_mode: + description: + - Target volume access mode. + choices: ['ReadOnly', 'NoAccess'] + type: str + is_consistent: + description: + - Consistency of RCG. + type: bool + new_rcg_name: + description: + - Name of RCG to rename to. + type: str + remote_peer: + description: + - Remote peer system. 
+ type: dict + suboptions: + hostname: + required: true + description: + - IP or FQDN of the remote peer gateway host. + type: str + aliases: + - gateway_host + username: + type: str + required: true + description: + - The username of the remote peer gateway host. + password: + type: str + required: true + description: + - The password of the remote peer gateway host. + validate_certs: + type: bool + default: true + aliases: + - verifycert + description: + - Boolean variable to specify whether or not to validate SSL + certificate. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be verified. + port: + description: + - Port number through which communication happens with remote peer + gateway host. + type: int + default: 443 + timeout: + description: + - Time after which connection will get terminated. + - It is to be mentioned in seconds. + type: int + default: 120 + protection_domain_id: + description: + - Remote protection domain id. + - Mutually exclusive with I(protection_domain_name). + type: str + protection_domain_name: + description: + - Remote protection domain name. + - Mutually exclusive with I(protection_domain_id). + type: str + state: + description: + - State of the replication consistency group. + choices: ['present', 'absent'] + default: present + type: str +notes: +- The I(check_mode) is supported. +- Idempotency is not supported for create snapshot operation. +- There is a delay in reflection of final state of RCG after few update operations on RCG. +- In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode + if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True. 
+''' + +EXAMPLES = r''' + +- name: Get RCG details + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "{{rcg_name}}" + +- name: Create a snapshot of the RCG + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_id: "{{rcg_id}}" + create_snapshot: True + state: "present" + +- name: Create a replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + protection_domain_name: "domain1" + activity_mode: "active" + remote_peer: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + protection_domain_name: "domain1" + +- name: Modify replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + rpo: 60 + target_volume_access_mode: "ReadOnly" + activity_mode: "Inactive" + is_consistent: True + +- name: Rename replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + new_rcg_name: "rcg_test_rename" + +- name: Pause replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: 
"rcg_test" + action: "pause" + pause_mode: "StopDataTransfer" + +- name: Resume replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "resume" + +- name: Freeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "freeze" + +- name: UnFreeze replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + action: "unfreeze" + +- name: Delete replication consistency group + dellemc.powerflex.replication_consistency_group: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + rcg_name: "rcg_test" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +replication_consistency_group_details: + description: Details of the replication consistency group. + returned: When replication consistency group exists + type: dict + contains: + id: + description: The ID of the replication consistency group. + type: str + name: + description: The name of the replication consistency group. + type: str + protectionDomainId: + description: The Protection Domain ID of the replication consistency group. + type: str + peerMdmId: + description: The ID of the peer MDM of the replication consistency group. + type: str + remoteId: + description: The ID of the remote replication consistency group. 
+ type: str + remoteMdmId: + description: The ID of the remote MDM of the replication consistency group. + type: str + currConsistMode: + description: The current consistency mode of the replication consistency group. + type: str + freezeState: + description: The freeze state of the replication consistency group. + type: str + lifetimeState: + description: The Lifetime state of the replication consistency group. + type: str + pauseMode: + description: The Lifetime state of the replication consistency group. + type: str + snapCreationInProgress: + description: Whether the process of snapshot creation of the replication consistency group is in progress or not. + type: bool + lastSnapGroupId: + description: ID of the last snapshot of the replication consistency group. + type: str + lastSnapCreationRc: + description: The return code of the last snapshot of the replication consistency group. + type: int + targetVolumeAccessMode: + description: The access mode of the target volume of the replication consistency group. + type: str + remoteProtectionDomainId: + description: The ID of the remote Protection Domain. + type: str + remoteProtectionDomainName: + description: The Name of the remote Protection Domain. + type: str + failoverType: + description: The type of failover of the replication consistency group. + type: str + failoverState: + description: The state of failover of the replication consistency group. + type: str + activeLocal: + description: Whether the local replication consistency group is active. + type: bool + activeRemote: + description: Whether the remote replication consistency group is active + type: bool + abstractState: + description: The abstract state of the replication consistency group. + type: str + localActivityState: + description: The state of activity of the local replication consistency group. + type: str + remoteActivityState: + description: The state of activity of the remote replication consistency group.. 
+ type: str + inactiveReason: + description: The reason for the inactivity of the replication consistency group. + type: int + rpoInSeconds: + description: The RPO value of the replication consistency group in seconds. + type: int + replicationDirection: + description: The direction of the replication of the replication consistency group. + type: str + disasterRecoveryState: + description: The state of disaster recovery of the local replication consistency group. + type: str + remoteDisasterRecoveryState: + description: The state of disaster recovery of the remote replication consistency group. + type: str + error: + description: The error code of the replication consistency group. + type: int + type: + description: The type of the replication consistency group. + type: str + sample: { + "protectionDomainId": "b969400500000000", + "peerMdmId": "6c3d94f600000000", + "remoteId": "2130961a00000000", + "remoteMdmId": "0e7a082862fedf0f", + "currConsistMode": "Consistent", + "freezeState": "Unfrozen", + "lifetimeState": "Normal", + "pauseMode": "None", + "snapCreationInProgress": false, + "lastSnapGroupId": "e58280b300000001", + "lastSnapCreationRc": "SUCCESS", + "targetVolumeAccessMode": "NoAccess", + "remoteProtectionDomainId": "4eeb304600000000", + "remoteProtectionDomainName": "domain1", + "failoverType": "None", + "failoverState": "None", + "activeLocal": true, + "activeRemote": true, + "abstractState": "Ok", + "localActivityState": "Active", + "remoteActivityState": "Active", + "inactiveReason": 11, + "rpoInSeconds": 30, + "replicationDirection": "LocalToRemote", + "disasterRecoveryState": "None", + "remoteDisasterRecoveryState": "None", + "error": 65, + "name": "test_rcg", + "type": "User", + "id": "aadc17d500000000" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('replication_consistency_group') + + +class 
PowerFlexReplicationConsistencyGroup(object): + """Class with replication consistency group operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_replication_consistency_group_parameters()) + + mut_ex_args = [['rcg_name', 'rcg_id'], ['protection_domain_id', 'protection_domain_name']] + + required_one_of_args = [['rcg_name', 'rcg_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_rcg(self, rcg_name=None, rcg_id=None): + """Get rcg details + :param rcg_name: Name of the rcg + :param rcg_id: ID of the rcg + :return: RCG details + """ + name_or_id = rcg_id if rcg_id else rcg_name + try: + rcg_details = None + if rcg_id: + rcg_details = self.powerflex_conn.replication_consistency_group.get( + filter_fields={'id': rcg_id}) + + if rcg_name: + rcg_details = self.powerflex_conn.replication_consistency_group.get( + filter_fields={'name': rcg_name}) + + if rcg_details: + rcg_details[0]['statistics'] = \ + self.powerflex_conn.replication_consistency_group.get_statistics(rcg_details[0]['id']) + rcg_details[0].pop('links', None) + self.append_protection_domain_name(rcg_details[0]) + return rcg_details[0] + + except Exception as e: + errormsg = "Failed to get the replication consistency group {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_rcg_snapshot(self, rcg_id): + """Create RCG snapshot + 
:param rcg_id: Unique identifier of the RCG. + :return: Boolean indicating if create snapshot operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.create_snapshot( + rcg_id=rcg_id) + return True + + except Exception as e: + errormsg = "Create RCG snapshot for RCG with id {0} operation failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_rcg(self, rcg_params): + """Create RCG""" + try: + resp = None + # Get remote system details + self.remote_powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params['remote_peer']) + LOG.info("Got the remote peer connection object instance") + protection_domain_id = rcg_params['protection_domain_id'] + if rcg_params['protection_domain_name']: + protection_domain_id = \ + self.get_protection_domain(self.powerflex_conn, rcg_params['protection_domain_name'])['id'] + + remote_protection_domain_id = rcg_params['remote_peer']['protection_domain_id'] + if rcg_params['remote_peer']['protection_domain_name']: + remote_protection_domain_id = \ + self.get_protection_domain(self.remote_powerflex_conn, + rcg_params['remote_peer']['protection_domain_name'])['id'] + + if not self.module.check_mode: + resp = self.powerflex_conn.replication_consistency_group.create( + rpo=rcg_params['rpo'], + protection_domain_id=protection_domain_id, + remote_protection_domain_id=remote_protection_domain_id, + destination_system_id=self.remote_powerflex_conn.system.get()[0]['id'], + name=rcg_params['rcg_name'], + activity_mode=rcg_params['activity_mode']) + return True, resp + + except Exception as e: + errormsg = "Create replication consistency group failed with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_rpo(self, rcg_id, rpo): + """Modify rpo + :param rcg_id: Unique identifier of the RCG. 
+ :param rpo: rpo value in seconds + :return: Boolean indicates if modify rpo is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.modify_rpo( + rcg_id, rpo) + return True + + except Exception as e: + errormsg = "Modify rpo for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_target_volume_access_mode(self, rcg_id, target_volume_access_mode): + """Modify target volume access mode + :param rcg_id: Unique identifier of the RCG. + :param target_volume_access_mode: Target volume access mode. + :return: Boolean indicates if modify operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode( + rcg_id, target_volume_access_mode) + return True + + except Exception as e: + errormsg = "Modify target volume access mode for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_activity_mode(self, rcg_id, rcg_details, activity_mode): + """Modify activity mode + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param activity_mode: RCG activity mode. 
+ :return: Boolean indicates if modify operation is successful + """ + try: + if activity_mode == 'Active' and rcg_details['localActivityState'].lower() == 'inactive': + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.activate(rcg_id) + return True + elif activity_mode == 'Inactive' and rcg_details['localActivityState'].lower() == 'active': + if not self.module.check_mode: + rcg_details = self.powerflex_conn.replication_consistency_group.inactivate(rcg_id) + return True + except Exception as e: + errormsg = "Modify activity_mode for replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def pause_or_resume_rcg(self, rcg_id, rcg_details, pause, pause_mode=None): + """Perform specified rcg action + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param pause: Pause or resume RCG. + :param pause_mode: Specifies the pause mode if pause is True. + :return: Boolean indicates if rcg action is successful + """ + if pause and rcg_details['pauseMode'] == 'None': + if not pause_mode: + self.module.fail_json(msg="Specify pause_mode to perform pause on replication consistency group.") + return self.pause(rcg_id, pause_mode) + + if not pause and rcg_details['pauseMode'] != 'None': + return self.resume(rcg_id) + + def freeze_or_unfreeze_rcg(self, rcg_id, rcg_details, freeze): + """Perform specified rcg action + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param freeze: Freeze or unfreeze RCG. 
+ :return: Boolean indicates if rcg action is successful + """ + if freeze and rcg_details['freezeState'].lower() == 'unfrozen': + return self.freeze(rcg_id) + + if not freeze and rcg_details['freezeState'].lower() == 'frozen': + return self.unfreeze(rcg_id) + + def freeze(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.freeze(rcg_id) + return True + except Exception as e: + errormsg = "Freeze replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def unfreeze(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.unfreeze(rcg_id) + return True + except Exception as e: + errormsg = "Unfreeze replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def pause(self, rcg_id, pause_mode): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.pause(rcg_id, pause_mode) + return True + except Exception as e: + errormsg = "Pause replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def resume(self, rcg_id): + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.resume(rcg_id) + return True + except Exception as e: + errormsg = "Resume replication consistency group {0} failed with error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def set_consistency(self, rcg_id, rcg_details, is_consistent): + """Set rcg to specified mode + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details. + :param is_consistent: RCG consistency. 
+ :return: Boolean indicates if set consistency is successful + """ + try: + if is_consistent and rcg_details['currConsistMode'].lower() not in ('consistent', 'consistentpending'): + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.set_as_consistent(rcg_id) + return True + elif not is_consistent and rcg_details['currConsistMode'].lower() not in ('inconsistent', 'inconsistentpending'): + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.set_as_inconsistent(rcg_id) + return True + except Exception as e: + errormsg = "Modifying consistency of replication consistency group failed with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def rename_rcg(self, rcg_id, rcg_details, new_name): + """Rename rcg + :param rcg_id: Unique identifier of the RCG. + :param rcg_details: RCG details + :param new_name: RCG name to rename to. + :return: Boolean indicates if rename is successful + """ + try: + if rcg_details['name'] != new_name: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.rename_rcg(rcg_id, new_name) + return True + except Exception as e: + errormsg = "Renaming replication consistency group to {0} failed with error {1}".format(new_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_rcg(self, rcg_id): + """Delete RCG + :param rcg_id: Unique identifier of the RCG. 
+ :return: Boolean indicates if delete rcg operation is successful + """ + try: + if not self.module.check_mode: + self.powerflex_conn.replication_consistency_group.delete( + rcg_id=rcg_id) + return True + + except Exception as e: + errormsg = "Delete replication consistency group {0} failed with " \ + "error {1}".format(rcg_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_protection_domain(self, conn, protection_domain_name=None, protection_domain_id=None): + """ + Get protection domain details + :param conn: local or remote connection + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain id if exists + :rtype: str + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = [] + if protection_domain_id: + pd_details = conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if len(pd_details) == 0: + error_msg = "Unable to find the protection domain with " \ + "'%s'." 
% name_or_id + self.module.fail_json(msg=error_msg) + + return pd_details[0] + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_create(self, rcg_params): + """Validate create RCG params""" + params = ['create_snapshot', 'new_rcg_name'] + for param in params: + if rcg_params[param] is not None: + self.module.fail_json(msg="%s cannot be specified while creating replication consistency group" % param) + if not rcg_params['rpo']: + self.module.fail_json(msg='Enter rpo to create replication consistency group') + if not rcg_params['remote_peer']: + self.module.fail_json(msg='Enter remote_peer to create replication consistency group') + if not rcg_params['protection_domain_id'] and not rcg_params['protection_domain_name']: + self.module.fail_json(msg='Enter protection_domain_name or protection_domain_id to create replication consistency group') + if (not rcg_params['remote_peer']['protection_domain_id'] and not rcg_params['remote_peer']['protection_domain_name']) or \ + (rcg_params['remote_peer']['protection_domain_id'] is not None and + rcg_params['remote_peer']['protection_domain_name'] is not None): + self.module.fail_json(msg='Enter remote protection_domain_name or protection_domain_id to create replication consistency group') + + def modify_rcg(self, rcg_id, rcg_details): + create_snapshot = self.module.params['create_snapshot'] + rpo = self.module.params['rpo'] + target_volume_access_mode = self.module.params['target_volume_access_mode'] + pause = self.module.params['pause'] + freeze = self.module.params['freeze'] + is_consistent = self.module.params['is_consistent'] + activity_mode = self.module.params['activity_mode'] + new_rcg_name = self.module.params['new_rcg_name'] + changed = False + + if create_snapshot is True: + changed = self.create_rcg_snapshot(rcg_id) + if rpo and rcg_details['rpoInSeconds'] and \ + 
rpo != rcg_details['rpoInSeconds']: + changed = self.modify_rpo(rcg_id, rpo) + if target_volume_access_mode and \ + rcg_details['targetVolumeAccessMode'] != target_volume_access_mode: + changed = \ + self.modify_target_volume_access_mode(rcg_id, target_volume_access_mode) + if activity_mode and \ + self.modify_activity_mode(rcg_id, rcg_details, activity_mode): + changed = True + rcg_details = self.get_rcg(rcg_id=rcg_details['id']) + if pause is not None and \ + self.pause_or_resume_rcg(rcg_id, rcg_details, pause, self.module.params['pause_mode']): + changed = True + if freeze is not None and \ + self.freeze_or_unfreeze_rcg(rcg_id, rcg_details, freeze): + changed = True + if is_consistent is not None and \ + self.set_consistency(rcg_id, rcg_details, is_consistent): + changed = True + if new_rcg_name and self.rename_rcg(rcg_id, rcg_details, new_rcg_name): + changed = True + + return changed + + def validate_input(self, rcg_params): + try: + api_version = self.powerflex_conn.system.get()[0]['mdmCluster']['master']['versionInfo'] + if rcg_params['activity_mode'] is not None and utils.is_version_less_than_3_6(api_version): + self.module.fail_json(msg='activity_mode is supported only from version 3.6 and above') + params = ['rcg_name', 'new_rcg_name'] + for param in params: + if rcg_params[param] and utils.is_invalid_name(rcg_params[param]): + self.module.fail_json(msg='Enter a valid %s' % param) + if rcg_params['pause_mode'] and rcg_params['pause'] is None: + self.module.fail_json(msg='Specify pause as True to pause replication consistency group') + except Exception as e: + error_msg = "Validating input parameters failed with " \ + "error '%s'" % (str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def append_protection_domain_name(self, rcg_details): + try: + # Append protection domain name + if 'protectionDomainId' in rcg_details \ + and rcg_details['protectionDomainId']: + pd_details = self.get_protection_domain( + conn=self.powerflex_conn, + 
protection_domain_id=rcg_details['protectionDomainId']) + rcg_details['protectionDomainName'] = pd_details['name'] + except Exception as e: + error_msg = "Updating replication consistency group details with protection domain name failed with " \ + "error '%s'" % (str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on replication consistency group based on parameters passed in + the playbook + """ + self.validate_input(self.module.params) + rcg_name = self.module.params['rcg_name'] + new_rcg_name = self.module.params['new_rcg_name'] + rcg_id = self.module.params['rcg_id'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and RCG details + changed = False + result = dict( + changed=False, + replication_consistency_group_details=[] + ) + # get RCG details + rcg_details = self.get_rcg(rcg_name, rcg_id) + if rcg_details: + result['replication_consistency_group_details'] = rcg_details + rcg_id = rcg_details['id'] + msg = "Fetched the RCG details {0}".format(str(rcg_details)) + LOG.info(msg) + + # perform create + if state == "present": + if not rcg_details: + self.validate_create(self.module.params) + changed, rcg_details = self.create_rcg(self.module.params) + if rcg_details: + rcg_id = rcg_details['id'] + + if rcg_details and self.modify_rcg(rcg_id, rcg_details): + changed = True + + if state == "absent" and rcg_details: + changed = self.delete_rcg(rcg_id=rcg_details['id']) + + # Returning the RCG details + if changed: + result['replication_consistency_group_details'] = \ + self.get_rcg(new_rcg_name or rcg_name, rcg_id) + result['changed'] = changed + self.module.exit_json(**result) + + +def get_powerflex_replication_consistency_group_parameters(): + """This method provide parameter required for the replication_consistency_group + module on PowerFlex""" + return dict( + rcg_name=dict(), rcg_id=dict(), + create_snapshot=dict(type='bool'), + 
rpo=dict(type='int'), protection_domain_id=dict(), + protection_domain_name=dict(), new_rcg_name=dict(), + activity_mode=dict(choices=['Active', 'Inactive']), + pause=dict(type='bool'), freeze=dict(type='bool'), + pause_mode=dict(choices=['StopDataTransfer', 'OnlyTrackChanges']), + target_volume_access_mode=dict(choices=['ReadOnly', 'NoAccess']), + is_consistent=dict(type='bool'), + remote_peer=dict(type='dict', + options=dict(hostname=dict(type='str', aliases=['gateway_host'], required=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', aliases=['verifycert'], default=True), + port=dict(type='int', default=443), + timeout=dict(type='int', default=120), + protection_domain_id=dict(), + protection_domain_name=dict())), + state=dict(default='present', type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex Replication Consistency Group object and perform actions on it + based on user input from playbook""" + obj = PowerFlexReplicationConsistencyGroup() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py new file mode 100644 index 00000000..a2f05a31 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py @@ -0,0 +1,365 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing SDCs on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: sdc +version_added: '1.0.0' +short_description: Manage SDCs on Dell PowerFlex +description: +- Managing SDCs on PowerFlex storage system includes getting details of SDC + and renaming 
SDC. + +author: +- Akash Shendge (@shenda1) + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +options: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip) for get/rename operation. + - Mutually exclusive with I(sdc_id) and I(sdc_name). + type: str + sdc_new_name: + description: + - New name of the SDC. Used to rename the SDC. + type: str + state: + description: + - State of the SDC. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Get SDC details using SDC ip + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_ip: "{{sdc_ip}}" + state: "present" + +- name: Rename SDC using SDC name + dellemc.powerflex.sdc: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + sdc_name: "centos_sdc" + sdc_new_name: "centos_sdc_renamed" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' + +sdc_details: + description: Details of the SDC. + returned: When SDC exists + type: dict + contains: + id: + description: The ID of the SDC. + type: str + name: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + osType: + description: OS type of the SDC. + type: str + mapped_volumes: + description: The details of the mapped volumes. 
+ type: list + contains: + id: + description: The ID of the volume. + type: str + name: + description: The name of the volume. + type: str + volumeType: + description: Type of the volume. + type: str + sdcApproved: + description: Indicates whether an SDC has approved access to the + system. + type: bool + sample: { + "id": "07335d3d00000006", + "installedSoftwareVersionInfo": "R3_6.0.0", + "kernelBuildNumber": null, + "kernelVersion": "3.10.0", + "links": [ + { + "href": "/api/instances/Sdc::07335d3d00000006", + "rel": "self" + }, + { + "href": "/api/instances/Sdc::07335d3d00000006/relationships/ + Statistics", + "rel": "/api/Sdc/relationship/Statistics" + }, + { + "href": "/api/instances/Sdc::07335d3d00000006/relationships/ + Volume", + "rel": "/api/Sdc/relationship/Volume" + }, + { + "href": "/api/instances/System::4a54a8ba6df0690f", + "rel": "/api/parent/relationship/systemId" + } + ], + "mapped_volumes": [], + "mdmConnectionState": "Disconnected", + "memoryAllocationFailure": null, + "name": "LGLAP203", + "osType": "Linux", + "peerMdmId": null, + "perfProfile": "HighPerformance", + "sdcApproved": true, + "sdcApprovedIps": null, + "sdcGuid": "F8ECB844-23B8-4629-92BB-B6E49A1744CB", + "sdcIp": "N/A", + "sdcIps": null, + "sdcType": "AppSdc", + "sdrId": null, + "socketAllocationFailure": null, + "softwareVersionInfo": "R3_6.0.0", + "systemId": "4a54a8ba6df0690f", + "versionInfo": "R3_6.0.0" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils + +LOG = utils.get_logger('sdc') + + +class PowerFlexSdc(object): + """Class with SDC operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_sdc_parameters()) + + mutually_exclusive = [['sdc_id', 'sdc_ip', 'sdc_name']] + + required_one_of = [['sdc_id', 'sdc_ip', 'sdc_name']] + 
+ # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def rename_sdc(self, sdc_id, new_name): + """Rename SDC + :param sdc_id: The ID of the SDC + :param new_name: The new name of the SDC + :return: Boolean indicating if rename operation is successful + """ + + try: + self.powerflex_conn.sdc.rename(sdc_id=sdc_id, name=new_name) + return True + except Exception as e: + errormsg = "Failed to rename SDC %s with error %s" % (sdc_id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_mapped_volumes(self, sdc_id): + """Get volumes mapped to SDC + :param sdc_id: The ID of the SDC + :return: List containing volume details mapped to SDC + """ + + try: + resp = self.powerflex_conn.sdc.get_mapped_volumes(sdc_id=sdc_id) + return resp + except Exception as e: + errormsg = "Failed to get the volumes mapped to SDC %s with " \ + "error %s" % (sdc_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_sdc(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC Details + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The dict containing SDC details + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = 
self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier %s" \ + % id_ip_name + LOG.error(error_msg) + return None + sdc_details[0]['mapped_volumes'] = self.get_mapped_volumes( + sdc_details[0]['id']) + return sdc_details[0] + except Exception as e: + errormsg = "Failed to get the SDC %s with error %s" % ( + id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_parameters(self, sdc_name=None, sdc_id=None, sdc_ip=None): + """Validate the input parameters""" + + if all(param is None for param in [sdc_name, sdc_id, sdc_ip]): + self.module.fail_json(msg="Please provide sdc_name/sdc_id/sdc_ip " + "with valid input.") + + sdc_identifiers = ['sdc_name', 'sdc_id', 'sdc_ip'] + for param in sdc_identifiers: + if self.module.params[param] is not None and \ + len(self.module.params[param].strip()) == 0: + error_msg = "Please provide valid %s" % param + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on SDC based on parameters passed in + the playbook + """ + sdc_name = self.module.params['sdc_name'] + sdc_id = self.module.params['sdc_id'] + sdc_ip = self.module.params['sdc_ip'] + sdc_new_name = self.module.params['sdc_new_name'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and SDC details + changed = False + result = dict( + changed=False, + sdc_details={} + ) + + self.validate_parameters(sdc_name, sdc_id, sdc_ip) + + sdc_details = self.get_sdc(sdc_name=sdc_name, sdc_id=sdc_id, + sdc_ip=sdc_ip) + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + if state == 'present' and not sdc_details: + error_msg = 'Could not find any SDC instance with ' \ + 'identifier %s.' 
% id_ip_name + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if state == 'absent' and sdc_details: + error_msg = 'Removal of SDC is not allowed through Ansible ' \ + 'module.' + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if state == 'present' and sdc_details and sdc_new_name is not None: + if len(sdc_new_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid SDC name.") + + changed = self.rename_sdc(sdc_details['id'], sdc_new_name) + + if changed: + sdc_name = sdc_new_name + + if state == 'present': + result['sdc_details'] = self.get_sdc(sdc_name=sdc_name, + sdc_id=sdc_id, sdc_ip=sdc_ip) + result['changed'] = changed + self.module.exit_json(**result) + + +def get_powerflex_sdc_parameters(): + """This method provide parameter required for the Ansible SDC module on + PowerFlex""" + return dict( + sdc_id=dict(), + sdc_ip=dict(), + sdc_name=dict(), + sdc_new_name=dict(), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex SDC object and perform actions on it + based on user input from playbook""" + obj = PowerFlexSdc() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sds.py b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py new file mode 100644 index 00000000..91c28776 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py @@ -0,0 +1,1160 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing SDS on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: sds +version_added: '1.1.0' +short_description: Manage SDS on Dell PowerFlex +description: +- Managing SDS on PowerFlex 
storage system includes + creating new SDS, getting details of SDS, adding/removing IP to/from SDS, + modifying attributes of SDS, and deleting SDS. +author: +- Rajshree Khare (@khareRajshree) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + sds_name: + description: + - The name of the SDS. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(sds_id). + type: str + sds_id: + description: + - The ID of the SDS. + - Except create operation, all other operations can be performed + using I(sds_id). + - Mutually exclusive with I(sds_name). + type: str + protection_domain_name: + description: + - The name of the protection domain. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - Mutually exclusive with I(protection_domain_name). + type: str + sds_ip_list: + description: + - Dictionary of IPs and their roles for the SDS. + - At least one IP-role is mandatory while creating a SDS. + - IP-roles can be updated as well. + type: list + elements: dict + suboptions: + ip: + description: + - IP address of the SDS. + type: str + required: true + role: + description: + - Role assigned to the SDS IP address. + choices: ['sdsOnly', 'sdcOnly', 'all'] + type: str + required: true + sds_ip_state: + description: + - State of IP with respect to the SDS. + choices: ['present-in-sds', 'absent-in-sds'] + type: str + rfcache_enabled: + description: + - Whether to enable the Read Flash cache. + type: bool + rmcache_enabled: + description: + - Whether to enable the Read RAM cache. + type: bool + rmcache_size: + description: + - Read RAM cache size (in MB). + - Minimum size is 128 MB. + - Maximum size is 3911 MB. + type: int + sds_new_name: + description: + - SDS new name. + type: str + performance_profile: + description: + - Performance profile to apply to the SDS. 
+ - The HighPerformance profile configures a predefined set of parameters + for very high performance use cases. + - Default value by API is C(HighPerformance). + choices: ['Compact', 'HighPerformance'] + type: str + state: + description: + - State of the SDS. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The maximum limit for the IPs that can be associated with an SDS is 8. + - There needs to be at least 1 IP for SDS communication and 1 for SDC + communication. + - If only 1 IP exists, it must be with role 'all'; else 1 IP + can be with role 'all' and other IPs with role 'sdcOnly'; or 1 IP must be + with role 'sdsOnly' and others with role 'sdcOnly'. + - There can be 1 or more IPs with role 'sdcOnly'. + - There must be only 1 IP with SDS role (either with role 'all' or + 'sdsOnly'). + - SDS can be created with RF cache disabled, but be aware that the RF cache + is not always updated. In this case, the user should re-try the operation. + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' +- name: Create SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "all" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Create SDS with all parameters + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node1" + protection_domain_name: "domain1" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + rmcache_enabled: true + rmcache_size: 128 + performance_profile: "HighPerformance" + state: "present" + +- name: Get SDS details using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "present" + +- name: Get SDS details using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "present" + +- name: Modify SDS attributes using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_new_name: "node0_new" + rfcache_enabled: true + rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Modify SDS attributes using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + sds_new_name: "node0_new" + rfcache_enabled: true + 
rmcache_enabled: true + rmcache_size: 256 + performance_profile: "HighPerformance" + state: "present" + +- name: Add IP and role to an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "present-in-sds" + state: "present" + +- name: Remove IP and role from an SDS + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + sds_ip_list: + - ip: "198.10.xxx.xxx" + role: "sdcOnly" + sds_ip_state: "absent-in-sds" + state: "present" + +- name: Delete SDS using name + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_name: "node0" + state: "absent" + +- name: Delete SDS using ID + dellemc.powerflex.sds: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + sds_id: "5718253c00000004" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +sds_details: + description: Details of the SDS. + returned: When SDS exists + type: dict + contains: + authenticationError: + description: Indicates authentication error. + type: str + certificateInfo: + description: Information about certificate. + type: str + configuredDrlMode: + description: Configured DRL mode. + type: str + drlMode: + description: DRL mode. + type: str + faultSetId: + description: Fault set ID. + type: str + fglMetadataCacheSize: + description: FGL metadata cache size. + type: int + fglMetadataCacheState: + description: FGL metadata cache state. 
+ type: str + fglNumConcurrentWrites: + description: FGL concurrent writes. + type: int + id: + description: SDS ID. + type: str + ipList: + description: SDS IP list. + type: list + contains: + ip: + description: IP present in the SDS. + type: str + role: + description: Role of the SDS IP. + type: str + lastUpgradeTime: + description: Last time SDS was upgraded. + type: str + links: + description: SDS links. + type: list + contains: + href: + description: SDS instance URL. + type: str + rel: + description: SDS's relationship with different entities. + type: str + maintenanceState: + description: Maintenance state. + type: str + maintenanceType: + description: Maintenance type. + type: str + mdmConnectionState: + description: MDM connection state. + type: str + membershipState: + description: Membership state. + type: str + name: + description: Name of the SDS. + type: str + numOfIoBuffers: + description: Number of IO buffers. + type: int + numRestarts: + description: Number of restarts. + type: int + onVmWare: + description: Presence on VMware. + type: bool + perfProfile: + description: Performance profile. + type: str + port: + description: SDS port. + type: int + protectionDomainId: + description: Protection Domain ID. + type: str + protectionDomainName: + description: Protection Domain Name. + type: str + raidControllers: + description: Number of RAID controllers. + type: int + rfcacheEnabled: + description: Whether RF cache is enabled or not. + type: bool + rfcacheErrorApiVersionMismatch: + description: RF cache error for API version mismatch. + type: bool + rfcacheErrorDeviceDoesNotExist: + description: RF cache error for device does not exist. + type: bool + rfcacheErrorInconsistentCacheConfiguration: + description: RF cache error for inconsistent cache configuration. + type: bool + rfcacheErrorInconsistentSourceConfiguration: + description: RF cache error for inconsistent source configuration. 
+ type: bool + rfcacheErrorInvalidDriverPath: + description: RF cache error for invalid driver path. + type: bool + rfcacheErrorLowResources: + description: RF cache error for low resources. + type: bool + rmcacheEnabled: + description: Whether Read RAM cache is enabled or not. + type: bool + rmcacheFrozen: + description: RM cache frozen. + type: bool + rmcacheMemoryAllocationState: + description: RM cache memory allocation state. + type: bool + rmcacheSizeInKb: + description: RM cache size in KB. + type: int + rmcacheSizeInMb: + description: RM cache size in MB. + type: int + sdsConfigurationFailure: + description: SDS configuration failure. + type: str + sdsDecoupled: + description: SDS decoupled. + type: str + sdsReceiveBufferAllocationFailures: + description: SDS receive buffer allocation failures. + type: str + sdsState: + description: SDS state. + type: str + softwareVersionInfo: + description: SDS software version information. + type: str + sample: { + "authenticationError": "None", + "certificateInfo": null, + "configuredDrlMode": "Volatile", + "drlMode": "Volatile", + "faultSetId": null, + "fglMetadataCacheSize": 0, + "fglMetadataCacheState": "Disabled", + "fglNumConcurrentWrites": 1000, + "id": "8f3bb0cc00000002", + "ipList": [ + { + "ip": "10.47.xxx.xxx", + "role": "all" + } + ], + "lastUpgradeTime": 0, + "links": [ + { + "href": "/api/instances/Sds::8f3bb0cc00000002", + "rel": "self" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /Statistics", + "rel": "/api/Sds/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /SpSds", + "rel": "/api/Sds/relationship/SpSds" + }, + { + "href": "/api/instances/Sds::8f3bb0cc00000002/relationships + /Device", + "rel": "/api/Sds/relationship/Device" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "maintenanceState": "NoMaintenance", + "maintenanceType": 
"NoMaintenance", + "mdmConnectionState": "Connected", + "membershipState": "Joined", + "name": "node0", + "numOfIoBuffers": null, + "numRestarts": 2, + "onVmWare": true, + "perfProfile": "HighPerformance", + "port": 7072, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "raidControllers": null, + "rfcacheEnabled": true, + "rfcacheErrorApiVersionMismatch": false, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheErrorInconsistentCacheConfiguration": false, + "rfcacheErrorInconsistentSourceConfiguration": false, + "rfcacheErrorInvalidDriverPath": false, + "rfcacheErrorLowResources": false, + "rmcacheEnabled": true, + "rmcacheFrozen": false, + "rmcacheMemoryAllocationState": "AllocationPending", + "rmcacheSizeInKb": 131072, + "rmcacheSizeInMb": 128, + "sdsConfigurationFailure": null, + "sdsDecoupled": null, + "sdsReceiveBufferAllocationFailures": null, + "sdsState": "Normal", + "softwareVersionInfo": "R3_6.0.0" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +import copy + +LOG = utils.get_logger('sds') + + +class PowerFlexSDS(object): + """Class with SDS operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_sds_parameters()) + + mut_ex_args = [['sds_name', 'sds_id'], + ['protection_domain_name', 'protection_domain_id']] + + required_together_args = [['sds_ip_list', 'sds_ip_state']] + + required_one_of_args = [['sds_name', 'sds_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_together=required_together_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = 
utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def validate_rmcache_size_parameter(self, rmcache_enabled, rmcache_size): + """Validate the input parameters""" + + # RM cache size can only be set when RM cache is enabled; fail fast + # when a size is given while rmcache_enabled is explicitly False + if rmcache_size is not None and rmcache_enabled is False: + error_msg = "RM cache size can be set only when RM cache " \ + "is enabled, please enable it along with RM " \ + "cache size." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def validate_ip_parameter(self, sds_ip_list): + """Validate the input parameters""" + + if sds_ip_list is None or len(sds_ip_list) == 0: + error_msg = "Provide valid values for " \ + "sds_ip_list as 'ip' and 'role' for Create/Modify " \ + "operations." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_sds_details(self, sds_name=None, sds_id=None): + """Get SDS details + :param sds_name: Name of the SDS + :type sds_name: str + :param sds_id: ID of the SDS + :type sds_id: str + :return: Details of SDS if it exists, None otherwise + :rtype: dict + """ + + id_or_name = sds_id if sds_id else sds_name + + try: + if sds_name: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'name': sds_name}) + else: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'id': sds_id}) + + if len(sds_details) == 0: + msg = "SDS with identifier '%s' not found" % id_or_name + LOG.info(msg) + return None + + return sds_details[0] + + except Exception as e: + error_msg = "Failed to get the SDS '%s' with error '%s'" \ + % (id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of
the protection domain + :return: Protection domain details + :rtype: dict + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = None + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if not pd_details: + error_msg = "Unable to find the protection domain with " \ + "'%s'. Please enter a valid protection domain " \ + "name/id." % name_or_id + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + return pd_details[0] + + except Exception as e: + error_msg = "Failed to get the protection domain '%s' with " \ + "error '%s'" % (name_or_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def restructure_ip_role_dict(self, sds_ip_list): + """Restructure IP role dict + :param sds_ip_list: List of one or more IP addresses and + their roles + :type sds_ip_list: list[dict] + :return: List of one or more IP addresses and their roles + :rtype: list[dict] + """ + new_sds_ip_list = [] + for item in sds_ip_list: + new_sds_ip_list.append({"SdsIp": item}) + return new_sds_ip_list + + def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state, + sds_name, rmcache_enabled=None, rmcache_size=None): + """Create SDS + :param protection_domain_id: ID of the Protection Domain + :type protection_domain_id: str + :param sds_ip_list: List of one or more IP addresses associated + with the SDS over which the data will be + transferred. 
+ :type sds_ip_list: list[dict] + :param sds_ip_state: SDS IP state + :type sds_ip_state: str + :param sds_name: SDS name + :type sds_name: str + :param rmcache_enabled: Whether to enable the Read RAM cache + :type rmcache_enabled: bool + :param rmcache_size: Read RAM cache size (in MB) + :type rmcache_size: int + :return: Boolean indicating if create operation is successful + """ + try: + if sds_name is None or len(sds_name.strip()) == 0: + error_msg = "Please provide valid sds_name value for " \ + "creation of SDS." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if protection_domain_id is None: + error_msg = "Protection Domain is a mandatory parameter " \ + "for creating a SDS. Please enter a valid value." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_ip_list is None or len(sds_ip_list) == 0: + error_msg = "Please provide valid sds_ip_list values for " \ + "creation of SDS." + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_ip_state is not None and sds_ip_state != "present-in-sds": + error_msg = "Incorrect IP state given for creation of SDS." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + # Restructure IP-role parameter format + if sds_ip_list and sds_ip_state == "present-in-sds": + sds_ip_list = self.restructure_ip_role_dict(sds_ip_list) + + if rmcache_size is not None: + self.validate_rmcache_size_parameter(rmcache_enabled, + rmcache_size) + # set rmcache size in KB + rmcache_size = rmcache_size * 1024 + + create_params = ("protection_domain_id: %s," + " sds_ip_list: %s," + " sds_name: %s," + " rmcache_enabled: %s, " + " rmcache_size_KB: %s" + % (protection_domain_id, sds_ip_list, + sds_name, rmcache_enabled, rmcache_size)) + LOG.info("Creating SDS with params: %s", create_params) + + self.powerflex_conn.sds.create( + protection_domain_id=protection_domain_id, + sds_ips=sds_ip_list, + name=sds_name, + rmcache_enabled=rmcache_enabled, + rmcache_size_in_kb=rmcache_size) + return True + + except Exception as e: + error_msg = "Create SDS '%s' operation failed with error '%s'" \ + % (sds_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def to_modify(self, sds_details, sds_new_name, rfcache_enabled, + rmcache_enabled, rmcache_size, performance_profile): + """ + :param sds_details: Details of the SDS + :type sds_details: dict + :param sds_new_name: New name of SDS + :type sds_new_name: str + :param rfcache_enabled: Whether to enable the Read Flash cache + :type rfcache_enabled: bool + :param rmcache_enabled: Whether to enable the Read RAM cache + :type rmcache_enabled: bool + :param rmcache_size: Read RAM cache size (in MB) + :type rmcache_size: int + :param performance_profile: Performance profile to apply to the SDS + :type performance_profile: str + :return: Dictionary containing the attributes of SDS which are to be + updated + :rtype: dict + """ + modify_dict = {} + + if sds_new_name is not None: + if len(sds_new_name.strip()) == 0: + error_msg = "Please provide valid SDS name." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + if sds_new_name != sds_details['name']: + modify_dict['name'] = sds_new_name + + if rfcache_enabled is not None and \ + sds_details['rfcacheEnabled'] != rfcache_enabled: + modify_dict['rfcacheEnabled'] = rfcache_enabled + + if rmcache_enabled is not None and \ + sds_details['rmcacheEnabled'] != rmcache_enabled: + modify_dict['rmcacheEnabled'] = rmcache_enabled + + if rmcache_size is not None: + self.validate_rmcache_size_parameter(rmcache_enabled, + rmcache_size) + exisitng_size_mb = sds_details['rmcacheSizeInKb'] / 1024 + if rmcache_size != exisitng_size_mb: + if sds_details['rmcacheEnabled']: + modify_dict['rmcacheSizeInMB'] = rmcache_size + else: + error_msg = "Failed to update RM cache size for the " \ + "SDS '%s' as RM cache is disabled " \ + "previously, please enable it before " \ + "setting the size." \ + % sds_details['name'] + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + if performance_profile is not None and \ + sds_details['perfProfile'] != performance_profile: + modify_dict['perfProfile'] = performance_profile + + return modify_dict + + def modify_sds_attributes(self, sds_id, modify_dict, + create_flag=False): + """Modify SDS attributes + :param sds_id: SDS ID + :type sds_id: str + :param modify_dict: Dictionary containing the attributes of SDS + which are to be updated + :type modify_dict: dict + :param create_flag: Flag to indicate whether modify operation is + followed by create operation or not + :type create_flag: bool + :return: Boolean indicating if the operation is successful + """ + try: + msg = "Dictionary containing attributes which are to be" \ + " updated is '%s'." % (str(modify_dict)) + LOG.info(msg) + + if 'name' in modify_dict: + self.powerflex_conn.sds.rename(sds_id, modify_dict['name']) + msg = "The name of the SDS is updated to '%s' successfully." 
\ + % modify_dict['name'] + LOG.info(msg) + + if 'rfcacheEnabled' in modify_dict: + self.powerflex_conn.sds.set_rfcache_enabled( + sds_id, modify_dict['rfcacheEnabled']) + msg = "The use RFcache is updated to '%s' successfully." \ + % modify_dict['rfcacheEnabled'] + LOG.info(msg) + + if 'rmcacheEnabled' in modify_dict: + self.powerflex_conn.sds.set_rmcache_enabled( + sds_id, modify_dict['rmcacheEnabled']) + msg = "The use RMcache is updated to '%s' successfully." \ + % modify_dict['rmcacheEnabled'] + LOG.info(msg) + + if 'rmcacheSizeInMB' in modify_dict: + self.powerflex_conn.sds.set_rmcache_size( + sds_id, modify_dict['rmcacheSizeInMB']) + msg = "The size of RMcache is updated to '%s' successfully." \ + % modify_dict['rmcacheSizeInMB'] + LOG.info(msg) + + if 'perfProfile' in modify_dict: + self.powerflex_conn.sds.set_performance_parameters( + sds_id, modify_dict['perfProfile']) + msg = "The performance profile is updated to '%s'" \ + % modify_dict['perfProfile'] + LOG.info(msg) + + return True + except Exception as e: + if create_flag: + error_msg = "Create SDS is successful, but failed to update" \ + " the SDS '%s' with error '%s'"\ + % (sds_id, str(e)) + else: + error_msg = "Failed to update the SDS '%s' with error '%s'" \ + % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def identify_ip_role(self, sds_ip_list, sds_details, sds_ip_state): + """Identify IPs before addition/removal + :param sds_ip_list: List of one or more IP addresses and + their roles + :type sds_ip_list: list[dict] + :param sds_details: SDS details + :type sds_details: dict + :param sds_ip_state: State of IP in SDS + :type sds_ip_state: str + :return: List containing the key-value pairs of IP-role for an + SDS + :rtype: list[dict] + """ + existing_ip_role_list = sds_details['ipList'] + + # identify IPs to add or roles to update + if sds_ip_state == "present-in-sds": + update_role = [] + ips_to_add = [] + + # identify IPs to add + existing_ip_list = [] + if 
existing_ip_role_list: + for ip in existing_ip_role_list: + existing_ip_list.append(ip['ip']) + for given_ip in sds_ip_list: + ip = given_ip['ip'] + if ip not in existing_ip_list: + ips_to_add.append(given_ip) + LOG.info("IP(s) to be added: %s", ips_to_add) + + if len(ips_to_add) != 0: + for ip in ips_to_add: + sds_ip_list.remove(ip) + + # identify IPs whose role needs to be updated + update_role = [ip for ip in sds_ip_list + if ip not in existing_ip_role_list] + LOG.info("Role update needed for: %s", update_role) + + return ips_to_add, update_role + + elif sds_ip_state == "absent-in-sds": + # identify IPs to remove + ips_to_remove = [ip for ip in existing_ip_role_list + if ip in sds_ip_list] + if len(ips_to_remove) != 0: + LOG.info("IP(s) to remove: %s", ips_to_remove) + return ips_to_remove + else: + LOG.info("IP(s) do not exists.") + return False, None + + def add_ip(self, sds_id, sds_ip_list): + """Add IP to SDS + :param sds_id: SDS ID + :type sds_id: str + :param sds_ip_list: List of one or more IP addresses and + their roles + :type sds_ip_list: list[dict] + :return: Boolean indicating if add IP operation is successful + """ + try: + for ip in sds_ip_list: + LOG.info("IP to add: %s", ip) + self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip) + LOG.info("IP added successfully.") + return True + except Exception as e: + error_msg = "Add IP to SDS '%s' operation failed with " \ + "error '%s'" % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def update_role(self, sds_id, sds_ip_list): + """Update IP's role for an SDS + :param sds_id: SDS ID + :type sds_id: str + :param sds_ip_list: List of one or more IP addresses and + their roles + :type sds_ip_list: list[dict] + :return: Boolean indicating if add IP operation is successful + """ + try: + LOG.info("Role updates for: %s", sds_ip_list) + if len(sds_ip_list) != 0: + for ip in sds_ip_list: + LOG.info("ip-role: %s", ip) + self.powerflex_conn.sds.set_ip_role(sds_id, ip['ip'], + 
ip['role']) + msg = "The role '%s' for IP '%s' is updated " \ + "successfully." % (ip['role'], ip['ip']) + LOG.info(msg) + return True + except Exception as e: + error_msg = "Update role of IP for SDS '%s' operation failed " \ + "with error '%s'" % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def remove_ip(self, sds_id, sds_ip_list): + """Remove IP from SDS + :param sds_id: SDS ID + :type sds_id: str + :param sds_ip_list: List of one or more IP addresses and + their roles. + :type sds_ip_list: list[dict] + :return: Boolean indicating if remove IP operation is successful + """ + try: + for ip in sds_ip_list: + LOG.info("IP to remove: %s", ip) + self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip']) + LOG.info("IP removed successfully.") + return True + except Exception as e: + error_msg = "Remove IP from SDS '%s' operation failed with " \ + "error '%s'" % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def delete_sds(self, sds_id): + """Delete SDS + :param sds_id: SDS ID + :type sds_id: str + :return: Boolean indicating if delete operation is successful + """ + try: + self.powerflex_conn.sds.delete(sds_id) + return True + except Exception as e: + error_msg = "Delete SDS '%s' operation failed with error '%s'" \ + % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on SDS based on parameters passed in + the playbook + """ + sds_name = self.module.params['sds_name'] + sds_id = self.module.params['sds_id'] + sds_new_name = self.module.params['sds_new_name'] + protection_domain_name = self.module.params['protection_domain_name'] + protection_domain_id = self.module.params['protection_domain_id'] + rfcache_enabled = self.module.params['rfcache_enabled'] + rmcache_enabled = self.module.params['rmcache_enabled'] + rmcache_size = self.module.params['rmcache_size'] + sds_ip_list = 
copy.deepcopy(self.module.params['sds_ip_list']) + sds_ip_state = self.module.params['sds_ip_state'] + performance_profile = self.module.params['performance_profile'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and SDS details + changed = False + result = dict( + changed=False, + sds_details={} + ) + + # get SDS details + sds_details = self.get_sds_details(sds_name, sds_id) + if sds_details: + sds_id = sds_details['id'] + msg = "Fetched the SDS details %s" % (str(sds_details)) + LOG.info(msg) + + # get Protection Domain ID from name + if protection_domain_name: + pd_details = self.get_protection_domain(protection_domain_name) + if pd_details: + protection_domain_id = pd_details['id'] + msg = "Fetched the protection domain details with id '%s', " \ + "name '%s'" % (protection_domain_id, protection_domain_name) + LOG.info(msg) + + # create operation + create_changed = False + if state == 'present' and not sds_details: + if sds_id: + error_msg = "Creation of SDS is allowed using sds_name " \ + "only, sds_id given." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + if sds_new_name: + error_msg = "sds_new_name parameter is not supported " \ + "during creation of a SDS. Try renaming the " \ + "SDS after the creation." 
+ LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + self.validate_ip_parameter(sds_ip_list) + + create_changed = self.create_sds(protection_domain_id, + sds_ip_list, sds_ip_state, + sds_name, rmcache_enabled, + rmcache_size) + if create_changed: + sds_details = self.get_sds_details(sds_name) + sds_id = sds_details['id'] + msg = "SDS created successfully, fetched SDS details %s"\ + % (str(sds_details)) + LOG.info(msg) + + # checking if basic SDS parameters are modified or not + modify_dict = {} + if sds_details and state == 'present': + modify_dict = self.to_modify(sds_details, sds_new_name, + rfcache_enabled, rmcache_enabled, + rmcache_size, performance_profile) + msg = "Parameters to be modified are as follows: %s"\ + % (str(modify_dict)) + LOG.info(msg) + + # modify operation + modify_changed = False + if modify_dict and state == 'present': + LOG.info("Modify SDS params.") + modify_changed = self.modify_sds_attributes(sds_id, modify_dict, + create_changed) + + # get updated SDS details + sds_details = self.get_sds_details(sds_id=sds_id) + + # add IPs to SDS + # update IP's role for an SDS + add_ip_changed = False + update_role_changed = False + if sds_details and state == 'present' \ + and sds_ip_state == "present-in-sds": + self.validate_ip_parameter(sds_ip_list) + ips_to_add, roles_to_update = self.identify_ip_role( + sds_ip_list, sds_details, sds_ip_state) + if ips_to_add: + add_ip_changed = self.add_ip(sds_id, ips_to_add) + if roles_to_update: + update_role_changed = self.update_role(sds_id, + roles_to_update) + + # remove IPs from SDS + remove_ip_changed = False + if sds_details and state == 'present' \ + and sds_ip_state == "absent-in-sds": + self.validate_ip_parameter(sds_ip_list) + ips_to_remove = self.identify_ip_role(sds_ip_list, sds_details, + sds_ip_state) + if ips_to_remove: + remove_ip_changed = self.remove_ip(sds_id, ips_to_remove) + + # delete operation + delete_changed = False + if sds_details and state == 'absent': + delete_changed 
= self.delete_sds(sds_id) + + if create_changed or modify_changed or add_ip_changed \ + or update_role_changed or remove_ip_changed or delete_changed: + changed = True + + # Returning the updated SDS details + if state == 'present': + sds_details = self.show_output(sds_id) + result['sds_details'] = sds_details + result['changed'] = changed + self.module.exit_json(**result) + + def show_output(self, sds_id): + """Show SDS details + :param sds_id: ID of the SDS + :type sds_id: str + :return: Details of SDS + :rtype: dict + """ + + try: + sds_details = self.powerflex_conn.sds.get( + filter_fields={'id': sds_id}) + + if len(sds_details) == 0: + msg = "SDS with identifier '%s' not found" % sds_id + LOG.error(msg) + return None + + # Append protection domain name + if 'protectionDomainId' in sds_details[0] \ + and sds_details[0]['protectionDomainId']: + pd_details = self.get_protection_domain( + protection_domain_id=sds_details[0]['protectionDomainId']) + sds_details[0]['protectionDomainName'] = pd_details['name'] + + # Append rmcache size in MB + if 'rmcacheSizeInKb' in sds_details[0] \ + and sds_details[0]['rmcacheSizeInKb']: + rmcache_size_mb = sds_details[0]['rmcacheSizeInKb'] / 1024 + sds_details[0]['rmcacheSizeInMb'] = int(rmcache_size_mb) + + return sds_details[0] + + except Exception as e: + error_msg = "Failed to get the SDS '%s' with error '%s'"\ + % (sds_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + +def get_powerflex_sds_parameters(): + """This method provide parameter required for the SDS module on + PowerFlex""" + return dict( + sds_name=dict(), + sds_id=dict(), + sds_new_name=dict(), + protection_domain_name=dict(), + protection_domain_id=dict(), + sds_ip_list=dict( + type='list', elements='dict', options=dict( + ip=dict(required=True), + role=dict(required=True, choices=['all', 'sdsOnly', + 'sdcOnly']) + ) + ), + sds_ip_state=dict(choices=['present-in-sds', 'absent-in-sds']), + rfcache_enabled=dict(type='bool'), + 
rmcache_enabled=dict(type='bool'), + rmcache_size=dict(type='int'), + performance_profile=dict(choices=['Compact', 'HighPerformance']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex SDS object and perform actions on it + based on user input from playbook""" + obj = PowerFlexSDS() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py new file mode 100644 index 00000000..69caea07 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py @@ -0,0 +1,1285 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Snapshots on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: snapshot +version_added: '1.0.0' +short_description: Manage Snapshots on Dell PowerFlex +description: +- Managing snapshots on PowerFlex Storage System includes + creating, getting details, mapping/unmapping to/from SDC, + modifying the attributes and deleting snapshot. + +author: +- Akash Shendge (@shenda1) + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +options: + snapshot_name: + description: + - The name of the snapshot. + - Mandatory for create operation. + - Specify either I(snapshot_name) or I(snapshot_id) (but not both) for any operation. + type: str + snapshot_id: + description: + - The ID of the Snapshot. + type: str + vol_name: + description: + - The name of the volume for which snapshot will be taken. + - Specify either I(vol_name) or I(vol_id) while creating snapshot. + type: str + vol_id: + description: + - The ID of the volume. 
+ type: str + read_only: + description: + - Specifies whether mapping of the created snapshot volume will have + read-write access or limited to read-only access. + - If C(true), snapshot is created with read-only access. + - If C(false), snapshot is created with read-write access. + type: bool + size: + description: + - The size of the snapshot. + type: int + cap_unit: + description: + - The unit of the volume size. It defaults to C(GB), if not specified. + choices: ['GB' , 'TB'] + type: str + snapshot_new_name: + description: + - New name of the snapshot. Used to rename the snapshot. + type: str + allow_multiple_mappings: + description: + - Specifies whether to allow multiple mappings or not. + type: bool + desired_retention: + description: + - The retention value for the Snapshot. + - If the desired_retention is not mentioned during creation, snapshot + will be created with unlimited retention. + - Maximum supported desired retention is 31 days. + type: int + retention_unit: + description: + - The unit for retention. It defaults to C(hours), if not specified. + choices: [hours, days] + type: str + sdc: + description: + - Specifies SDC parameters. + type: list + elements: dict + suboptions: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + access_mode: + description: + - Define the access mode for all mappings of the snapshot. + choices: ['READ_WRITE', 'READ_ONLY', 'NO_ACCESS'] + type: str + bandwidth_limit: + description: + - Limit of snapshot network bandwidth. + - Need to mention in multiple of 1024 Kbps. 
+ - To set no limit, 0 is to be passed. + type: int + iops_limit: + description: + - Limit of snapshot IOPS. + - Minimum IOPS limit is 11 and specify 0 for unlimited iops. + type: int + sdc_state: + description: + - Mapping state of the SDC. + choices: ['mapped', 'unmapped'] + type: str + remove_mode: + description: + - Removal mode for the snapshot. + - It defaults to C(ONLY_ME), if not specified. + choices: ['ONLY_ME', 'INCLUDING_DESCENDANTS'] + type: str + state: + description: + - State of the snapshot. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Create snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_snapshot" + vol_name: "ansible_volume" + read_only: False + desired_retention: 2 + state: "present" + +- name: Get snapshot details using snapshot id + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + state: "present" + +- name: Map snapshot to SDC + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + - sdc_id: "663ac0d200000001" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Modify the attributes of SDC mapped to snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + iops_limit: 11 + bandwidth_limit: 4096 + - sdc_id: "663ac0d200000001" + iops_limit: 20 + bandwidth_limit: 2048 + allow_multiple_mappings: True + sdc_state: 
"mapped" + state: "present" + +- name: Extend the size of snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + size: 16 + state: "present" + +- name: Unmap SDCs from snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + sdc: + - sdc_ip: "198.10.xxx.xxx" + - sdc_id: "663ac0d200000001" + sdc_state: "unmapped" + state: "present" + +- name: Rename snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + snapshot_new_name: "ansible_renamed_snapshot_10" + state: "present" + +- name: Delete snapshot + dellemc.powerflex.snapshot: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "fe6cb28200000007" + remove_mode: "ONLY_ME" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' + +snapshot_details: + description: Details of the snapshot. + returned: When snapshot exists + type: dict + contains: + ancestorVolumeId: + description: The ID of the root of the specified volume's V-Tree. + type: str + ancestorVolumeName: + description: The name of the root of the specified volume's V-Tree. + type: str + creationTime: + description: The creation time of the snapshot. + type: int + id: + description: The ID of the snapshot. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. 
+ type: str + accessMode: + description: Mapping access mode for the specified snapshot. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the snapshot. + type: str + secureSnapshotExpTime: + description: Expiry time of the snapshot. + type: int + sizeInKb: + description: Size of the snapshot. + type: int + sizeInGb: + description: Size of the snapshot. + type: int + retentionInHours: + description: Retention of the snapshot in hours. + type: int + storagePoolId: + description: The ID of the Storage pool in which snapshot resides. + type: str + storagePoolName: + description: The name of the Storage pool in which snapshot resides. + type: str + sample: { + "accessModeLimit": "ReadOnly", + "ancestorVolumeId": "cdd883cf00000002", + "ancestorVolumeName": "ansible-volume-1", + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": "22f1e80c00000001", + "creationTime": 1631619229, + "dataLayout": "MediumGranularity", + "id": "cdd883d000000004", + "links": [ + { + "href": "/api/instances/Volume::cdd883d000000004", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883d000000004/relationships + /Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "/api/parent/relationship/ancestorVolumeId" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible_vol_snap_1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + 
"retentionInHours": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInGb": 16, + "sizeInKb": 16777216, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "storagePoolName": "pool1", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "Snapshot", + "vtreeId": "6e86255c00000001" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +from datetime import datetime, timedelta +import time +import copy + +LOG = utils.get_logger('snapshot') + + +class PowerFlexSnapshot(object): + """Class with Snapshot operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_snapshot_parameters()) + + mutually_exclusive = [['snapshot_name', 'snapshot_id'], + ['vol_name', 'vol_id'], + ['snapshot_id', 'vol_name'], + ['snapshot_id', 'vol_id']] + + required_together = [['sdc', 'sdc_state']] + + required_one_of = [['snapshot_name', 'snapshot_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_storage_pool(self, storage_pool_id): + """Get storage pool details + :param storage_pool_id: The storage pool id + :return: Storage pool details + """ + + try: + return self.powerflex_conn.storage_pool.get( + 
filter_fields={'id': storage_pool_id}) + + except Exception as e: + errormsg = "Failed to get the storage pool %s with error " \ + "%s" % (storage_pool_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_snapshot(self, snapshot_name=None, snapshot_id=None): + """Get snapshot details + :param snapshot_name: Name of the snapshot + :param snapshot_id: ID of the snapshot + :return: Details of snapshot if exist. + """ + + id_or_name = snapshot_id if snapshot_id else snapshot_name + + try: + if snapshot_name: + snapshot_details = self.powerflex_conn.volume.get( + filter_fields={'name': snapshot_name}) + else: + snapshot_details = self.powerflex_conn.volume.get( + filter_fields={'id': snapshot_id}) + + if len(snapshot_details) == 0: + msg = "Snapshot with identifier %s is not found" % id_or_name + LOG.error(msg) + return None + + if len(snapshot_details) > 1: + errormsg = "Multiple instances of snapshot " \ + "exist with name {0}".format(snapshot_name) + self.module.fail_json(msg=errormsg) + + # Add ancestor volume name + if 'ancestorVolumeId' in snapshot_details[0] and \ + snapshot_details[0]['ancestorVolumeId']: + vol = self.get_volume( + vol_id=snapshot_details[0]['ancestorVolumeId']) + snapshot_details[0]['ancestorVolumeName'] = vol['name'] + + # Add size in GB + if 'sizeInKb' in snapshot_details[0] and \ + snapshot_details[0]['sizeInKb']: + snapshot_details[0]['sizeInGb'] = utils.get_size_in_gb( + snapshot_details[0]['sizeInKb'], 'KB') + + # Add storage pool name + if 'storagePoolId' in snapshot_details[0] and \ + snapshot_details[0]['storagePoolId']: + sp = self.get_storage_pool(snapshot_details[0]['storagePoolId']) + if len(sp) > 0: + snapshot_details[0]['storagePoolName'] = sp[0]['name'] + + # Add retention in hours + if 'secureSnapshotExpTime' in snapshot_details[0] and\ + 'creationTime' in snapshot_details[0]: + if snapshot_details[0]['secureSnapshotExpTime'] != 0: + expiry_obj = datetime.fromtimestamp( + 
snapshot_details[0]['secureSnapshotExpTime']) + creation_obj = datetime.fromtimestamp( + snapshot_details[0]['creationTime']) + + td = utils.dateutil.relativedelta.relativedelta( + expiry_obj, creation_obj) + snapshot_details[0]['retentionInHours'] = td.hours + else: + snapshot_details[0]['retentionInHours'] = 0 + + # Match volume details with snapshot details + if any([self.module.params['vol_name'], + self.module.params['vol_id']]): + self.match_vol_details(snapshot_details[0]) + return snapshot_details[0] + except Exception as e: + errormsg = "Failed to get the snapshot %s with error %s" % ( + id_or_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def match_vol_details(self, snapshot): + """Match the given volume details with the response + :param snapshot: The snapshot details + """ + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + + try: + if vol_name and vol_name != snapshot['ancestorVolumeName']: + errormsg = "Given volume name do not match with the " \ + "corresponding snapshot details." + self.module.fail_json(msg=errormsg) + + if vol_id and vol_id != snapshot['ancestorVolumeId']: + errormsg = "Given volume ID do not match with the " \ + "corresponding snapshot details." 
+ self.module.fail_json(msg=errormsg) + except Exception as e: + errormsg = "Failed to match volume details with the snapshot " \ + "with error %s" % str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_volume(self, vol_name=None, vol_id=None): + """Get the volume id + :param vol_name: The name of the volume + :param vol_id: The ID of the volume + :return: The volume details + """ + + try: + if vol_name: + vol_details = self.powerflex_conn.volume.get( + filter_fields={'name': vol_name}) + else: + vol_details = self.powerflex_conn.volume.get( + filter_fields={'id': vol_id}) + + if len(vol_details) == 0: + error_msg = "Unable to find volume with name {0}".format( + vol_name) + self.module.fail_json(msg=error_msg) + return vol_details[0] + except Exception as e: + errormsg = "Failed to get the volume %s with error " \ + "%s" % (vol_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_sdc_id(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC ID + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The ID of the SDC + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier {0}".format( + id_ip_name) + self.module.fail_json(msg=error_msg) + return sdc_details[0]['id'] + except Exception as e: + errormsg = "Failed to get the SDC %s with error " \ + "%s" % (id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_system_id(self): + """Get system id""" + + try: + resp = 
self.powerflex_conn.system.get() + + if len(resp) == 0: + self.module.fail_json(msg="No system exist on the given host.") + + if len(resp) > 1: + self.module.fail_json(msg="Multiple systems exist on the " + "given host.") + return resp[0]['id'] + except Exception as e: + msg = "Failed to get system id with error %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def create_snapshot(self, snapshot_name, vol_id, system_id, + access_mode, retention): + """Create snapshot + :param snapshot_name: The name of the snapshot + :param vol_id: The ID of the source volume + :param system_id: The system id + :param access_mode: Access mode for the snapshot + :param retention: The retention for the snapshot + :return: Boolean indicating if create operation is successful + """ + LOG.debug("Creating Snapshot") + + try: + self.powerflex_conn.system.snapshot_volumes( + system_id=system_id, + snapshot_defs=[utils.SnapshotDef(vol_id, snapshot_name)], + access_mode=access_mode, + retention_period=retention + ) + + return True + except Exception as e: + errormsg = "Create snapshot %s operation failed with " \ + "error %s" % (snapshot_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_retention(self, snapshot_id, new_retention): + """Modify snapshot retention + :param snapshot_id: The snapshot id + :param new_retention: Desired retention of the snapshot + :return: Boolean indicating if modifying retention is successful + """ + + try: + self.powerflex_conn.volume.set_retention_period(snapshot_id, + new_retention) + return True + except Exception as e: + errormsg = "Modify retention of snapshot %s operation failed " \ + "with error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_size(self, snapshot_id, new_size): + """Modify snapshot size + :param snapshot_id: The snapshot id + :param new_size: Size of the snapshot + :return: Boolean indicating if extend operation is successful + 
""" + + try: + self.powerflex_conn.volume.extend(snapshot_id, new_size) + return True + except Exception as e: + errormsg = "Extend snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_snap_access_mode(self, snapshot_id, snap_access_mode): + """Modify access mode of snapshot + :param snapshot_id: The snapshot id + :param snap_access_mode: Access mode of the snapshot + :return: Boolean indicating if modifying access mode of + snapshot is successful + """ + + try: + self.powerflex_conn.volume.set_volume_access_mode_limit( + volume_id=snapshot_id, access_mode_limit=snap_access_mode) + return True + except Exception as e: + errormsg = "Modify access mode of snapshot %s operation " \ + "failed with error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_access_mode(self, snapshot_id, access_mode_list): + """Modify access mode of SDCs mapped to snapshot + :param snapshot_id: The snapshot id + :param access_mode_list: List containing SDC ID's whose access mode + is to modified + :return: Boolean indicating if modifying access mode is successful + """ + + try: + changed = False + for temp in access_mode_list: + if temp['accessMode']: + self.powerflex_conn.volume.set_access_mode_for_sdc( + volume_id=snapshot_id, sdc_id=temp['sdc_id'], + access_mode=temp['accessMode']) + changed = True + return changed + except Exception as e: + errormsg = "Modify access mode of SDC %s operation failed " \ + "with error %s" % (temp['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_limits(self, payload): + """Modify IOPS and bandwidth limits of SDC's mapped to snapshot + :param snapshot_id: The snapshot id + :param limits_dict: Dict containing SDC ID's whose bandwidth and + IOPS is to modified + :return: Boolean indicating if modifying limits is successful + """ + + try: + changed = False + if 
payload['bandwidth_limit'] is not None or \ + payload['iops_limit'] is not None: + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed + except Exception as e: + errormsg = "Modify bandwidth/iops limits of SDC %s operation " \ + "failed with error %s" % (payload['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def rename_snapshot(self, snapshot_id, new_name): + """Rename snapshot + :param snapshot_id: The snapshot id + :param new_name: The new name of the snapshot + :return: Boolean indicating if rename operation is successful + """ + + try: + self.powerflex_conn.volume.rename(snapshot_id, new_name) + return True + except Exception as e: + errormsg = "Rename snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_snapshot(self, snapshot_id, remove_mode): + """Delete snapshot + :param snapshot_id: The snapshot id + :param remove_mode: Removal mode for the snapshot + :return: Boolean indicating if delete operation is successful + """ + + try: + self.powerflex_conn.volume.delete(snapshot_id, remove_mode) + return True + except Exception as e: + errormsg = "Delete snapshot %s operation failed with " \ + "error %s" % (snapshot_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_desired_retention(self, desired_retention, retention_unit): + """Validates the specified desired retention. 
+ :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + """ + + if retention_unit == 'hours' and (desired_retention < 1 or + desired_retention > 744): + self.module.fail_json(msg="Please provide a valid integer as the" + " desired retention between 1 and 744.") + elif retention_unit == 'days' and (desired_retention < 1 or + desired_retention > 31): + self.module.fail_json(msg="Please provide a valid integer as the" + " desired retention between 1 and 31.") + + def unmap_snapshot_from_sdc(self, snapshot, sdc): + """Unmap SDC's from snapshot + :param snapshot: Snapshot details + :param sdc: List of SDCs to be unmapped + :return: Boolean indicating if unmap operation is successful + """ + + current_sdcs = snapshot['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id in current_sdc_ids: + sdc_id_list.append(sdc_id) + + LOG.info("SDC IDs to remove %s", sdc_id_list) + + if len(sdc_id_list) == 0: + return False + + try: + for sdc_id in sdc_id_list: + self.powerflex_conn.volume.remove_mapped_sdc( + snapshot['id'], sdc_id) + return True + except Exception as e: + errormsg = "Unmap SDC %s from snapshot %s failed with error " \ + "%s" % (sdc_id, snapshot['id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def map_snapshot_to_sdc(self, snapshot, sdc): + """Map SDC's to snapshot + :param snapshot: Snapshot details + :param sdc: List of SDCs + :return: Boolean indicating if mapping operation is successful + """ + + current_sdcs = snapshot['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_map_list = [] + 
sdc_modify_list1 = [] + sdc_modify_list2 = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id not in current_sdc_ids: + sdc_id_list.append(sdc_id) + temp['sdc_id'] = sdc_id + if 'access_mode' in temp: + temp['access_mode'] = get_access_mode(temp['access_mode']) + if 'bandwidth_limit' not in temp: + temp['bandwidth_limit'] = None + if 'iops_limit' not in temp: + temp['iops_limit'] = None + sdc_map_list.append(temp) + else: + access_mode_dict, limits_dict = check_for_sdc_modification( + snapshot, sdc_id, temp) + if access_mode_dict: + sdc_modify_list1.append(access_mode_dict) + if limits_dict: + sdc_modify_list2.append(limits_dict) + + LOG.info("SDC to add: %s", sdc_map_list) + + if not sdc_map_list: + return False, sdc_modify_list1, sdc_modify_list2 + + try: + changed = False + for sdc in sdc_map_list: + payload = { + "volume_id": snapshot['id'], + "sdc_id": sdc['sdc_id'], + "access_mode": sdc['access_mode'], + "allow_multiple_mappings": self.module.params['allow_multiple_mappings'] + } + self.powerflex_conn.volume.add_mapped_sdc(**payload) + + if sdc['bandwidth_limit'] or sdc['iops_limit']: + payload = { + "volume_id": snapshot['id'], + "sdc_id": sdc['sdc_id'], + "bandwidth_limit": sdc['bandwidth_limit'], + "iops_limit": sdc['iops_limit'] + } + + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed, sdc_modify_list1, sdc_modify_list2 + + except Exception as e: + errormsg = "Mapping snapshot %s to SDC %s " \ + "failed with error %s" % (snapshot['name'], + sdc['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_parameters(self): + """Validate the input 
parameters""" + + sdc = self.module.params['sdc'] + cap_unit = self.module.params['cap_unit'] + size = self.module.params['size'] + desired_retention = self.module.params['desired_retention'] + retention_unit = self.module.params['retention_unit'] + + param_list = ['snapshot_name', 'snapshot_id', 'vol_name', 'vol_id'] + for param in param_list: + if self.module.params[param] is not None and \ + len(self.module.params[param].strip()) == 0: + error_msg = "Please provide valid %s" % param + self.module.fail_json(msg=error_msg) + + if sdc: + for temp in sdc: + if (all([temp['sdc_id'], temp['sdc_ip']]) or + all([temp['sdc_id'], temp['sdc_name']]) or + all([temp['sdc_ip'], temp['sdc_name']])): + self.module.fail_json(msg="sdc_id, sdc_ip and sdc_name " + "are mutually exclusive") + + if (cap_unit is not None) and not size: + self.module.fail_json(msg="cap_unit can be specified along " + "with size") + + if (retention_unit is not None) and not desired_retention: + self.module.fail_json(msg="retention_unit can be specified along " + "with desired_retention") + + def perform_module_operation(self): + """ + Perform different actions on snapshot based on parameters passed in + the playbook + """ + snapshot_name = self.module.params['snapshot_name'] + snapshot_id = self.module.params['snapshot_id'] + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + read_only = self.module.params['read_only'] + size = self.module.params['size'] + cap_unit = self.module.params['cap_unit'] + snapshot_new_name = self.module.params['snapshot_new_name'] + sdc = copy.deepcopy(self.module.params['sdc']) + sdc_state = self.module.params['sdc_state'] + desired_retention = self.module.params['desired_retention'] + retention_unit = self.module.params['retention_unit'] + remove_mode = self.module.params['remove_mode'] + state = self.module.params['state'] + + # result is a dictionary to contain end state and snapshot details + changed = False + is_modified = False + result 
= dict( + changed=False, + snapshot_details={} + ) + + self.validate_parameters() + + if size and not cap_unit: + cap_unit = 'GB' + + if desired_retention and not retention_unit: + retention_unit = 'hours' + + if desired_retention is not None: + self.validate_desired_retention(desired_retention, retention_unit) + + snapshot_details = self.get_snapshot(snapshot_name, snapshot_id) + + if snapshot_details: + snap_access_mode = None + if read_only is not None: + if read_only: + snap_access_mode = 'ReadOnly' + else: + snap_access_mode = 'ReadWrite' + is_modified, flag1, flag2, flag3 = check_snapshot_modified( + snapshot_details, desired_retention, retention_unit, size, + cap_unit, snap_access_mode) + + if state == 'present' and not snapshot_details: + if snapshot_id: + self.module.fail_json(msg="Creation of snapshot is allowed " + "using snapshot_name only, " + "snapshot_id given.") + + if snapshot_name is None or len(snapshot_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid snapshot " + "name.") + + if vol_name is None and vol_id is None: + self.module.fail_json(msg="Please provide volume details to " + "create new snapshot") + + if snapshot_new_name is not None: + self.module.fail_json(msg="snapshot_new_name is not required" + " while creating snapshot") + + if remove_mode: + self.module.fail_json(msg="remove_mode is not required while " + "creating snapshot") + + if vol_name: + vol = self.get_volume(vol_name=vol_name) + vol_id = vol['id'] + + retention = 0 + if desired_retention: + retention = calculate_retention(desired_retention, + retention_unit) + + system_id = self.get_system_id() + if read_only: + access_mode = 'ReadOnly' + else: + access_mode = 'ReadWrite' + + changed = self.create_snapshot(snapshot_name, vol_id, system_id, + access_mode, retention) + if changed: + snapshot_details = self.get_snapshot(snapshot_name) + + if size: + if cap_unit == 'GB': + new_size = size * 1024 * 1024 + else: + new_size = size * 1024 * 1024 * 1024 + + if 
new_size != snapshot_details['sizeInKb']: + if cap_unit == 'TB': + size = size * 1024 + changed = self.modify_size(snapshot_details['id'], size) + + if is_modified: + if flag1: + retention = calculate_retention(desired_retention, + retention_unit) + changed = self.modify_retention(snapshot_details['id'], + retention) + + if flag2: + new_size = size + if cap_unit == 'TB': + new_size = size * 1024 + changed = self.modify_size(snapshot_details['id'], new_size) + + if flag3: + changed = self.modify_snap_access_mode( + snapshot_details['id'], snap_access_mode) + + if state == 'present' and snapshot_details and sdc and \ + sdc_state == 'mapped': + + changed_mode = False + changed_limits = False + + changed, access_mode_list, limits_list = \ + self.map_snapshot_to_sdc(snapshot_details, sdc) + + if len(access_mode_list) > 0: + changed_mode = self.modify_access_mode( + snapshot_details['id'], access_mode_list) + + if len(limits_list) > 0: + for temp in limits_list: + payload = { + "volume_id": snapshot_details['id'], + "sdc_id": temp['sdc_id'], + "bandwidth_limit": temp['bandwidth_limit'], + "iops_limit": temp['iops_limit'] + } + changed_limits = self.modify_limits(payload) + + if changed_mode or changed_limits: + changed = True + + if state == 'present' and snapshot_details and sdc and \ + sdc_state == 'unmapped': + changed = self.unmap_snapshot_from_sdc(snapshot_details, sdc) + + if state == 'present' and snapshot_details and \ + snapshot_new_name is not None: + if len(snapshot_new_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid snapshot " + "name.") + changed = self.rename_snapshot(snapshot_details['id'], + snapshot_new_name) + if changed: + snapshot_name = snapshot_new_name + + if state == 'absent' and snapshot_details: + if remove_mode is None: + remove_mode = "ONLY_ME" + changed = self.delete_snapshot(snapshot_details['id'], remove_mode) + + if state == 'present': + snapshot_details = self.get_snapshot(snapshot_name, snapshot_id) + 
result['snapshot_details'] = snapshot_details + result['changed'] = changed + self.module.exit_json(**result) + + +def check_snapshot_modified(snapshot=None, desired_retention=None, + retention_unit=None, size=None, cap_unit=None, + access_mode=None): + """Check if snapshot modification is required + :param snapshot: Snapshot details + :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + :param size: Size of the snapshot + :param cap_unit: Capacity unit for the snapshot + :param access_mode: Access mode of the snapshot + :return: Boolean indicating if modification is needed + """ + + snap_creation_timestamp = None + expiration_timestamp = None + is_timestamp_modified = False + is_size_modified = False + is_access_modified = False + is_modified = False + + if 'creationTime' in snapshot: + snap_creation_timestamp = snapshot['creationTime'] + + if desired_retention: + if retention_unit == 'hours': + expiration_timestamp = \ + datetime.fromtimestamp(snap_creation_timestamp) + \ + timedelta(hours=desired_retention) + expiration_timestamp = time.mktime(expiration_timestamp.timetuple()) + else: + expiration_timestamp = \ + datetime.fromtimestamp(snap_creation_timestamp) + \ + timedelta(days=desired_retention) + expiration_timestamp = time.mktime(expiration_timestamp.timetuple()) + + if 'secureSnapshotExpTime' in snapshot and expiration_timestamp and \ + snapshot['secureSnapshotExpTime'] != expiration_timestamp: + existing_timestamp = snapshot['secureSnapshotExpTime'] + new_timestamp = expiration_timestamp + + info_message = 'The existing timestamp is: %s and the new ' \ + 'timestamp is: %s' % (existing_timestamp, + new_timestamp) + LOG.info(info_message) + + existing_time_obj = datetime.fromtimestamp(existing_timestamp) + new_time_obj = datetime.fromtimestamp(new_timestamp) + + if existing_time_obj > new_time_obj: + td = utils.dateutil.relativedelta.relativedelta( + existing_time_obj, new_time_obj) + else: + td 
= utils.dateutil.relativedelta.relativedelta( + new_time_obj, existing_time_obj) + + LOG.info("Time difference: %s", td.minutes) + + # A delta of two minutes is treated as idempotent + if td.seconds > 120 or td.minutes > 2: + is_timestamp_modified = True + + if size: + if cap_unit == 'GB': + new_size = size * 1024 * 1024 + else: + new_size = size * 1024 * 1024 * 1024 + + if new_size != snapshot['sizeInKb']: + is_size_modified = True + + if access_mode and snapshot['accessModeLimit'] != access_mode: + is_access_modified = True + + if is_timestamp_modified or is_size_modified or is_access_modified: + is_modified = True + return is_modified, is_timestamp_modified, is_size_modified, is_access_modified + + +def calculate_retention(desired_retention=None, retention_unit=None): + """ + :param desired_retention: Desired retention of the snapshot + :param retention_unit: Retention unit for snapshot + :return: Retention in minutes + """ + + retention = 0 + if retention_unit == 'days': + retention = desired_retention * 24 * 60 + else: + retention = desired_retention * 60 + return retention + + +def check_for_sdc_modification(snapshot, sdc_id, sdc_details): + """ + :param snapshot: The snapshot details + :param sdc_id: The ID of the SDC + :param sdc_details: The details of SDC + :return: Dictionary with SDC attributes to be modified + """ + access_mode_dict = dict() + limits_dict = dict() + + for sdc in snapshot['mappedSdcInfo']: + if sdc['sdcId'] == sdc_id: + if sdc['accessMode'] != get_access_mode(sdc_details['access_mode']): + access_mode_dict['sdc_id'] = sdc_id + access_mode_dict['accessMode'] = get_access_mode( + sdc_details['access_mode']) + if sdc['limitIops'] != sdc_details['iops_limit'] or \ + sdc['limitBwInMbps'] != sdc_details['bandwidth_limit']: + limits_dict['sdc_id'] = sdc_id + limits_dict['iops_limit'] = None + limits_dict['bandwidth_limit'] = None + if sdc['limitIops'] != sdc_details['iops_limit']: + limits_dict['iops_limit'] = sdc_details['iops_limit'] + if 
sdc['limitBwInMbps'] != get_limits_in_mb(sdc_details['bandwidth_limit']): + limits_dict['bandwidth_limit'] = \ + sdc_details['bandwidth_limit'] + break + return access_mode_dict, limits_dict + + +def get_limits_in_mb(limits): + """ + :param limits: Limits in KB + :return: Limits in MB + """ + + if limits: + return limits / 1024 + + +def get_access_mode(access_mode): + """ + :param access_mode: Access mode of the SDC + :return: The enum for the access mode + """ + + access_mode_dict = { + "READ_WRITE": "ReadWrite", + "READ_ONLY": "ReadOnly", + "NO_ACCESS": "NoAccess" + } + return access_mode_dict.get(access_mode) + + +def get_powerflex_snapshot_parameters(): + """This method provide parameter required for the Ansible snapshot + module on PowerFlex""" + return dict( + snapshot_name=dict(), snapshot_id=dict(), + vol_name=dict(), vol_id=dict(), + read_only=dict(required=False, type='bool'), + size=dict(required=False, type='int'), + cap_unit=dict(choices=['GB', 'TB']), + snapshot_new_name=dict(), + allow_multiple_mappings=dict(required=False, type='bool'), + sdc=dict( + type='list', elements='dict', options=dict( + sdc_id=dict(), sdc_ip=dict(), + sdc_name=dict(), + access_mode=dict(choices=['READ_WRITE', 'READ_ONLY', + 'NO_ACCESS']), + bandwidth_limit=dict(type='int'), + iops_limit=dict(type='int') + ) + ), + desired_retention=dict(type='int'), + retention_unit=dict(choices=['hours', 'days']), + remove_mode=dict(choices=['ONLY_ME', 'INCLUDING_DESCENDANTS']), + sdc_state=dict(choices=['mapped', 'unmapped']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex Snapshot object and perform actions on it + based on user input from playbook""" + obj = PowerFlexSnapshot() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py new file mode 
100644 index 00000000..ca343212 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py @@ -0,0 +1,914 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing Dell Technologies (Dell) PowerFlex storage pool""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: storagepool + +version_added: '1.0.0' + +short_description: Managing Dell PowerFlex storage pool + +description: +- Dell PowerFlex storage pool module includes getting the details of + storage pool, creating a new storage pool, and modifying the attribute of + a storage pool. + +extends_documentation_fragment: + - dellemc.powerflex.powerflex + +author: +- Arindam Datta (@dattaarindam) +- P Srinivas Rao (@srinivas-rao5) + +options: + storage_pool_name: + description: + - The name of the storage pool. + - If more than one storage pool is found with the same name then + protection domain id/name is required to perform the task. + - Mutually exclusive with I(storage_pool_id). + type: str + storage_pool_id: + description: + - The id of the storage pool. + - It is auto generated, hence should not be provided during + creation of a storage pool. + - Mutually exclusive with I(storage_pool_name). + type: str + protection_domain_name: + description: + - The name of the protection domain. + - During creation of a pool, either protection domain name or id must be + mentioned. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The id of the protection domain. + - During creation of a pool, either protection domain name or id must + be mentioned. + - Mutually exclusive with I(protection_domain_name). + type: str + media_type: + description: + - Type of devices in the storage pool. 
+ type: str + choices: ['HDD', 'SSD', 'TRANSITIONAL'] + storage_pool_new_name: + description: + - New name for the storage pool can be provided. + - This parameter is used for renaming the storage pool. + type: str + use_rfcache: + description: + - Enable/Disable RFcache on a specific storage pool. + type: bool + use_rmcache: + description: + - Enable/Disable RMcache on a specific storage pool. + type: bool + state: + description: + - State of the storage pool. + type: str + choices: ["present", "absent"] + required: true +notes: + - TRANSITIONAL media type is supported only during modification. + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' + +- name: Get the details of storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "sample_pool_name" + protection_domain_name: "sample_protection_domain" + state: "present" + +- name: Get the details of storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + state: "present" + +- name: Create a new storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + media_type: "HDD" + state: "present" + +- name: Modify a storage pool by name + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_name: "ansible_test_pool" + protection_domain_id: "1c957da800000000" + use_rmcache: True + use_rfcache: True + state: "present" + +- name: Rename storage pool by id + dellemc.powerflex.storagepool: + hostname: "{{hostname}}" + 
username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + storage_pool_id: "abcd1234ab12r" + storage_pool_new_name: "new_ansible_pool" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +storage_pool_details: + description: Details of the storage pool. + returned: When storage pool exists + type: dict + contains: + mediaType: + description: Type of devices in the storage pool. + type: str + useRfcache: + description: Enable/Disable RFcache on a specific storage pool. + type: bool + useRmcache: + description: Enable/Disable RMcache on a specific storage pool. + type: bool + id: + description: ID of the storage pool under protection domain. + type: str + name: + description: Name of the storage pool under protection domain. + type: str + protectionDomainId: + description: ID of the protection domain in which pool resides. + type: str + protectionDomainName: + description: Name of the protection domain in which pool resides. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "capacityInUseInKb": + description: Total capacity of the storage pool. + type: str + "unusedCapacityInKb": + description: Unused capacity of the storage pool. + type: str + "deviceIds": + description: Device Ids of the storage pool. 
+ type: list + sample: { + "addressSpaceUsage": "Normal", + "addressSpaceUsageType": "DeviceCapacityLimit", + "backgroundScannerBWLimitKBps": 3072, + "backgroundScannerMode": "DataComparison", + "bgScannerCompareErrorAction": "ReportAndFix", + "bgScannerReadErrorAction": "ReportAndFix", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "capacityUsageState": "Normal", + "capacityUsageType": "NetCapacity", + "checksumEnabled": false, + "compressionMethod": "Invalid", + "dataLayout": "MediumGranularity", + "externalAccelerationType": "None", + "fglAccpId": null, + "fglExtraCapacity": null, + "fglMaxCompressionRatio": null, + "fglMetadataSizeXx100": null, + "fglNvdimmMetadataAmortizationX100": null, + "fglNvdimmWriteCacheSizeInMb": null, + "fglOverProvisioningFactor": null, + "fglPerfProfile": null, + "fglWriteAtomicitySize": null, + "fragmentationEnabled": true, + "id": "e0d8f6c900000000", + "links": [ + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/SpSds", + "rel": "/api/StoragePool/relationship/SpSds" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + "activeEnterProtectedMaintenanceModeCapacityInKb": 
0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 3, + "numOfIncomingVtreeMigrations": 0, + 
"numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + 
"snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, + "volumeIds": [ + "456afc7900XXXXXXXX" + ], + 
"vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, + "mediaType": "HDD", + "name": "pool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "persistentChecksumBuilderLimitKb": 3072, + "persistentChecksumEnabled": true, + "persistentChecksumState": "Protected", + "persistentChecksumValidateOnRead": false, + "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, + "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, + "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, + "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, + "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", + "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "replicationCapacityMaxRatio": 32, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, + "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, + "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, + "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, + "vtreeMigrationIoPriorityPolicy": "favorAppIos", + 
        "vtreeMigrationIoPriorityQuietPeriodInMsec": null,
        "zeroPaddingEnabled": true
    }
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\
    import utils

LOG = utils.get_logger('storagepool')


class PowerFlexStoragePool(object):
    """Class with StoragePool operations.

    Wraps the PowerFlex gateway connection and implements get, create,
    modify and (rejected) delete operations for storage pools, reporting
    results back to Ansible via exit_json/fail_json.
    """

    def __init__(self):
        """ Define all parameters required by this module"""

        self.module_params = utils.get_powerflex_gateway_host_parameters()
        self.module_params.update(get_powerflex_storagepool_parameters())

        """ initialize the ansible module """
        # storage_pool_id is unique on its own, so it may not be combined
        # with a name or with protection domain identifiers.
        mut_ex_args = [['storage_pool_name', 'storage_pool_id'],
                       ['protection_domain_name', 'protection_domain_id'],
                       ['storage_pool_id', 'protection_domain_name'],
                       ['storage_pool_id', 'protection_domain_id']]

        required_one_of_args = [['storage_pool_name', 'storage_pool_id']]
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=False,
                                    mutually_exclusive=mut_ex_args,
                                    required_one_of=required_one_of_args)

        utils.ensure_required_libs(self.module)

        try:
            self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
                self.module.params)
            LOG.info('Got the PowerFlex system connection object instance')
        except Exception as e:
            LOG.error(str(e))
            self.module.fail_json(msg=str(e))

    def get_protection_domain(self, protection_domain_name=None,
                              protection_domain_id=None):
        """Get protection domain details.

        Fails the module if the domain is not found or the lookup errors.

        :param protection_domain_name: Name of the protection domain
        :param protection_domain_id: ID of the protection domain
        :return: Protection domain details (first match)
        """
        name_or_id = protection_domain_id if protection_domain_id \
            else protection_domain_name
        try:
            # id takes precedence over name when both are supplied
            filter_fields = {}
            if protection_domain_id:
                filter_fields = {'id': protection_domain_id}
            if protection_domain_name:
                filter_fields = {'name': protection_domain_name}

            pd_details = self.powerflex_conn.protection_domain.get(
                filter_fields=filter_fields)
            if pd_details:
                return pd_details[0]

            if not pd_details:
                err_msg = "Unable to find the protection domain with {0}. " \
                          "Please enter a valid protection domain" \
                          " name/id.".format(name_or_id)
                self.module.fail_json(msg=err_msg)

        except Exception as e:
            errormsg = "Failed to get the protection domain {0} with" \
                       " error {1}".format(name_or_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None,
                         pd_id=None):
        """Get storage pool details.

        Fails the module when a pool name is ambiguous (more than one match)
        or when the lookup errors; returns None when nothing matches.

        :param pd_id: ID of the protection domain
        :param storage_pool_name: The name of the storage pool
        :param storage_pool_id: The storage pool id
        :return: Storage pool details enriched with 'statistics' and
            'protectionDomainName', or None if not found
        """
        name_or_id = storage_pool_id if storage_pool_id \
            else storage_pool_name
        try:
            filter_fields = {}
            if storage_pool_id:
                filter_fields = {'id': storage_pool_id}
            if storage_pool_name:
                filter_fields.update({'name': storage_pool_name})
            if pd_id:
                filter_fields.update({'protectionDomainId': pd_id})
            pool_details = self.powerflex_conn.storage_pool.get(
                filter_fields=filter_fields)
            if pool_details:
                if len(pool_details) > 1:
                    # Pool names are only unique within a protection domain,
                    # so the caller must disambiguate with a domain name/id.
                    err_msg = "More than one storage pool found with {0}," \
                              " Please provide protection domain Name/Id" \
                              " to fetch the unique" \
                              " storage pool".format(storage_pool_name)
                    LOG.error(err_msg)
                    self.module.fail_json(msg=err_msg)
                elif len(pool_details) == 1:
                    pool_details = pool_details[0]
                    statistics = self.powerflex_conn.storage_pool.get_statistics(pool_details['id'])
                    pool_details['statistics'] = statistics if statistics else {}
                    pd_id = pool_details['protectionDomainId']
                    pd_name = self.get_protection_domain(
                        protection_domain_id=pd_id)['name']
                    # adding protection domain name in the pool details
                    pool_details['protectionDomainName'] = pd_name
            else:
                pool_details = None

            return pool_details

        except Exception as e:
            errormsg = "Failed to get the storage pool {0} with error " \
                       "{1}".format(name_or_id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def create_storage_pool(self, pool_name, pd_id, media_type,
                            use_rfcache=None, use_rmcache=None):
        """
        Create a storage pool.

        :param pool_name: Name of the storage pool
        :param pd_id: ID of the protection domain the pool is created in
        :param media_type: Type of storage device in the pool
        :param use_rfcache: Enable/Disable RFcache on pool
        :param use_rmcache: Enable/Disable RMcache on pool
        :return: True, if the operation is successful
        """
        try:
            # media_type has already been normalized from 'TRANSITIONAL'
            # to 'Transitional' by perform_module_operation.
            if media_type == "Transitional":
                self.module.fail_json(msg="TRANSITIONAL media type is not"
                                          " supported during creation."
                                          " Please enter a valid media type")

            if pd_id is None:
                self.module.fail_json(
                    msg="Please provide protection domain details for "
                        "creation of a storage pool")
            self.powerflex_conn.storage_pool.create(
                media_type=media_type,
                protection_domain_id=pd_id, name=pool_name,
                use_rfcache=use_rfcache, use_rmcache=use_rmcache)

            return True
        except Exception as e:
            errormsg = "Failed to create the storage pool {0} with error " \
                       "{1}".format(pool_name, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def modify_storage_pool(self, pool_id, modify_dict):
        """
        Modify the parameters of the storage pool.

        Each key present in modify_dict triggers the corresponding REST
        call; keys absent from the dict are left untouched.

        :param modify_dict: Dict containing parameters which are to be
         modified
        :param pool_id: Id of the pool.
        :return: True, if the operation is successful.
        """

        try:

            if 'new_name' in modify_dict:
                self.powerflex_conn.storage_pool.rename(
                    pool_id, modify_dict['new_name'])
            if 'use_rmcache' in modify_dict:
                self.powerflex_conn.storage_pool.set_use_rmcache(
                    pool_id, modify_dict['use_rmcache'])
            if 'use_rfcache' in modify_dict:
                self.powerflex_conn.storage_pool.set_use_rfcache(
                    pool_id, modify_dict['use_rfcache'])
            if 'media_type' in modify_dict:
                self.powerflex_conn.storage_pool.set_media_type(
                    pool_id, modify_dict['media_type'])
            return True

        except Exception as e:
            err_msg = "Failed to update the storage pool {0} with error " \
                      "{1}".format(pool_id, str(e))
            LOG.error(err_msg)
            self.module.fail_json(msg=err_msg)

    def verify_params(self, pool_details, pd_name, pd_id):
        """Fail the module if the user-supplied protection domain does not
        match the one the existing pool actually belongs to.

        :param pool_details: Details of the storage pool
        :param pd_name: Name of the protection domain
        :param pd_id: Id of the protection domain
        """
        if pd_id and pd_id != pool_details['protectionDomainId']:
            self.module.fail_json(msg="Entered protection domain id does not"
                                      " match with the storage pool's "
                                      "protection domain id. Please enter "
                                      "a correct protection domain id.")

        if pd_name and pd_name != pool_details['protectionDomainName']:
            self.module.fail_json(msg="Entered protection domain name does"
                                      " not match with the storage pool's "
                                      "protection domain name. Please enter"
                                      " a correct protection domain name.")

    def perform_module_operation(self):
        """ Perform different actions on Storage Pool based on user input
            in the playbook """

        pool_name = self.module.params['storage_pool_name']
        pool_id = self.module.params['storage_pool_id']
        pool_new_name = self.module.params['storage_pool_new_name']
        state = self.module.params['state']
        pd_name = self.module.params['protection_domain_name']
        pd_id = self.module.params['protection_domain_id']
        use_rmcache = self.module.params['use_rmcache']
        use_rfcache = self.module.params['use_rfcache']
        media_type = self.module.params['media_type']
        # The REST API expects 'Transitional', not the playbook constant.
        if media_type == "TRANSITIONAL":
            media_type = 'Transitional'

        result = dict(
            storage_pool_details={}
        )
        changed = False
        pd_details = None
        # Resolve the protection domain first so pool lookups can be
        # scoped to it (pool names are only unique per domain).
        if pd_name or pd_id:
            pd_details = self.get_protection_domain(
                protection_domain_id=pd_id,
                protection_domain_name=pd_name)
            if pd_details:
                pd_id = pd_details['id']

        if pool_name is not None and (len(pool_name.strip()) == 0):
            self.module.fail_json(
                msg="Empty or white spaced string provided in "
                    "storage_pool_name. Please provide valid storage"
                    " pool name.")

        # Get the details of the storage pool.
        pool_details = self.get_storage_pool(storage_pool_id=pool_id,
                                             storage_pool_name=pool_name,
                                             pd_id=pd_id)
        if pool_name and pool_details:
            pool_id = pool_details['id']
            self.verify_params(pool_details, pd_name, pd_id)

        # create a storage pool
        if state == 'present' and not pool_details:
            LOG.info("Creating new storage pool")
            # Reaching here with only an id means the id did not resolve;
            # creation requires a name.
            if pool_id:
                self.module.fail_json(
                    msg="storage_pool_name is missing & name required to "
                        "create a storage pool. Please enter a valid "
                        "storage_pool_name.")
            if pool_new_name is not None:
                self.module.fail_json(
                    msg="storage_pool_new_name is passed during creation. "
                        "storage_pool_new_name is not allowed during "
                        "creation of a storage pool.")
            changed = self.create_storage_pool(
                pool_name, pd_id, media_type, use_rfcache, use_rmcache)
            if changed:
                # Re-fetch to learn the id the array assigned to the new pool.
                pool_id = self.get_storage_pool(storage_pool_id=pool_id,
                                                storage_pool_name=pool_name,
                                                pd_id=pd_id)['id']

        # modify the storage pool parameters
        if state == 'present' and pool_details:
            # check if the parameters are to be updated or not
            if pool_new_name is not None and len(pool_new_name.strip()) == 0:
                self.module.fail_json(
                    msg="Empty/White spaced name is not allowed during "
                        "renaming of a storage pool. Please enter a valid "
                        "storage pool new name.")
            modify_dict = to_modify(pool_details, use_rmcache, use_rfcache,
                                    pool_new_name, media_type)
            if bool(modify_dict):
                LOG.info("Modify attributes of storage pool")
                changed = self.modify_storage_pool(pool_id, modify_dict)

        # Delete a storage pool
        if state == 'absent' and pool_details:
            msg = "Deleting storage pool is not supported through" \
                  " ansible module."
            LOG.error(msg)
            self.module.fail_json(msg=msg)

        # Show the updated storage pool details
        if state == 'present':
            pool_details = self.get_storage_pool(storage_pool_id=pool_id)
            # fetching Id from pool details to address a case where
            # protection domain is not passed
            pd_id = pool_details['protectionDomainId']
            pd_name = self.get_protection_domain(
                protection_domain_id=pd_id)['name']
            # adding protection domain name in the pool details
            pool_details['protectionDomainName'] = pd_name
            result['storage_pool_details'] = pool_details
        result['changed'] = changed

        self.module.exit_json(**result)


def to_modify(pool_details, use_rmcache, use_rfcache, new_name, media_type):
    """
    Check whether a parameter is required to be updated.

    Only parameters the user actually supplied (not None) and that differ
    from the pool's current values are included in the returned dict.

    :param media_type: Type of the media supported by the pool.
    :param pool_details: Details of the storage pool
    :param use_rmcache: Enable/Disable RMcache on pool
    :param use_rfcache: Enable/Disable RFcache on pool
    :param new_name: New name for the storage pool
    :return: dict, containing parameters to be modified
    """
    pool_name = pool_details['name']
    pool_use_rfcache = pool_details['useRfcache']
    pool_use_rmcache = pool_details['useRmcache']
    pool_media_type = pool_details['mediaType']
    modify_params = {}

    if new_name is not None and pool_name != new_name:
        modify_params['new_name'] = new_name
    if use_rfcache is not None and pool_use_rfcache != use_rfcache:
        modify_params['use_rfcache'] = use_rfcache
    if use_rmcache is not None and pool_use_rmcache != use_rmcache:
        modify_params['use_rmcache'] = use_rmcache
    if media_type is not None and media_type != pool_media_type:
        modify_params['media_type'] = media_type
    return modify_params


def get_powerflex_storagepool_parameters():
    """Return the argument spec required by the Ansible
    Storage Pool module on PowerFlex."""
    return dict(
        storage_pool_name=dict(required=False, type='str'),
        storage_pool_id=dict(required=False, type='str'),
        protection_domain_name=dict(required=False, type='str'),
        protection_domain_id=dict(required=False, type='str'),
        media_type=dict(required=False, type='str',
                        choices=['HDD', 'SSD', 'TRANSITIONAL']),
        use_rfcache=dict(required=False, type='bool'),
        use_rmcache=dict(required=False, type='bool'),
        storage_pool_new_name=dict(required=False, type='str'),
        state=dict(required=True, type='str', choices=['present', 'absent']))


def main():
    """ Create PowerFlex Storage Pool object and perform action on it
        based on user input from playbook"""
    obj = PowerFlexStoragePool()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
100644 index 00000000..9c1e1cd2 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py @@ -0,0 +1,1599 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Dell Technologies +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing volumes on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +module: volume +version_added: '1.0.0' +short_description: Manage volumes on Dell PowerFlex +description: +- Managing volumes on PowerFlex storage system includes + creating, getting details, modifying attributes and deleting volume. +- It also includes adding/removing snapshot policy, + mapping/unmapping volume to/from SDC and listing + associated snapshots. +author: +- P Srinivas Rao (@srinivas-rao5) +extends_documentation_fragment: + - dellemc.powerflex.powerflex +options: + vol_name: + description: + - The name of the volume. + - Mandatory for create operation. + - It is unique across the PowerFlex array. + - Mutually exclusive with I(vol_id). + type: str + vol_id: + description: + - The ID of the volume. + - Except create operation, all other operations can be performed + using I(vol_id). + - Mutually exclusive with I(vol_name). + type: str + storage_pool_name: + description: + - The name of the storage pool. + - Either name or the id of the storage pool is required for creating a + volume. + - During creation, if storage pool name is provided then either + protection domain name or id must be mentioned along with it. + - Mutually exclusive with I(storage_pool_id). + type: str + storage_pool_id: + description: + - The ID of the storage pool. + - Either name or the id of the storage pool is required for creating + a volume. + - Mutually exclusive with I(storage_pool_name). + type: str + protection_domain_name: + description: + - The name of the protection domain. 
+ - During creation of a volume, if more than one storage pool exists with + the same name then either protection domain name or id must be + mentioned along with it. + - Mutually exclusive with I(protection_domain_id). + type: str + protection_domain_id: + description: + - The ID of the protection domain. + - During creation of a volume, if more than one storage pool exists with + the same name then either protection domain name or id must be + mentioned along with it. + - Mutually exclusive with I(protection_domain_name). + type: str + vol_type: + description: + - Type of volume provisioning. + choices: ["THICK_PROVISIONED", "THIN_PROVISIONED"] + type: str + compression_type: + description: + - Type of the compression method. + choices: ["NORMAL", "NONE"] + type: str + use_rmcache: + description: + - Whether to use RM Cache or not. + type: bool + snapshot_policy_name: + description: + - Name of the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + type: str + snapshot_policy_id: + description: + - ID of the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + type: str + auto_snap_remove_type: + description: + - Whether to remove or detach the snapshot policy. + - To remove/detach snapshot policy, empty + I(snapshot_policy_id)/I(snapshot_policy_name) is to be passed along with + I(auto_snap_remove_type). + - If the snapshot policy name/id is passed empty then + I(auto_snap_remove_type) is defaulted to C(detach). + choices: ['remove', 'detach'] + type: str + size: + description: + - The size of the volume. + - Size of the volume will be assigned as higher multiple of 8 GB. + type: int + cap_unit: + description: + - The unit of the volume size. It defaults to 'GB'. 
+ choices: ['GB' , 'TB'] + type: str + vol_new_name: + description: + - New name of the volume. Used to rename the volume. + type: str + allow_multiple_mappings: + description: + - Specifies whether to allow or not allow multiple mappings. + - If the volume is mapped to one SDC then for every new mapping + I(allow_multiple_mappings) has to be passed as True. + type: bool + sdc: + description: + - Specifies SDC parameters. + type: list + elements: dict + suboptions: + sdc_name: + description: + - Name of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + sdc_id: + description: + - ID of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_name) and I(sdc_ip). + type: str + sdc_ip: + description: + - IP of the SDC. + - Specify either I(sdc_name), I(sdc_id) or I(sdc_ip). + - Mutually exclusive with I(sdc_id) and I(sdc_ip). + type: str + access_mode: + description: + - Define the access mode for all mappings of the volume. + choices: ['READ_WRITE', 'READ_ONLY', 'NO_ACCESS'] + type: str + bandwidth_limit: + description: + - Limit of volume network bandwidth. + - Need to mention in multiple of 1024 Kbps. + - To set no limit, 0 is to be passed. + type: int + iops_limit: + description: + - Limit of volume IOPS. + - Minimum IOPS limit is 11 and specify 0 for unlimited iops. + type: int + sdc_state: + description: + - Mapping state of the SDC. + choices: ['mapped', 'unmapped'] + type: str + delete_snapshots: + description: + - If C(True), the volume and all its dependent snapshots will be deleted. + - If C(False), only the volume will be deleted. + - It can be specified only when the I(state) is C(absent). + - It defaults to C(False), if not specified. + type: bool + state: + description: + - State of the volume. + choices: ['present', 'absent'] + required: true + type: str +notes: + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' +- name: Create a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + protection_domain_name: "pd_1" + vol_type: "THICK_PROVISIONED" + compression_type: "NORMAL" + use_rmcache: True + size: 16 + state: "present" + +- name: Map a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + allow_multiple_mappings: True + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + sdc_state: "mapped" + state: "present" + +- name: Unmap a SDC to volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + sdc_state: "unmapped" + state: "present" + +- name: Map multiple SDCs to a volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + protection_domain_name: "pd_1" + sdc: + - sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764" + access_mode: "READ_WRITE" + bandwidth_limit: 2048 + iops_limit: 20 + - sdc_ip: "198.10.xxx.xxx" + access_mode: "READ_ONLY" + allow_multiple_mappings: True + sdc_state: "mapped" + state: "present" + +- name: Get the details of the volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_id: "fe6c8b7100000005" + state: "present" + +- name: Modify the details of the Volume + dellemc.powerflex.volume: + hostname: 
"{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + storage_pool_name: "pool_1" + new_vol_name: "new_sample_volume" + size: 64 + state: "present" + +- name: Delete the Volume + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: False + state: "absent" + +- name: Delete the Volume and all its dependent snapshots + dellemc.powerflex.volume: + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "sample_volume" + delete_snapshots: True + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: 'false' +volume_details: + description: Details of the volume. + returned: When volume exists + type: dict + contains: + id: + description: The ID of the volume. + type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: dict + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + accessMode: + description: Mapping access mode for the specified volume. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int + name: + description: Name of the volume. + type: str + sizeInKb: + description: Size of the volume in Kb. + type: int + sizeInGb: + description: Size of the volume in Gb. + type: int + storagePoolId: + description: ID of the storage pool in which volume resides. + type: str + storagePoolName: + description: Name of the storage pool in which volume resides. 
+ type: str + protectionDomainId: + description: ID of the protection domain in which volume resides. + type: str + protectionDomainName: + description: Name of the protection domain in which volume resides. + type: str + snapshotPolicyId: + description: ID of the snapshot policy associated with volume. + type: str + snapshotPolicyName: + description: Name of the snapshot policy associated with volume. + type: str + snapshotsList: + description: List of snapshots associated with the volume. + type: str + "statistics": + description: Statistics details of the storage pool. + type: dict + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. + type: int + sample: { + "accessModeLimit": "ReadWrite", + "ancestorVolumeId": null, + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": null, + "creationTime": 1631618520, + "dataLayout": "MediumGranularity", + "id": "cdd883cf00000002", + "links": [ + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002/relationships + /Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible-volume-1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInGB": 16, + "sizeInKb": 16777216, + "snapshotPolicyId": null, + 
"snapshotPolicyName": null, + "snapshotsList": [ + { + "accessModeLimit": "ReadOnly", + "ancestorVolumeId": "cdd883cf00000002", + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": "22f1e80c00000001", + "creationTime": 1631619229, + "dataLayout": "MediumGranularity", + "id": "cdd883d000000004", + "links": [ + { + "href": "/api/instances/Volume::cdd883d000000004", + "rel": "self" + }, + { + "href": "/api/instances/Volume::cdd883d000000004 + /relationships/Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/Volume::cdd883cf00000002", + "rel": "/api/parent/relationship/ancestorVolumeId" + }, + { + "href": "/api/instances/VTree::6e86255c00000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": null, + "name": "ansible_vol_snap_1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [], + "secureSnapshotExpTime": 0, + "sizeInKb": 16777216, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "Snapshot", + "vtreeId": "6e86255c00000001" + } + ], + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + 
"rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "storagePoolId": "e0d8f6c900000000", + "storagePoolName": "pool1", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "ThinProvisioned", + "vtreeId": "6e86255c00000001" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ + import utils +import copy + +LOG = utils.get_logger('volume') + + +class PowerFlexVolume(object): + """Class with volume operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_powerflex_gateway_host_parameters() + self.module_params.update(get_powerflex_volume_parameters()) + + mut_ex_args = [['vol_name', 'vol_id'], + ['storage_pool_name', 'storage_pool_id'], + ['protection_domain_name', 'protection_domain_id'], + ['snapshot_policy_name', 'snapshot_policy_id'], + ['vol_id', 'storage_pool_name'], + ['vol_id', 'storage_pool_id'], + ['vol_id', 'protection_domain_name'], + ['vol_id', 'protection_domain_id']] + + required_together_args = [['sdc', 'sdc_state']] + + required_one_of_args = [['vol_name', 'vol_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + 
required_together=required_together_args, + required_one_of=required_one_of_args) + + utils.ensure_required_libs(self.module) + + try: + self.powerflex_conn = utils.get_powerflex_gateway_host_connection( + self.module.params) + LOG.info("Got the PowerFlex system connection object instance") + except Exception as e: + LOG.error(str(e)) + self.module.fail_json(msg=str(e)) + + def get_protection_domain(self, protection_domain_name=None, + protection_domain_id=None): + """Get protection domain details + :param protection_domain_name: Name of the protection domain + :param protection_domain_id: ID of the protection domain + :return: Protection domain details + """ + name_or_id = protection_domain_id if protection_domain_id \ + else protection_domain_name + try: + pd_details = None + if protection_domain_id: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'id': protection_domain_id}) + + if protection_domain_name: + pd_details = self.powerflex_conn.protection_domain.get( + filter_fields={'name': protection_domain_name}) + + if not pd_details: + err_msg = "Unable to find the protection domain with {0}. 
" \ + "Please enter a valid protection domain" \ + " name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + + return pd_details[0] + + except Exception as e: + errormsg = "Failed to get the protection domain {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_snapshot_policy(self, snap_pol_id=None, snap_pol_name=None): + """Get snapshot policy details + :param snap_pol_name: Name of the snapshot policy + :param snap_pol_id: ID of the snapshot policy + :return: snapshot policy details + """ + name_or_id = snap_pol_id if snap_pol_id else snap_pol_name + try: + snap_pol_details = None + if snap_pol_id: + snap_pol_details = self.powerflex_conn.snapshot_policy.get( + filter_fields={'id': snap_pol_id}) + + if snap_pol_name: + snap_pol_details = self.powerflex_conn.snapshot_policy.get( + filter_fields={'name': snap_pol_name}) + + if not snap_pol_details: + err_msg = "Unable to find the snapshot policy with {0}. 
" \ + "Please enter a valid snapshot policy" \ + " name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + + return snap_pol_details[0] + + except Exception as e: + errormsg = "Failed to get the snapshot policy {0} with" \ + " error {1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, + protection_domain_id=None): + """Get storage pool details + :param protection_domain_id: ID of the protection domain + :param storage_pool_name: The name of the storage pool + :param storage_pool_id: The storage pool id + :return: Storage pool details + """ + name_or_id = storage_pool_id if storage_pool_id \ + else storage_pool_name + try: + sp_details = None + if storage_pool_id: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'id': storage_pool_id}) + + if storage_pool_name: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'name': storage_pool_name}) + + if len(sp_details) > 1 and protection_domain_id is None: + err_msg = "More than one storage pool found with {0}," \ + " Please provide protection domain Name/Id" \ + " to fetch the unique" \ + " pool".format(storage_pool_name) + self.module.fail_json(msg=err_msg) + + if len(sp_details) > 1 and protection_domain_id: + sp_details = self.powerflex_conn.storage_pool.get( + filter_fields={'name': storage_pool_name, + 'protectionDomainId': + protection_domain_id}) + if not sp_details: + err_msg = "Unable to find the storage pool with {0}. 
" \ + "Please enter a valid pool " \ + "name/id.".format(name_or_id) + self.module.fail_json(msg=err_msg) + return sp_details[0] + + except Exception as e: + errormsg = "Failed to get the storage pool {0} with error " \ + "{1}".format(name_or_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_volume(self, vol_name=None, vol_id=None): + """Get volume details + :param vol_name: Name of the volume + :param vol_id: ID of the volume + :return: Details of volume if exist. + """ + + id_or_name = vol_id if vol_id else vol_name + + try: + if vol_name: + volume_details = self.powerflex_conn.volume.get( + filter_fields={'name': vol_name}) + else: + volume_details = self.powerflex_conn.volume.get( + filter_fields={'id': vol_id}) + + if len(volume_details) == 0: + msg = "Volume with identifier {0} not found".format( + id_or_name) + LOG.info(msg) + return None + + # Append size in GB in the volume details + if 'sizeInKb' in volume_details[0] and \ + volume_details[0]['sizeInKb']: + volume_details[0]['sizeInGB'] = utils.get_size_in_gb( + volume_details[0]['sizeInKb'], 'KB') + + # Append storage pool name and id. 
+ sp = None + pd_id = None + if 'storagePoolId' in volume_details[0] and \ + volume_details[0]['storagePoolId']: + sp = \ + self.get_storage_pool(volume_details[0]['storagePoolId']) + if len(sp) > 0: + volume_details[0]['storagePoolName'] = sp['name'] + pd_id = sp['protectionDomainId'] + + # Append protection domain name and id + if sp and 'protectionDomainId' in sp and \ + sp['protectionDomainId']: + pd = self.get_protection_domain(protection_domain_id=pd_id) + volume_details[0]['protectionDomainId'] = pd_id + volume_details[0]['protectionDomainName'] = pd['name'] + + # Append snapshot policy name and id + if volume_details[0]['snplIdOfSourceVolume'] is not None: + snap_policy_id = volume_details[0]['snplIdOfSourceVolume'] + volume_details[0]['snapshotPolicyId'] = snap_policy_id + volume_details[0]['snapshotPolicyName'] = \ + self.get_snapshot_policy(snap_policy_id)['name'] + + return volume_details[0] + + except Exception as e: + error_msg = "Failed to get the volume {0} with error {1}" + error_msg = error_msg.format(id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_sdc_id(self, sdc_name=None, sdc_ip=None, sdc_id=None): + """Get the SDC ID + :param sdc_name: The name of the SDC + :param sdc_ip: The IP of the SDC + :param sdc_id: The ID of the SDC + :return: The ID of the SDC + """ + + if sdc_name: + id_ip_name = sdc_name + elif sdc_ip: + id_ip_name = sdc_ip + else: + id_ip_name = sdc_id + + try: + if sdc_name: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'name': sdc_name}) + elif sdc_ip: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'sdcIp': sdc_ip}) + else: + sdc_details = self.powerflex_conn.sdc.get( + filter_fields={'id': sdc_id}) + + if len(sdc_details) == 0: + error_msg = "Unable to find SDC with identifier {0}".format( + id_ip_name) + self.module.fail_json(msg=error_msg) + return sdc_details[0]['id'] + except Exception as e: + errormsg = "Failed to get the SDC {0} with error " \ + 
"{1}".format(id_ip_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_volume(self, vol_name, pool_id, size, vol_type=None, + use_rmcache=None, comp_type=None): + """Create volume + :param use_rmcache: Boolean indicating whether to use RM cache. + :param comp_type: Type of compression method for the volume. + :param vol_type: Type of volume. + :param size: Size of the volume. + :param pool_id: Id of the storage pool. + :param vol_name: The name of the volume. + :return: Boolean indicating if create operation is successful + """ + try: + if vol_name is None or len(vol_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid volume name.") + + if not size: + self.module.fail_json(msg="Size is a mandatory parameter " + "for creating a volume. Please " + "enter a valid size") + pool_data_layout = None + if pool_id: + pool_details = self.get_storage_pool(storage_pool_id=pool_id) + pool_data_layout = pool_details['dataLayout'] + if comp_type and pool_data_layout and \ + pool_data_layout != "FineGranularity": + err_msg = "compression_type for volume can only be " \ + "mentioned when storage pools have Fine " \ + "Granularity layout. Storage Pool found" \ + " with {0}".format(pool_data_layout) + self.module.fail_json(msg=err_msg) + + # Basic volume created. 
+ self.powerflex_conn.volume.create( + storage_pool_id=pool_id, size_in_gb=size, name=vol_name, + volume_type=vol_type, use_rmcache=use_rmcache, + compression_method=comp_type) + return True + + except Exception as e: + errormsg = "Create volume {0} operation failed with " \ + "error {1}".format(vol_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_access_mode(self, vol_id, access_mode_list): + """Modify access mode of SDCs mapped to volume + :param vol_id: The volume id + :param access_mode_list: List containing SDC ID's + whose access mode is to modified + :return: Boolean indicating if modifying access + mode is successful + """ + + try: + changed = False + for temp in access_mode_list: + if temp['accessMode']: + self.powerflex_conn.volume.set_access_mode_for_sdc( + volume_id=vol_id, sdc_id=temp['sdc_id'], + access_mode=temp['accessMode']) + changed = True + return changed + except Exception as e: + errormsg = "Modify access mode of SDC operation failed " \ + "with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_limits(self, payload): + """Modify IOPS and bandwidth limits of SDC's mapped to volume + :param payload: Dict containing SDC ID's whose bandwidth and + IOPS is to modified + :return: Boolean indicating if modifying limits is successful + """ + + try: + changed = False + if payload['bandwidth_limit'] is not None or \ + payload['iops_limit'] is not None: + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed + except Exception as e: + errormsg = "Modify bandwidth/iops limits of SDC %s operation " \ + "failed with error %s" % (payload['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_volume(self, vol_id, remove_mode): + """Delete volume + :param vol_id: The volume id + :param remove_mode: Removal mode for the volume + :return: Boolean indicating if delete operation is successful + 
""" + + try: + self.powerflex_conn.volume.delete(vol_id, remove_mode) + return True + except Exception as e: + errormsg = "Delete volume {0} operation failed with " \ + "error {1}".format(vol_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def unmap_volume_from_sdc(self, volume, sdc): + """Unmap SDC's from volume + :param volume: volume details + :param sdc: List of SDCs to be unmapped + :return: Boolean indicating if unmap operation is successful + """ + + current_sdcs = volume['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_id = None + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id in current_sdc_ids: + sdc_id_list.append(sdc_id) + + LOG.info("SDC IDs to remove %s", sdc_id_list) + + if len(sdc_id_list) == 0: + return False + + try: + for sdc_id in sdc_id_list: + self.powerflex_conn.volume.remove_mapped_sdc( + volume['id'], sdc_id) + return True + except Exception as e: + errormsg = "Unmap SDC {0} from volume {1} failed with error " \ + "{2}".format(sdc_id, volume['id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def map_volume_to_sdc(self, volume, sdc): + """Map SDC's to volume + :param volume: volume details + :param sdc: List of SDCs + :return: Boolean indicating if mapping operation is successful + """ + + current_sdcs = volume['mappedSdcInfo'] + current_sdc_ids = [] + sdc_id_list = [] + sdc_map_list = [] + sdc_modify_list1 = [] + sdc_modify_list2 = [] + + if current_sdcs: + for temp in current_sdcs: + current_sdc_ids.append(temp['sdcId']) + + for temp in sdc: + if 'sdc_name' in temp and temp['sdc_name']: + sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name']) + elif 
'sdc_ip' in temp and temp['sdc_ip']: + sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip']) + else: + sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id']) + if sdc_id not in current_sdc_ids: + sdc_id_list.append(sdc_id) + temp['sdc_id'] = sdc_id + if 'access_mode' in temp: + temp['access_mode'] = \ + get_access_mode(temp['access_mode']) + if 'bandwidth_limit' not in temp: + temp['bandwidth_limit'] = None + if 'iops_limit' not in temp: + temp['iops_limit'] = None + sdc_map_list.append(temp) + else: + access_mode_dict, limits_dict = check_for_sdc_modification( + volume, sdc_id, temp) + if access_mode_dict: + sdc_modify_list1.append(access_mode_dict) + if limits_dict: + sdc_modify_list2.append(limits_dict) + + LOG.info("SDC to add: %s", sdc_map_list) + + if not sdc_map_list: + return False, sdc_modify_list1, sdc_modify_list2 + + try: + changed = False + for sdc in sdc_map_list: + payload = { + "volume_id": volume['id'], + "sdc_id": sdc['sdc_id'], + "access_mode": sdc['access_mode'], + "allow_multiple_mappings": + self.module.params['allow_multiple_mappings'] + } + self.powerflex_conn.volume.add_mapped_sdc(**payload) + + if sdc['bandwidth_limit'] or sdc['iops_limit']: + payload = { + "volume_id": volume['id'], + "sdc_id": sdc['sdc_id'], + "bandwidth_limit": sdc['bandwidth_limit'], + "iops_limit": sdc['iops_limit'] + } + + self.powerflex_conn.volume.set_mapped_sdc_limits(**payload) + changed = True + return changed, sdc_modify_list1, sdc_modify_list2 + except Exception as e: + errormsg = "Mapping volume {0} to SDC {1} " \ + "failed with error {2}".format(volume['name'], + sdc['sdc_id'], str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_parameters(self, auto_snap_remove_type, snap_pol_id, + snap_pol_name, delete_snaps, state): + """Validate the input parameters""" + + sdc = self.module.params['sdc'] + cap_unit = self.module.params['cap_unit'] + size = self.module.params['size'] + + if sdc: + for temp in sdc: + if (all([temp['sdc_id'], 
temp['sdc_ip']]) or + all([temp['sdc_id'], temp['sdc_name']]) or + all([temp['sdc_ip'], temp['sdc_name']])): + self.module.fail_json(msg="sdc_id, sdc_ip and sdc_name " + "are mutually exclusive") + + if (cap_unit is not None) and not size: + self.module.fail_json(msg="cap_unit can be specified along " + "with size only. Please enter a valid" + " value for size") + + if auto_snap_remove_type and snap_pol_name is None \ + and snap_pol_id is None: + err_msg = "To remove/detach snapshot policy, please provide" \ + " empty snapshot policy name/id along with " \ + "auto_snap_remove_type parameter" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if state == "present" and delete_snaps is not None: + self.module.fail_json( + msg="delete_snapshots can be specified only when the state" + " is passed as absent.") + + def modify_volume(self, vol_id, modify_dict): + """ + Update the volume attributes + :param vol_id: Id of the volume + :param modify_dict: Dictionary containing the attributes of + volume which are to be updated + :return: True, if the operation is successful + """ + try: + msg = "Dictionary containing attributes which are to be" \ + " updated is {0}.".format(str(modify_dict)) + LOG.info(msg) + + if 'auto_snap_remove_type' in modify_dict: + snap_type = modify_dict['auto_snap_remove_type'] + msg = "Removing/detaching the snapshot policy from a " \ + "volume. 
auto_snap_remove_type: {0} and snapshot " \ + "policy id: " \ + "{1}".format(snap_type, modify_dict['snap_pol_id']) + LOG.info(msg) + self.powerflex_conn.snapshot_policy.remove_source_volume( + modify_dict['snap_pol_id'], vol_id, snap_type) + msg = "The snapshot policy has been {0}ed " \ + "successfully".format(snap_type) + LOG.info(msg) + + if 'auto_snap_remove_type' not in modify_dict\ + and 'snap_pol_id' in modify_dict: + self.powerflex_conn.snapshot_policy.add_source_volume( + modify_dict['snap_pol_id'], vol_id) + msg = "Attached the snapshot policy {0} to volume" \ + " successfully.".format(modify_dict['snap_pol_id']) + LOG.info(msg) + + if 'new_name' in modify_dict: + self.powerflex_conn.volume.rename(vol_id, + modify_dict['new_name']) + msg = "The name of the volume is updated" \ + " to {0} sucessfully.".format(modify_dict['new_name']) + LOG.info(msg) + + if 'new_size' in modify_dict: + self.powerflex_conn.volume.extend(vol_id, + modify_dict['new_size']) + msg = "The size of the volume is extended to {0} " \ + "sucessfully.".format(str(modify_dict['new_size'])) + LOG.info(msg) + + if 'use_rmcache' in modify_dict: + self.powerflex_conn.volume.set_use_rmcache( + vol_id, modify_dict['use_rmcache']) + msg = "The use RMcache is updated to {0}" \ + " sucessfully.".format(modify_dict['use_rmcache']) + LOG.info(msg) + + if 'comp_type' in modify_dict: + self.powerflex_conn.volume.set_compression_method( + vol_id, modify_dict['comp_type']) + msg = "The compression method is updated to {0}" \ + " successfully.".format(modify_dict['comp_type']) + LOG.info(msg) + return True + + except Exception as e: + err_msg = "Failed to update the volume {0}" \ + " with error {1}".format(vol_id, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def to_modify(self, vol_details, new_size, use_rmcache, comp_type, + new_name, snap_pol_id, + auto_snap_remove_type): + """ + + :param vol_details: Details of the volume + :param new_size: Size of the volume + :param 
use_rmcache: Bool value of use rm cache + :param comp_type: Type of compression method + :param new_name: The new name of the volume + :param snap_pol_id: Id of the snapshot policy + :param auto_snap_remove_type: Whether to remove or detach the policy + :return: Dictionary containing the attributes of + volume which are to be updated + """ + modify_dict = {} + + if comp_type: + pool_id = vol_details['storagePoolId'] + pool_details = self.get_storage_pool(storage_pool_id=pool_id) + pool_data_layout = pool_details['dataLayout'] + if pool_data_layout != "FineGranularity": + err_msg = "compression_type for volume can only be " \ + "mentioned when storage pools have Fine " \ + "Granularity layout. Storage Pool found" \ + " with {0}".format(pool_data_layout) + self.module.fail_json(msg=err_msg) + + if comp_type != vol_details['compressionMethod']: + modify_dict['comp_type'] = comp_type + + if use_rmcache is not None and \ + vol_details['useRmcache'] != use_rmcache: + modify_dict['use_rmcache'] = use_rmcache + + vol_size_in_gb = utils.get_size_in_gb(vol_details['sizeInKb'], 'KB') + + if new_size is not None and \ + not ((vol_size_in_gb - 8) < new_size <= vol_size_in_gb): + modify_dict['new_size'] = new_size + + if new_name is not None: + if new_name is None or len(new_name.strip()) == 0: + self.module.fail_json(msg="Please provide valid volume " + "name.") + if new_name != vol_details['name']: + modify_dict['new_name'] = new_name + + if snap_pol_id is not None and snap_pol_id == "" and \ + auto_snap_remove_type and vol_details['snplIdOfSourceVolume']: + modify_dict['auto_snap_remove_type'] = auto_snap_remove_type + modify_dict['snap_pol_id'] = \ + vol_details['snplIdOfSourceVolume'] + + if snap_pol_id is not None and snap_pol_id != "": + if auto_snap_remove_type and vol_details['snplIdOfSourceVolume']: + err_msg = "To remove/detach a snapshot policy, provide the" \ + " snapshot policy name/id as empty string" + self.module.fail_json(msg=err_msg) + if auto_snap_remove_type 
is None and \ + vol_details['snplIdOfSourceVolume'] is None: + modify_dict['snap_pol_id'] = snap_pol_id + + return modify_dict + + def verify_params(self, vol_details, snap_pol_name, snap_pol_id, pd_name, + pd_id, pool_name, pool_id): + """ + :param vol_details: Details of the volume + :param snap_pol_name: Name of the snapshot policy + :param snap_pol_id: Id of the snapshot policy + :param pd_name: Name of the protection domain + :param pd_id: Id of the protection domain + :param pool_name: Name of the storage pool + :param pool_id: Id of the storage pool + """ + + if snap_pol_id and 'snapshotPolicyId' in vol_details and \ + snap_pol_id != vol_details['snapshotPolicyId']: + self.module.fail_json(msg="Entered snapshot policy id does not" + " match with the snapshot policy's id" + " attached to the volume. Please enter" + " a correct snapshot policy id.") + + if snap_pol_name and 'snapshotPolicyId' in vol_details and \ + snap_pol_name != vol_details['snapshotPolicyName']: + self.module.fail_json(msg="Entered snapshot policy name does not" + " match with the snapshot policy's " + "name attached to the volume. Please" + " enter a correct snapshot policy" + " name.") + + if pd_id and pd_id != vol_details['protectionDomainId']: + self.module.fail_json(msg="Entered protection domain id does not" + " match with the volume's protection" + " domain id. Please enter a correct" + " protection domain id.") + + if pool_id and pool_id != vol_details['storagePoolId']: + self.module.fail_json(msg="Entered storage pool id does" + " not match with the volume's " + "storage pool id. Please enter" + " a correct storage pool id.") + + if pd_name and pd_name != vol_details['protectionDomainName']: + self.module.fail_json(msg="Entered protection domain name does" + " not match with the volume's " + "protection domain name. 
Please enter" + " a correct protection domain name.") + + if pool_name and pool_name != vol_details['storagePoolName']: + self.module.fail_json(msg="Entered storage pool name does" + " not match with the volume's " + "storage pool name. Please enter" + " a correct storage pool name.") + + def perform_module_operation(self): + """ + Perform different actions on volume based on parameters passed in + the playbook + """ + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + vol_type = self.module.params['vol_type'] + compression_type = self.module.params['compression_type'] + sp_name = self.module.params['storage_pool_name'] + sp_id = self.module.params['storage_pool_id'] + pd_name = self.module.params['protection_domain_name'] + pd_id = self.module.params['protection_domain_id'] + snap_pol_name = self.module.params['snapshot_policy_name'] + snap_pol_id = self.module.params['snapshot_policy_id'] + auto_snap_remove_type = self.module.params['auto_snap_remove_type'] + use_rmcache = self.module.params['use_rmcache'] + size = self.module.params['size'] + cap_unit = self.module.params['cap_unit'] + vol_new_name = self.module.params['vol_new_name'] + sdc = copy.deepcopy(self.module.params['sdc']) + sdc_state = self.module.params['sdc_state'] + delete_snapshots = self.module.params['delete_snapshots'] + state = self.module.params['state'] + + if compression_type: + compression_type = compression_type.capitalize() + if vol_type: + vol_type = get_vol_type(vol_type) + if auto_snap_remove_type: + auto_snap_remove_type = auto_snap_remove_type.capitalize() + + # result is a dictionary to contain end state and volume details + changed = False + result = dict( + changed=False, + volume_details={} + ) + self.validate_parameters(auto_snap_remove_type, snap_pol_id, + snap_pol_name, delete_snapshots, state) + + if not auto_snap_remove_type and\ + (snap_pol_name == "" or snap_pol_id == ""): + auto_snap_remove_type = "Detach" + if size: + if not cap_unit: + 
cap_unit = 'GB' + + if cap_unit == 'TB': + size = size * 1024 + + if pd_name: + pd_details = self.get_protection_domain(pd_name) + if pd_details: + pd_id = pd_details['id'] + msg = "Fetched the protection domain details with id {0}," \ + " name {1}".format(pd_id, pd_name) + LOG.info(msg) + + if sp_name: + sp_details = self.get_storage_pool(storage_pool_name=sp_name, + protection_domain_id=pd_id) + if sp_details: + sp_id = sp_details['id'] + msg = "Fetched the storage pool details id {0}," \ + " name {1}".format(sp_id, sp_name) + LOG.info(msg) + + if snap_pol_name is not None: + snap_pol_details = None + if snap_pol_name: + snap_pol_details = \ + self.get_snapshot_policy(snap_pol_name=snap_pol_name) + if snap_pol_details: + snap_pol_id = snap_pol_details['id'] + + if snap_pol_name == "": + snap_pol_id = "" + msg = "Fetched the snapshot policy details with id {0}," \ + " name {1}".format(snap_pol_id, snap_pol_name) + LOG.info(msg) + + # get volume details + volume_details = self.get_volume(vol_name, vol_id) + if volume_details: + vol_id = volume_details['id'] + msg = "Fetched the volume details {0}".format(str(volume_details)) + LOG.info(msg) + + if vol_name and volume_details: + self.verify_params( + volume_details, snap_pol_name, snap_pol_id, pd_name, pd_id, + sp_name, sp_id) + + # create operation + create_changed = False + if state == 'present' and not volume_details: + if vol_id: + self.module.fail_json(msg="Creation of volume is allowed " + "using vol_name only, " + "vol_id given.") + + if vol_new_name: + self.module.fail_json( + msg="vol_new_name parameter is not supported during " + "creation of a volume. 
Try renaming the volume after" + " the creation.") + create_changed = self.create_volume(vol_name, sp_id, size, + vol_type, use_rmcache, + compression_type) + if create_changed: + volume_details = self.get_volume(vol_name) + vol_id = volume_details['id'] + msg = "Volume created successfully, fetched " \ + "volume details {0}".format(str(volume_details)) + LOG.info(msg) + + # checking if basic volume parameters are modified or not. + modify_dict = {} + if volume_details and state == 'present': + modify_dict = self.to_modify( + volume_details, size, use_rmcache, compression_type, + vol_new_name, snap_pol_id, auto_snap_remove_type) + msg = "Parameters to be modified are as" \ + " follows: {0}".format(str(modify_dict)) + LOG.info(msg) + + # Mapping the SDCs to a volume + mode_changed = False + limits_changed = False + map_changed = False + if state == 'present' and volume_details and sdc and \ + sdc_state == 'mapped': + map_changed, access_mode_list, limits_list = \ + self.map_volume_to_sdc(volume_details, sdc) + if len(access_mode_list) > 0: + mode_changed = self.modify_access_mode(vol_id, + access_mode_list) + if len(limits_list) > 0: + for temp in limits_list: + payload = { + "volume_id": volume_details['id'], + "sdc_id": temp['sdc_id'], + "bandwidth_limit": temp['bandwidth_limit'], + "iops_limit": temp['iops_limit'] + } + limits_changed = self.modify_limits(payload) + + # Unmap the SDCs to a volume + unmap_changed = False + if state == 'present' and volume_details and sdc and \ + sdc_state == 'unmapped': + unmap_changed = self.unmap_volume_from_sdc(volume_details, sdc) + + # Update the basic volume attributes + modify_changed = False + if modify_dict and state == 'present': + modify_changed = self.modify_volume(vol_id, modify_dict) + + # delete operation + del_changed = False + if state == 'absent' and volume_details: + if delete_snapshots is True: + delete_snapshots = 'INCLUDING_DESCENDANTS' + if delete_snapshots is None or delete_snapshots is False: + 
delete_snapshots = 'ONLY_ME' + del_changed = \ + self.delete_volume(vol_id, delete_snapshots) + + if modify_changed or unmap_changed or map_changed or create_changed\ + or del_changed or mode_changed or limits_changed: + changed = True + + # Returning the updated volume details + if state == 'present': + vol_details = self.show_output(vol_id) + result['volume_details'] = vol_details + result['changed'] = changed + self.module.exit_json(**result) + + def show_output(self, vol_id): + """Show volume details + :param vol_id: ID of the volume + :return: Details of volume if exist. + """ + + try: + volume_details = self.powerflex_conn.volume.get( + filter_fields={'id': vol_id}) + + if len(volume_details) == 0: + msg = "Volume with identifier {0} not found".format( + vol_id) + LOG.error(msg) + return None + + # Append size in GB in the volume details + if 'sizeInKb' in volume_details[0] and \ + volume_details[0]['sizeInKb']: + volume_details[0]['sizeInGB'] = utils.get_size_in_gb( + volume_details[0]['sizeInKb'], 'KB') + + # Append storage pool name and id. 
+ sp = None + pd_id = None + if 'storagePoolId' in volume_details[0] and \ + volume_details[0]['storagePoolId']: + sp = \ + self.get_storage_pool(volume_details[0]['storagePoolId']) + if len(sp) > 0: + volume_details[0]['storagePoolName'] = sp['name'] + pd_id = sp['protectionDomainId'] + + # Append protection domain name and id + if sp and 'protectionDomainId' in sp and \ + sp['protectionDomainId']: + pd = self.get_protection_domain(protection_domain_id=pd_id) + volume_details[0]['protectionDomainId'] = pd_id + volume_details[0]['protectionDomainName'] = pd['name'] + + # Append snapshot policy name and id + if volume_details[0]['snplIdOfSourceVolume'] is not None: + snap_policy_id = volume_details[0]['snplIdOfSourceVolume'] + volume_details[0]['snapshotPolicyId'] = snap_policy_id + volume_details[0]['snapshotPolicyName'] = \ + self.get_snapshot_policy(snap_policy_id)['name'] + else: + volume_details[0]['snapshotPolicyId'] = None + volume_details[0]['snapshotPolicyName'] = None + + # Append the list of snapshots associated with the volume + list_of_snaps = self.powerflex_conn.volume.get( + filter_fields={'ancestorVolumeId': volume_details[0]['id']}) + volume_details[0]['snapshotsList'] = list_of_snaps + + # Append statistics + statistics = self.powerflex_conn.volume.get_statistics(volume_details[0]['id']) + volume_details[0]['statistics'] = statistics if statistics else {} + + return volume_details[0] + + except Exception as e: + error_msg = "Failed to get the volume {0} with error {1}" + error_msg = error_msg.format(vol_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + +def check_for_sdc_modification(volume, sdc_id, sdc_details): + """ + :param volume: The volume details + :param sdc_id: The ID of the SDC + :param sdc_details: The details of SDC + :return: Dictionary with SDC attributes to be modified + """ + access_mode_dict = dict() + limits_dict = dict() + + for sdc in volume['mappedSdcInfo']: + if sdc['sdcId'] == sdc_id: + if 
sdc['accessMode'] != \ + get_access_mode(sdc_details['access_mode']): + access_mode_dict['sdc_id'] = sdc_id + access_mode_dict['accessMode'] = get_access_mode( + sdc_details['access_mode']) + if sdc['limitIops'] != sdc_details['iops_limit'] or \ + sdc['limitBwInMbps'] != sdc_details['bandwidth_limit']: + limits_dict['sdc_id'] = sdc_id + limits_dict['iops_limit'] = None + limits_dict['bandwidth_limit'] = None + if sdc['limitIops'] != sdc_details['iops_limit']: + limits_dict['iops_limit'] = sdc_details['iops_limit'] + if sdc['limitBwInMbps'] != \ + get_limits_in_mb(sdc_details['bandwidth_limit']): + limits_dict['bandwidth_limit'] = \ + sdc_details['bandwidth_limit'] + break + return access_mode_dict, limits_dict + + +def get_limits_in_mb(limits): + """ + :param limits: Limits in KB + :return: Limits in MB + """ + + if limits: + return limits / 1024 + + +def get_access_mode(access_mode): + """ + :param access_mode: Access mode of the SDC + :return: The enum for the access mode + """ + + access_mode_dict = { + "READ_WRITE": "ReadWrite", + "READ_ONLY": "ReadOnly", + "NO_ACCESS": "NoAccess" + } + return access_mode_dict.get(access_mode) + + +def get_vol_type(vol_type): + """ + :param vol_type: Type of the volume + :return: Corresponding value for the entered vol_type + """ + vol_type_dict = { + "THICK_PROVISIONED": "ThickProvisioned", + "THIN_PROVISIONED": "ThinProvisioned", + } + return vol_type_dict.get(vol_type) + + +def get_powerflex_volume_parameters(): + """This method provide parameter required for the volume + module on PowerFlex""" + return dict( + vol_name=dict(), vol_id=dict(), + storage_pool_name=dict(), storage_pool_id=dict(), + protection_domain_name=dict(), protection_domain_id=dict(), + use_rmcache=dict(type='bool'), snapshot_policy_name=dict(), + snapshot_policy_id=dict(), + size=dict(type='int'), + cap_unit=dict(choices=['GB', 'TB']), + vol_type=dict(choices=['THICK_PROVISIONED', 'THIN_PROVISIONED']), + compression_type=dict(choices=['NORMAL', 'NONE']), 
+ auto_snap_remove_type=dict(choices=['detach', 'remove']), + vol_new_name=dict(), + allow_multiple_mappings=dict(type='bool'), + delete_snapshots=dict(type='bool'), + sdc=dict( + type='list', elements='dict', options=dict( + sdc_id=dict(), sdc_ip=dict(), + sdc_name=dict(), + access_mode=dict(choices=['READ_WRITE', 'READ_ONLY', + 'NO_ACCESS']), + bandwidth_limit=dict(type='int'), + iops_limit=dict(type='int') + ) + ), + sdc_state=dict(choices=['mapped', 'unmapped']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create PowerFlex volume object and perform actions on it + based on user input from playbook""" + obj = PowerFlexVolume() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/powerflex/requirements.txt b/ansible_collections/dellemc/powerflex/requirements.txt new file mode 100644 index 00000000..d0fb0f63 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/requirements.txt @@ -0,0 +1,4 @@ +PyPowerFlex +requests>=2.23.0 +python-dateutil>=2.8.0 +setuptools diff --git a/ansible_collections/dellemc/powerflex/requirements.yml b/ansible_collections/dellemc/powerflex/requirements.yml new file mode 100644 index 00000000..1fa924bf --- /dev/null +++ b/ansible_collections/dellemc/powerflex/requirements.yml @@ -0,0 +1,3 @@ +--- +collections: + - name: dellemc.powerflex diff --git a/ansible_collections/dellemc/powerflex/tests/requirements.txt b/ansible_collections/dellemc/powerflex/tests/requirements.txt new file mode 100644 index 00000000..3541acd1 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/requirements.txt @@ -0,0 +1,7 @@ +pytest +pytest-xdist +pytest-mock +pytest-cov +pytest-forked +coverage==4.5.4 +mock diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..adc32988 --- /dev/null +++ 
b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..adc32988 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt new file mode 100644 index 00000000..adc32988 --- /dev/null +++ 
b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt @@ -0,0 +1,10 @@ +plugins/modules/device.py validate-modules:missing-gplv3-license +plugins/modules/sdc.py validate-modules:missing-gplv3-license +plugins/modules/sds.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/protection_domain.py validate-modules:missing-gplv3-license +plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license +plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/powerflex/tests/unit/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py new file mode 100644 index 00000000..5128e54b --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_api_exception.py @@ -0,0 +1,14 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock ApiException for Dell Technologies (Dell) PowerFlex Test modules""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockApiException(Exception): + body = "PyPowerFlex Error message" + status = "500" diff --git 
a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py new file mode 100644 index 00000000..9af1ac7f --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py @@ -0,0 +1,235 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of info module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_replication_consistency_group_api \ + import MockReplicationConsistencyGroupApi + + +__metaclass__ = type + + +class MockInfoApi: + INFO_COMMON_ARGS = { + "hostname": "**.***.**.***", + "gather_subset": [], + "filters": None + } + + DUMMY_IP = 'xx.xx.xx.xx' + INFO_ARRAY_DETAILS = [ + { + 'systemVersionName': 'DellEMC PowerFlex Version', + 'perfProfile': 'Compact', + 'authenticationMethod': 'Native', + 'capacityAlertHighThresholdPercent': 80, + 'capacityAlertCriticalThresholdPercent': 90, + 'upgradeState': 'NoUpgrade', + 'remoteReadOnlyLimitState': False, + 'mdmManagementPort': 6611, + 'mdmExternalPort': 7611, + 'sdcMdmNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 800, + 'windowSizeInSec': 60 + }, + 'mediumWindow': 
{ + 'threshold': 4000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 20000, + 'windowSizeInSec': 86400 + } + }, + 'sdcMemoryAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcSocketAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcLongOperationsCounterParameters': { + 'shortWindow': { + 'threshold': 10000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 100000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 1000000, + 'windowSizeInSec': 86400 + } + }, + 'cliPasswordAllowed': True, + 'managementClientSecureCommunicationEnabled': True, + 'tlsVersion': 'TLSv1.2', + 'showGuid': True, + 'defragmentationEnabled': True, + 'mdmSecurityPolicy': 'None', + 'mdmCluster': { + 'clusterState': 'ClusteredNormal', + 'clusterMode': 'ThreeNodes', + 'slaves': [ + { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'virtualInterfaces': [ + '' + ], + 'opensslVersion': 'OpenSSL 26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node1_MDM', + 'id': 'test_id_1', + 'port': 0000 + } + ], + 'goodNodesNum': 3, + 'master': { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': 'R3_6.0.0', + 'virtualInterfaces': [ + 'ens192' + ], + 'opensslVersion': 'OpenSSL26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node_0', + 'id': 'test_id_2', + 'port': 0000 + }, + 'tieBreakers': [ + { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'opensslVersion': 'N/A', + 'role': 'TieBreaker', + 'status': 
'Normal', + 'id': 'test_id_3', + 'port': 0000 + } + ], + 'goodReplicasNum': 2, + 'id': '' + }, + 'sdcSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'addressSpaceUsage': 'Normal', + 'lastUpgradeTime': 0, + 'sdcSdrConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'sdrSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'isInitialLicense': False, + 'capacityTimeLeftInDays': '253', + 'swid': 'abcdXXX', + 'installId': 'id_111', + 'restrictedSdcModeEnabled': False, + 'restrictedSdcMode': 'None', + 'enterpriseFeaturesEnabled': True, + 'daysInstalled': 112, + 'maxCapacityInGb': '5120', + 'id': 'id_222' + } + ] + + INFO_VOLUME_GET_LIST = MockVolumeApi.VOLUME_GET_LIST + + INFO_VOLUME_STATISTICS = { + 'test_vol_id_1': MockVolumeApi.VOLUME_STATISTICS + } + + INFO_STORAGE_POOL_GET_LIST = MockStoragePoolApi.STORAGE_POOL_GET_LIST + + INFO_STORAGE_POOL_STATISTICS = { + 'test_pool_id_1': MockStoragePoolApi.STORAGE_POOL_STATISTICS + } + + RCG_LIST = MockReplicationConsistencyGroupApi.get_rcg_details() + + @staticmethod + def get_exception_response(response_type): + if response_type == 'volume_get_details': + return "Get volumes list from powerflex array failed with error " + elif response_type == 'sp_get_details': + return "Get storage pool list from powerflex array failed with error " + elif response_type == 'rcg_get_details': + return "Get replication consistency group list from powerflex array failed with 
error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py new file mode 100644 index 00000000..e2966fad --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_mdm_cluster_api.py @@ -0,0 +1,403 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of MDM cluster module on PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockMdmClusterApi: + MODULE_PATH = 'ansible_collections.dellemc.powerflex.plugins.modules.mdm_cluster.PowerFlexMdmCluster' + MODULE_UTILS_PATH = 'ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils' + + MDM_CLUSTER_COMMON_ARGS = { + "hostname": "**.***.**.***", + "mdm_id": None, + "mdm_name": None, + "mdm_new_name": None, + "performance_profile": None, + "standby_mdm": None, + "is_primary": None, + "cluster_mode": None, + "mdm": None, + "mdm_state": None, + "virtual_ip_interfaces": None, + "clear_interfaces": None, + 'state': None + } + + MDM_NAME = "mdm_node1" + MDM_NAME_STB_MGR = "mdm_node_mgr" + MDM_ID = "5908d328581d1401" + STB_TB_MDM_ID = "5908d328581d1403" + STB_MGR_MDM_ID = "36279b98215e5a04" + IP_1 = "10.x.y.z" + IP_2 = "10.x.x.z" + IP_3 = "10.x.z.z" + IP_4 = "10.x.y.y" + SSL_VERSION = "OpenSSL 1.0.2k-fips 26 Jan 2017" + SYS_VERSION = "DellEMC PowerFlex Version: R3_6.0.354" + + THREE_MDM_CLUSTER_DETAILS = { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": 
"sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm1", + "id": MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "name": MDM_NAME, + "id": STB_TB_MDM_ID, + "port": 9011 + }, + { + "virtualInterfaces": [ + "ens12" + ], + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": MDM_NAME_STB_MGR, + "id": STB_MGR_MDM_ID, + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + + THREE_MDM_CLUSTER_DETAILS_2 = { + "clusterState": "ClusteredNormal", + "clusterMode": "ThreeNodes", + "goodNodesNum": 3, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm1", + "id": MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": 
"R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + + FIVE_MDM_CLUSTER_DETAILS = { + "clusterState": "ClusteredNormal", + "clusterMode": "FiveNodes", + "goodNodesNum": 5, + "master": { + "virtualInterfaces": [ + "ens1" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm", + "id": "5908d328581d1400", + "port": 9011 + }, + "perfProfile": "HighPerformance", + "slaves": [ + { + "virtualInterfaces": [], + "managementIPs": [ + IP_2 + ], + "ips": [ + IP_2 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": SSL_VERSION, + "role": "Manager", + "status": "Normal", + "name": "sample_mdm11", + "id": MDM_ID, + "port": 9011 + }, + { + "virtualInterfaces": [ + "ens12" + ], + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": MDM_NAME_STB_MGR, + "id": STB_MGR_MDM_ID, + "port": 9011 + } + ], + "tieBreakers": [ + { + "managementIPs": [ + IP_3 + ], + "ips": [ + IP_3 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "name": MDM_NAME, + "id": STB_TB_MDM_ID, + "port": 9011 + }, + { + "managementIPs": [], + "ips": [ + IP_4 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "TieBreaker", + "status": "Normal", + "id": "5908d328581d1402", + "port": 9011 + } + ], + "standbyMDMs": [ + { + "virtualInterfaces": [ + "ens13" + ], + "managementIPs": [ + IP_1 + ], + "ips": [ + IP_1 + ], + "versionInfo": "R3_6.0.0", + "opensslVersion": "N/A", + "role": "Manager", + "status": "Normal", + "name": "mgr_node_2", + "id": "5120af354fb17305", + "port": 9011 + } + ], + "goodReplicasNum": 2, + "id": "cdd883cf00000002" + } + PARTIAL_SYSTEM_DETAILS 
= [ + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f020f", + "id": "3c567fd2298f020f" + }, + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f0201", + "id": "3c567fd2298f0201" + } + ] + PARTIAL_SYSTEM_DETAILS_1 = [ + { + "systemVersionName": SYS_VERSION, + "perfProfile": "Compact", + "name": "System:3c567fd2298f020f", + "id": "3c567fd2298f020f" + } + ] + + @staticmethod + def get_failed_response(): + return "Failed to get the MDM cluster with error" + + @staticmethod + def rename_failed_response(): + return "Failed to rename the MDM mdm_node1 with error" + + @staticmethod + def perf_profile_failed_response(): + return "Failed to update performance profile to Compact with error" + + @staticmethod + def virtual_ip_interface_failed_response(): + return "Failed to modify the virtual IP interfaces of MDM 5908d328581d1401 with error" + + @staticmethod + def remove_mdm_failed_response(): + return "Failed to remove the standby MDM 5908d328581d1403 from the MDM cluster with error" + + @staticmethod + def add_mdm_failed_response(): + return "Failed to Add a standby MDM with error" + + @staticmethod + def owner_failed_response(): + return "Failed to update the Owner of MDM cluster to MDM sample_mdm1 with error" + + @staticmethod + def switch_mode_failed_response(): + return "Failed to change the MDM cluster mode with error" + + @staticmethod + def system_failed_response(): + return "Failed to get system id with error" + + @staticmethod + def multiple_system_failed_response(): + return "Multiple systems exist on the given host." + + @staticmethod + def remove_mdm_no_id_name_failed_response(): + return "Either mdm_name or mdm_id is required while removing the standby MDM." + + @staticmethod + def without_standby_failed_response(): + return "No Standby MDMs found. To expand cluster size, first add standby MDMs." 
+ + @staticmethod + def no_cluster_failed_response(): + return "MDM cluster not found" + + @staticmethod + def id_none_interface_failed_response(): + return "Please provide mdm_name/mdm_id to modify virtual IP interfaces the MDM" + + @staticmethod + def id_none_rename_failed_response(): + return "Please provide mdm_name/mdm_id to rename the MDM" + + @staticmethod + def id_none_change_owner_failed_response(): + return "Either mdm_name or mdm_id is required while changing ownership of MDM cluster" + + @staticmethod + def new_name_add_mdm_failed_response(): + return "Parameters mdm_id/mdm_new_name are not allowed while adding a standby MDM" diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py new file mode 100644 index 00000000..60452ecd --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py @@ -0,0 +1,68 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of protection domain module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockProtectionDomainApi: + MODULE_PATH = 'ansible_collections.dellemc.powerflex.plugins.modules.protection_domain.PowerFlexProtectionDomain' + MODULE_UTILS_PATH = 'ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils' + + PROTECTION_DOMAIN = { + "protectiondomain": [ + { + "id": "7bd6457000000000", + "name": "test_domain", + "protectionDomainState": "Active", + "overallIoNetworkThrottlingInKbps": 20480, + "rebalanceNetworkThrottlingInKbps": 10240, + "rebuildNetworkThrottlingInKbps": 10240, + "vtreeMigrationNetworkThrottlingInKbps": 10240, + "rfcacheEnabled": 
"false", + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "None", + "rfcachePageSizeKb": 64, + "storagePools": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ] + } + ] + } + STORAGE_POOL = { + "storagepool": [ + { + "protectionDomainId": "7bd6457000000000", + "rebuildEnabled": True, + "mediaType": "HDD", + "name": "pool1", + "id": "8d1cba1700000000" + } + ] + } + + @staticmethod + def modify_pd_with_failed_msg(protection_domain_name): + return "Failed to update the rf cache limits of protection domain " + protection_domain_name + " with error " + + @staticmethod + def delete_pd_failed_msg(protection_domain_id): + return "Delete protection domain '" + protection_domain_id + "' operation failed with error ''" + + @staticmethod + def rename_pd_failed_msg(protection_domain_name): + return "Failed to update the protection domain " + protection_domain_name + " with error " + + @staticmethod + def version_pd_failed_msg(): + return "Getting PyPowerFlex SDK version, failed with Error The 'PyPowerFlex' distribution was " \ + "not found and is required by the application" diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py new file mode 100644 index 00000000..6671fd87 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py @@ -0,0 +1,70 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of volume module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockReplicationConsistencyGroupApi: + RCG_COMMON_ARGS = { + "hostname": "**.***.**.***", + "rcg_name": None, + 
"rcg_id": None, + "create_snapshot": None, "new_rcg_name": None, + "rpo": None, "protection_domain_name": None, "protection_domain_id": None, + "activity_mode": None, "pause": None, "pause_mode": None, "freeze": None, + "remote_peer": {"hostname": None, "username": None, "password": None, + "verifycert": None, "port": None, "protection_domain_name": None, + "protection_domain_id": None}, + "target_volume_access_mode": None, "is_consistent": None, + "state": None + } + RCG_ID = "aadc17d500000000" + FAIL_MSG = " failed with error" + + @staticmethod + def get_rcg_details(pause_mode="None", freeze_state="Unfrozen", activity_mode="Active", consistency="Consistent"): + return [{"protectionDomainId": "b969400500000000", + "peerMdmId": "6c3d94f600000000", + "remoteId": "2130961a00000000", + "remoteMdmId": "0e7a082862fedf0f", + "currConsistMode": consistency, + "freezeState": freeze_state, + "lifetimeState": "Normal", + "pauseMode": pause_mode, + "snapCreationInProgress": False, + "lastSnapGroupId": "e58280b300000001", + "lastSnapCreationRc": "SUCCESS", + "targetVolumeAccessMode": "NoAccess", + "remoteProtectionDomainId": "4eeb304600000000", + "remoteProtectionDomainName": "domain1", + "failoverType": "None", + "failoverState": "None", + "activeLocal": True, + "activeRemote": True, + "abstractState": "Ok", + "localActivityState": activity_mode, + "remoteActivityState": "Active", + "inactiveReason": 11, + "rpoInSeconds": 30, + "replicationDirection": "LocalToRemote", + "disasterRecoveryState": "None", + "remoteDisasterRecoveryState": "None", + "error": 65, + "name": "test_rcg", + "type": "User", + "id": "aadc17d500000000"}] + + @staticmethod + def get_exception_response(response_type): + return "Failed to get the replication consistency group " + + @staticmethod + def create_snapshot_exception_response(response_type, rcg_id): + return "Create RCG snapshot for RCG with id " + rcg_id + " operation failed" diff --git 
a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdk_response.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdk_response.py new file mode 100644 index 00000000..9e47f4ba --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdk_response.py @@ -0,0 +1,15 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock SDKResponse for Unit tests for Dell Technologies (Dell) PowerFlex modules""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockSDKResponse: + def __init__(self, data=None, status_code=200): + self.data = data + self.status_code = status_code diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py new file mode 100644 index 00000000..0246b9dd --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py @@ -0,0 +1,467 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of storage pool module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockStoragePoolApi: + STORAGE_POOL_COMMON_ARGS = { + "hostname": "**.***.**.***", + "storage_pool_name": None, + "storage_pool_id": None, + "storage_pool_new_name": None, + "protection_domain_name": None, + "protection_domain_id": None, + "use_rmcache": None, + "use_rfcache": None, + "media_type": None, + 'state': None + } + + STORAGE_POOL_GET_LIST = [ + { + 'protectionDomainId': '4eeb304600000000', + 'rebuildEnabled': True, + 'dataLayout': 
'MediumGranularity', + 'persistentChecksumState': 'Protected', + 'addressSpaceUsage': 'Normal', + 'externalAccelerationType': 'None', + 'rebalanceEnabled': True, + 'sparePercentage': 10, + 'rmcacheWriteHandlingMode': 'Cached', + 'checksumEnabled': False, + 'useRfcache': False, + 'compressionMethod': 'Invalid', + 'fragmentationEnabled': True, + 'numOfParallelRebuildRebalanceJobsPerDevice': 2, + 'capacityAlertHighThreshold': 80, + 'capacityAlertCriticalThreshold': 90, + 'capacityUsageState': 'Normal', + 'capacityUsageType': 'NetCapacity', + 'addressSpaceUsageType': 'DeviceCapacityLimit', + 'bgScannerCompareErrorAction': 'ReportAndFix', + 'bgScannerReadErrorAction': 'ReportAndFix', + 'fglExtraCapacity': None, + 'fglOverProvisioningFactor': None, + 'fglWriteAtomicitySize': None, + 'fglMaxCompressionRatio': None, + 'fglPerfProfile': None, + 'replicationCapacityMaxRatio': 0, + 'persistentChecksumEnabled': True, + 'persistentChecksumBuilderLimitKb': 3072, + 'persistentChecksumValidateOnRead': False, + 'useRmcache': False, + 'fglAccpId': None, + 'rebuildIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebalanceIoPriorityPolicy': 'favorAppIos', + 'vtreeMigrationIoPriorityPolicy': 'favorAppIos', + 'protectedMaintenanceModeIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebuildIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebalanceIoPriorityNumOfConcurrentIosPerDevice': 1, + 'vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice': 1, + 'protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebuildIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebalanceIoPriorityBwLimitPerDeviceInKbps': 10240, + 'vtreeMigrationIoPriorityBwLimitPerDeviceInKbps': 10240, + 'protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebuildIoPriorityAppIopsPerDeviceThreshold': None, + 'rebalanceIoPriorityAppIopsPerDeviceThreshold': None, + 'vtreeMigrationIoPriorityAppIopsPerDeviceThreshold': None, + 'protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold': None, + 
'rebuildIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebalanceIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebuildIoPriorityQuietPeriodInMsec': None, + 'rebalanceIoPriorityQuietPeriodInMsec': None, + 'vtreeMigrationIoPriorityQuietPeriodInMsec': None, + 'protectedMaintenanceModeIoPriorityQuietPeriodInMsec': None, + 'zeroPaddingEnabled': True, + 'backgroundScannerMode': 'DataComparison', + 'backgroundScannerBWLimitKBps': 3072, + 'fglMetadataSizeXx100': None, + 'fglNvdimmWriteCacheSizeInMb': None, + 'fglNvdimmMetadataAmortizationX100': None, + 'mediaType': 'HDD', + 'name': 'test_pool', + 'id': 'test_pool_id_1' + } + ] + + STORAGE_POOL_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + 'test_vol_id_1' + ], + 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 
'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 
0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'dv_id_1', + 'dv_id_2', + 'dv_id_3' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 
'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + 'vtree_id_1' + ], + 'activeMovingCapacityInKb': 1, + 'targetWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 1, + 'rfcacheIosSkipped': 1, + 'userDataWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceVacInKb': 1, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'netFglSparesInKb': 1, + 'rfcacheReadsSkipped': 1, + 'activeExitProtectedMaintenanceModeCapacityInKb': 1, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 1, + 'numOfUnmappedVolumes': 2, + 'tempCapacityVacInKb': 1, + 'volumeAddressSpaceInKb': 80000, + 'currentFglMigrationSizeInKb': 1, + 'rfcacheWritesSkippedMaxIoSize': 1, + 'netMaxUserDataCapacityInKb': 380600000, + 'numOfMigratingVtrees': 1, + 'atRestCapacityInKb': 1, + 'rfacheWriteHit': 1, + 'bckRebuildReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSourceDeviceWrites': 1, + 'spareCapacityInKb': 84578000, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheIoErrors': 1, + 'inaccessibleCapacityInKb': 1, + 'normRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + 
}, + 'capacityInUseInKb': 1, + 'rebalanceReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadsSkippedMaxIoSize': 1, + 'activeMovingInExitProtectedMaintenanceModeJobs': 1, + 'secondaryReadFromDevBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'secondaryReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheWritesSkippedStuckIo': 1, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceCapacityInKb': 1, + 'exposedCapacityInKb': 1, + 'netFglCompressedDataSizeInKb': 1, + 'userDataSdcWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inUseVacInKb': 16777000, + 'fwdRebuildCapacityInKb': 1, + 'thickCapacityInUseInKb': 1, + 'backgroundScanReadErrorCount': 1, + 'activeMovingInRebalanceJobs': 1, + 'migratingVolumeIds': [ + '1xxx' + ], + 'rfcacheWritesSkippedLowResources': 1, + 'capacityInUseNoOverheadInKb': 1, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSkippedUnlinedWrite': 1, + 'netCapacityInUseInKb': 1, + 'numOfOutgoingMigrations': 1, + 'rfcacheAvgWriteTime': 1, + 'pendingNormRebuildCapacityInKb': 1, + 'pendingMovingOutNormrebuildJobs': 1, + 'rfcacheSourceDeviceReads': 1, + 'rfcacheReadsPending': 1, + 'volumeAllocationLimitInKb': 3791650000, + 'rfcacheReadsSkippedHeavyLoad': 1, + 'fwdRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadMiss': 1, + 'targetReadLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'userDataCapacityInKb': 1, + 'activeMovingInBckRebuildJobs': 1, + 'movingCapacityInKb': 1, + 'activeEnterProtectedMaintenanceModeCapacityInKb': 1, + 'backgroundScanCompareErrorCount': 1, + 'pendingMovingInFwdRebuildJobs': 1, + 'rfcacheReadsReceived': 1, + 'spSdsIds': [ + 'sp_id_1', + 'sp_id_2', + 'sp_id_3' + ], + 
'pendingEnterProtectedMaintenanceModeCapacityInKb': 1, + 'vtreeAddresSpaceInKb': 8388000, + 'snapCapacityInUseOccupiedInKb': 1, + 'activeFwdRebuildCapacityInKb': 1, + 'rfcacheReadsSkippedStuckIo': 1, + 'activeMovingOutNormRebuildJobs': 1, + 'rfcacheWritePending': 1, + 'numOfThinBaseVolumes': 2, + 'degradedFailedVacInKb': 1, + 'userDataTrimBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'numOfIncomingVtreeMigrations': 1 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the storage pool test_pool with error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py new file mode 100644 index 00000000..b05cc84d --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py @@ -0,0 +1,548 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of volume module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi + +__metaclass__ = type + + +class MockVolumeApi: + VOLUME_COMMON_ARGS = { + "hostname": "**.***.**.***", + "vol_name": None, + "vol_id": None, + "vol_type": None, + "compression_type": None, + "storage_pool_name": None, + "storage_pool_id": None, + "protection_domain_name": None, + "protection_domain_id": None, + "snapshot_policy_name": None, + "snapshot_policy_id": None, + "auto_snap_remove_type": None, + "use_rmcache": None, + "size": None, + "cap_unit": None, + "vol_new_name": None, + "sdc": {}, + "sdc_state": None, + "delete_snapshots": None, + "state": None + } + 
+ VOLUME_GET_LIST = [ + { + 'storagePoolId': 'test_pool_id_1', + 'dataLayout': 'MediumGranularity', + 'vtreeId': 'vtree_id_1', + 'sizeInKb': 8388608, + 'snplIdOfAutoSnapshot': None, + 'volumeType': 'ThinProvisioned', + 'consistencyGroupId': None, + 'ancestorVolumeId': None, + 'notGenuineSnapshot': False, + 'accessModeLimit': 'ReadWrite', + 'secureSnapshotExpTime': 0, + 'useRmcache': False, + 'managedBy': 'ScaleIO', + 'lockedAutoSnapshot': False, + 'lockedAutoSnapshotMarkedForRemoval': False, + 'autoSnapshotGroupId': None, + 'compressionMethod': 'Invalid', + 'pairIds': None, + 'timeStampIsAccurate': False, + 'mappedSdcInfo': None, + 'originalExpiryTime': 0, + 'retentionLevels': [ + ], + 'snplIdOfSourceVolume': None, + 'volumeReplicationState': 'UnmarkedForReplication', + 'replicationJournalVolume': False, + 'replicationTimeStamp': 0, + 'creationTime': 1655878090, + 'name': 'testing', + 'id': 'test_id_1' + } + ] + + VOLUME_STORAGEPOOL_DETAILS = MockStoragePoolApi.STORAGE_POOL_GET_LIST[0] + + VOLUME_PD_DETAILS = { + 'rebalanceNetworkThrottlingEnabled': False, + 'vtreeMigrationNetworkThrottlingEnabled': False, + 'overallIoNetworkThrottlingEnabled': False, + 'rfcacheEnabled': True, + 'rfcacheAccpId': None, + 'rebuildNetworkThrottlingEnabled': False, + 'sdrSdsConnectivityInfo': { + 'clientServerConnStatus': 'CLIENT_SERVER_CONN_STATUS_ALL_CONNECTED', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'protectionDomainState': 'Active', + 'rebuildNetworkThrottlingInKbps': None, + 'rebalanceNetworkThrottlingInKbps': None, + 'overallIoNetworkThrottlingInKbps': None, + 'vtreeMigrationNetworkThrottlingInKbps': None, + 'sdsDecoupledCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, 
+ 'sdsConfigurationFailureCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'mdmSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdsSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'rfcacheOpertionalMode': 'WriteMiss', + 'rfcachePageSizeKb': 64, + 'rfcacheMaxIoSizeKb': 128, + 'sdsReceiveBufferAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 20000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 200000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 2000000, + 'windowSizeInSec': 86400 + } + }, + 'fglDefaultNumConcurrentWrites': 1000, + 'fglMetadataCacheEnabled': False, + 'fglDefaultMetadataCacheSize': 0, + 'protectedMaintenanceModeNetworkThrottlingEnabled': False, + 'protectedMaintenanceModeNetworkThrottlingInKbps': None, + 'rplCapAlertLevel': 'normal', + 'systemId': 'syst_id_1', + 'name': 'domain1', + 'id': '4eeb304600000000', + } + + VOLUME_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + '456ad22e00000003' + ], 
+ 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 
'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'bbd7580800030001', + 'bbd4580a00040001', + 'bbd5580b00050001' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 
8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + '32b13de900000003' + ], + 'activeMovingCapacityInKb': 0, + 'targetWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 0, + 'rfcacheIosSkipped': 0, + 'userDataWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inMaintenanceVacInKb': 0, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglSparesInKb': 0, + 'rfcacheReadsSkipped': 0, + 
'activeExitProtectedMaintenanceModeCapacityInKb': 0, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 0, + 'numOfUnmappedVolumes': 1, + 'tempCapacityVacInKb': 0, + 'volumeAddressSpaceInKb': 8388608, + 'currentFglMigrationSizeInKb': 0, + 'rfcacheWritesSkippedMaxIoSize': 0, + 'netMaxUserDataCapacityInKb': 380602368, + 'numOfMigratingVtrees': 0, + 'atRestCapacityInKb': 0, + 'rfacheWriteHit': 0, + 'bckRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSourceDeviceWrites': 0, + 'spareCapacityInKb': 84578304, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheIoErrors': 0, + 'inaccessibleCapacityInKb': 0, + 'normRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'capacityInUseInKb': 0, + 'rebalanceReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheReadsSkippedMaxIoSize': 0, + 'activeMovingInExitProtectedMaintenanceModeJobs': 0, + 'secondaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'secondaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesSkippedStuckIo': 0, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inMaintenanceCapacityInKb': 0, + 'exposedCapacityInKb': 0, + 'netFglCompressedDataSizeInKb': 0, + 'userDataSdcWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inUseVacInKb': 16777216, + 'fwdRebuildCapacityInKb': 0, + 'thickCapacityInUseInKb': 0, + 'backgroundScanReadErrorCount': 0, + 'activeMovingInRebalanceJobs': 0, + 'migratingVolumeIds': [ + ], + 'rfcacheWritesSkippedLowResources': 0, + 'capacityInUseNoOverheadInKb': 0, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSkippedUnlinedWrite': 0, + 'netCapacityInUseInKb': 0, + 
'numOfOutgoingMigrations': 0, + 'rfcacheAvgWriteTime': 0, + 'pendingNormRebuildCapacityInKb': 0, + 'pendingMovingOutNormrebuildJobs': 0, + 'rfcacheSourceDeviceReads': 0, + 'rfcacheReadsPending': 0, + 'volumeAllocationLimitInKb': 3791650816, + 'rfcacheReadsSkippedHeavyLoad': 0, + 'fwdRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheReadMiss': 0, + 'targetReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'userDataCapacityInKb': 0, + 'activeMovingInBckRebuildJobs': 0, + 'movingCapacityInKb': 0, + 'activeEnterProtectedMaintenanceModeCapacityInKb': 0, + 'backgroundScanCompareErrorCount': 0, + 'pendingMovingInFwdRebuildJobs': 0, + 'rfcacheReadsReceived': 0, + 'spSdsIds': [ + 'abdfe71b00030001', + 'abdce71d00040001', + 'abdde71e00050001' + ], + 'pendingEnterProtectedMaintenanceModeCapacityInKb': 0, + 'vtreeAddresSpaceInKb': 8388608, + 'snapCapacityInUseOccupiedInKb': 0, + 'activeFwdRebuildCapacityInKb': 0, + 'rfcacheReadsSkippedStuckIo': 0, + 'activeMovingOutNormRebuildJobs': 0, + 'rfcacheWritePending': 0, + 'numOfThinBaseVolumes': 1, + 'degradedFailedVacInKb': 0, + 'userDataTrimBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfIncomingVtreeMigrations': 0 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the volume test_id_1 with error " diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py new file mode 100644 index 00000000..1af574da --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py @@ -0,0 +1,130 @@ +# 
Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for info module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_info_api import MockInfoApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.info import PowerFlexInfo + + +class TestPowerflexInfo(): + + get_module_args = MockInfoApi.INFO_COMMON_ARGS + + @pytest.fixture + def info_module_mock(self, mocker): + info_module_mock = PowerFlexInfo() + info_module_mock.module.check_mode = False + info_module_mock.powerflex_conn.system.api_version = MagicMock( + return_value=3.5 + ) + info_module_mock.powerflex_conn.system.get = MagicMock( + return_value=MockInfoApi.INFO_ARRAY_DETAILS + ) + return info_module_mock + + def test_get_volume_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_stat_resp = MockInfoApi.INFO_VOLUME_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + 
return_value=volume_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.volume.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes.assert_called() + + def test_get_volume_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('volume_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_sp_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=sp_resp + ) + sp_stat_resp = MockInfoApi.INFO_STORAGE_POOL_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + return_value=sp_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.storage_pool.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools.assert_called() + + def test_get_sp_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=sp_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + 
side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('sp_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_rcg_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['rcg'] + }) + info_module_mock.module.params = self.get_module_args + rcg_resp = MockInfoApi.RCG_LIST + info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=rcg_resp) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.replication_consistency_group.get.assert_called() + + def test_get_rcg_details_throws_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['rcg'] + }) + info_module_mock.module.params = self.get_module_args + info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('rcg_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py new file mode 100644 index 00000000..f8f3cdc2 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_mdm_cluster.py @@ -0,0 +1,636 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for MDM cluster module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_mdm_cluster_api import MockMdmClusterApi +from 
ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.mdm_cluster import PowerFlexMdmCluster + + +class TestPowerflexMDMCluster(): + + get_module_args = MockMdmClusterApi.MDM_CLUSTER_COMMON_ARGS + add_mdm_ip = "xx.3x.xx.xx" + + @pytest.fixture + def mdm_cluster_module_mock(self, mocker): + mocker.patch(MockMdmClusterApi.MODULE_UTILS_PATH + '.PowerFlexClient', new=MockApiException) + mdm_cluster_module_mock = PowerFlexMdmCluster() + mdm_cluster_module_mock.module.check_mode = False + return mdm_cluster_module_mock + + def test_get_mdm_cluster(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details.assert_called() + + def test_get_mdm_cluster_with_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + assert 
MockMdmClusterApi.get_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details.assert_called() + + def test_rename_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_new_name": "mdm_node_renamed", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_rename_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_new_name": "mdm_node_renamed", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.rename_mdm.assert_called() + assert MockMdmClusterApi.rename_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_performance_profile_mdm_cluster(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "performance_profile": "Compact", + "state": "present" + }) + mdm_cluster_module_mock.module.params = 
self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_set_performance_profile_mdm_cluster_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "performance_profile": "Compact", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.set_cluster_mdm_performance_profile.assert_called() + assert MockMdmClusterApi.perf_profile_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_virtual_ip_interface_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens11"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface 
= MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_set_virtual_ip_interface_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens11"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert MockMdmClusterApi.virtual_ip_interface_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_set_virtual_ip_interface_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.MDM_ID, + "virtual_ip_interfaces": ["ens1"], + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_remove_standby_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + 
mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_remove_standby_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "non_existing_node", + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_remove_standby_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.remove_standby_mdm.assert_called() + assert MockMdmClusterApi.remove_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_add_standby_mdm(self, mdm_cluster_module_mock): + 
self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + "management_ips": [self.add_mdm_ip], + "virtual_interfaces": ["ens1"], + "allow_multiple_ips": True + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_add_standby_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": MockMdmClusterApi.MDM_NAME, + "standby_mdm": { + "mdm_ips": ["10.x.z.z"], + "role": "TieBreaker", + "port": 9011 + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_add_standby_mdm_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + "management_ips": [self.add_mdm_ip], + "virtual_interfaces": ["ens1"], + "allow_multiple_ips": True + }, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + 
mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.add_standby_mdm.assert_called() + assert MockMdmClusterApi.add_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_change_mdm_cluster_owner(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm1", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_change_mdm_cluster_owner_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": "5908d328581d1400", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse( + MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_change_mdm_cluster_owner_execption(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm1", + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + 
mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.change_mdm_ownership.assert_called() + assert MockMdmClusterApi.owner_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_expand_mdm_cluster_mode(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_reduce_mdm_cluster_mode_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "ThreeNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": MockMdmClusterApi.STB_MGR_MDM_ID, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + 
mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_expand_mdm_cluster_mode_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": MockMdmClusterApi.MDM_NAME_STB_MGR, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": MockMdmClusterApi.STB_TB_MDM_ID, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert MockMdmClusterApi.switch_mode_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_reduce_mdm_cluster_mode(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "ThreeNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": MockMdmClusterApi.STB_MGR_MDM_ID, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": MockMdmClusterApi.MDM_NAME, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "absent-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = 
MockSDKResponse(MockMdmClusterApi.FIVE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.switch_cluster_mode.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_clear_virtual_ip_interface_mdm(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": MockMdmClusterApi.STB_MGR_MDM_ID, + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface = MagicMock() + mdm_cluster_module_mock.perform_module_operation() + mdm_cluster_module_mock.powerflex_conn.system.modify_virtual_ip_interface.assert_called() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_clear_virtual_ip_interface_mdm_idempotency(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "sample_mdm11", + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.FIVE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert mdm_cluster_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_get_system_id_exception(self, mdm_cluster_module_mock): + 
self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + side_effect=utils.PowerFlexClient + ) + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.system_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_remove_mdm_cluster_owner_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "absent" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.remove_mdm_no_id_name_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_expand_cluster_without_standby(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "cluster_mode": "FiveNodes", + "mdm": [ + { + "mdm_name": None, + "mdm_id": None, + "mdm_type": "Secondary" + }, + { + "mdm_id": None, + "mdm_name": None, + "mdm_type": "TieBreaker" + } + ], + "mdm_state": "present-in-cluster", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS_2) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.without_standby_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + 
def test_get_system_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + system_resp = MockSDKResponse(MockMdmClusterApi.PARTIAL_SYSTEM_DETAILS_1) + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + return_value=system_resp.__dict__['data'] + ) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value={} + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.no_cluster_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_clear_virtual_ip_interface_mdm_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "clear_interfaces": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_interface_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_rename_mdm_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "mdm_new_name": "new_node", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + 
mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_rename_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_change_owner_id_none(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_id": None, + "is_primary": True, + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.id_none_change_owner_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_multiple_system_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + system_resp = MockSDKResponse(MockMdmClusterApi.PARTIAL_SYSTEM_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get = MagicMock( + return_value=system_resp.__dict__['data'] + ) + mdm_cluster_resp = MockSDKResponse(MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS) + mdm_cluster_module_mock.powerflex_conn.system.get_mdm_cluster_details = MagicMock( + return_value=mdm_cluster_resp.__dict__['data'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.multiple_system_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] + + def test_add_standby_mdm_new_name_exception(self, mdm_cluster_module_mock): + self.get_module_args.update({ + "mdm_name": "standby_node", + "standby_mdm": { + "mdm_ips": [self.add_mdm_ip], + "role": "Manager", + "port": 9011, + "management_ips": [self.add_mdm_ip], + "virtual_interfaces": 
["ens1"], + "allow_multiple_ips": True + }, + "mdm_new_name": "new_node", + "state": "present" + }) + mdm_cluster_module_mock.module.params = self.get_module_args + mdm_cluster_module_mock.get_mdm_cluster_details = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS + ) + mdm_cluster_module_mock.is_mdm_name_id_exists = MagicMock( + return_value=MockMdmClusterApi.THREE_MDM_CLUSTER_DETAILS['master'] + ) + mdm_cluster_module_mock.perform_module_operation() + assert MockMdmClusterApi.new_name_add_mdm_failed_response() in mdm_cluster_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py new file mode 100644 index 00000000..ced9fc7f --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py @@ -0,0 +1,236 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for Protection Domain module on Dell Technologies (Dell) PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_protection_domain_api import MockProtectionDomainApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +from ansible.module_utils import basic 
+basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.protection_domain import PowerFlexProtectionDomain + + +class TestPowerflexProtectionDomain(): + + get_module_args = { + 'hostname': '**.***.**.***', + 'protection_domain_id': '7bd6457000000000', + 'protection_domain_name': None, + 'protection_domain_new_name': None, + 'is_active': True, + 'network_limits': { + 'rebuild_limit': 10240, + 'rebalance_limit': 10240, + 'vtree_migration_limit': 10240, + 'overall_limit': 20480, + 'bandwidth_unit': 'KBps', + }, + 'rf_cache_limits': { + 'is_enabled': None, + 'page_size': 4, + 'max_io_limit': 16, + 'pass_through_mode': 'None' + }, + 'state': 'present' + } + + @pytest.fixture + def protection_domain_module_mock(self, mocker): + mocker.patch(MockProtectionDomainApi.MODULE_UTILS_PATH + '.PowerFlexClient', new=MockApiException) + protection_domain_module_mock = PowerFlexProtectionDomain() + return protection_domain_module_mock + + def test_get_protection_domain_response(self, protection_domain_module_mock): + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.get.assert_called() + + def test_create_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_name": "test_domain", + "state": "present" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.get_protection_domain = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'][0] + ) + 
protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock(return_values=None) + protection_domain_module_mock.perform_module_operation() + assert (self.get_module_args['protection_domain_name'] == + protection_domain_module_mock.module.exit_json.call_args[1]["protection_domain_details"]['name']) + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_modify_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'network_limits': { + 'rebuild_limit': 10, + 'rebalance_limit': 10, + 'vtree_migration_limit': 11, + 'overall_limit': 21, + 'bandwidth_unit': 'GBps', + } + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + sp_resp = MockSDKResponse(MockProtectionDomainApi.STORAGE_POOL) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.get_storage_pools = MagicMock( + return_value=sp_resp.__dict__['data']['storagepool'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.network_limits.assert_called() + + def test_rename_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_new_name': 'new_test_domain' + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain.rename.assert_called() + + def test_inactivate_protection_domain(self, 
protection_domain_module_mock): + self.get_module_args.update({ + 'is_active': False + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.perform_module_operation() + protection_domain_module_mock.powerflex_conn.protection_domain. \ + inactivate.assert_called() + + def test_activate_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'is_active': True + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.activate = MagicMock(return_value=None) + protection_domain_module_mock.perform_module_operation() + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_protection_domain(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_name': 'new_test_domain', + 'state': 'absent' + }) + protection_domain_module_mock.module.params = self.get_module_args + protection_domain_module_mock.get_protection_domain = MagicMock(return_values=None) + protection_domain_module_mock.perform_module_operation() + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_protection_domain_throws_exception(self, protection_domain_module_mock): + self.get_module_args.update({ + 'protection_domain_id': '7bd6457000000000', + 'state': 'absent' + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = 
MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.delete = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.delete_pd_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_with_404_exception(self, protection_domain_module_mock): + MockProtectionDomainApi.status = 404 + self.get_module_args.update({ + "protection_domain_name": "test_domain1" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_modify_protection_domain_throws_exception(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_id": "7bd6457000000000", + 'rf_cache_limits': { + 'is_enabled': True, + 'page_size': 64, + 'max_io_limit': 128, + 'pass_through_mode': 'invalid_Read' + } + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.set_rfcache_enabled = MagicMock( + 
side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.modify_pd_with_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_rename_protection_domain_invalid_value(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_name": "test_domain", + "protection_domain_new_name": " test domain", + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.rename = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.rename_pd_failed_msg(self.get_module_args['protection_domain_id']) in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_protection_domain_invalid_param(self, protection_domain_module_mock): + self.get_module_args.update({ + "protection_domain_name": "test_domain1", + "protection_domain_new_name": "new_domain", + "state": "present" + }) + protection_domain_module_mock.module.params = self.get_module_args + pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN) + protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp.__dict__['data']['protectiondomain'] + ) + protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock( + side_effect=utils.PowerFlexClient) + protection_domain_module_mock.perform_module_operation() + assert MockProtectionDomainApi.version_pd_failed_msg() in \ + protection_domain_module_mock.module.fail_json.call_args[1]['msg'] diff --git 
a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py new file mode 100644 index 00000000..b77cfb9c --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py @@ -0,0 +1,344 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for volume module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import Mock + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_replication_consistency_group_api import MockReplicationConsistencyGroupApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.replication_consistency_group import PowerFlexReplicationConsistencyGroup + + +class TestPowerflexReplicationConsistencyGroup(): + + get_module_args = MockReplicationConsistencyGroupApi.RCG_COMMON_ARGS + + @pytest.fixture + def replication_consistency_group_module_mock(self): + replication_consistency_group_module_mock = PowerFlexReplicationConsistencyGroup() + replication_consistency_group_module_mock.module.check_mode = False + return replication_consistency_group_module_mock + + def test_get_rcg_details(self, replication_consistency_group_module_mock): + 
self.get_module_args.update({ + "rcg_name": "test_rcg", + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get.assert_called() + + def test_get_rcg_details_with_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + side_effect=MockApiException) + replication_consistency_group_module_mock.validate_create = MagicMock() + replication_consistency_group_module_mock.perform_module_operation() + assert MockReplicationConsistencyGroupApi.get_exception_response('get_details') in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_rcg_snapshot_response(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", + "create_snapshot": True, + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.perform_module_operation() + 
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create_snapshot.assert_called() + + def test_create_rcg_snapshot_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_id": "aadc17d500000000", + "create_snapshot": True, + "state": "present" + }) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_resp = MockReplicationConsistencyGroupApi.get_rcg_details() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=replication_consistency_group_resp + ) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create_snapshot = MagicMock( + side_effect=MockApiException + ) + replication_consistency_group_module_mock.perform_module_operation() + assert MockReplicationConsistencyGroupApi.create_snapshot_exception_response('create_snapshot', self.get_module_args['rcg_id']) \ + in replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", "rpo": 60, "protection_domain_name": "domain1", + "protection_domain_id": None, "activity_mode": "active", "state": "present", + "remote_peer": {"hostname": "1.1.1.1", "username": "username", "password": "password", + "verifycert": "verifycert", "port": "port", "protection_domain_name": "None", + "protection_domain_id": "123"}}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=None + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.create.assert_called() + + def test_modify_rpo(self, 
replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "rpo": 60, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details() + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_rpo.assert_called() + + def test_modify_rpo_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "rpo": 60, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_rpo = MagicMock( + side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modify rpo for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_target_volume_access_mode(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "target_volume_access_mode": "Readonly", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details() + ) + 
replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode.assert_called() + + def test_modify_target_volume_access_mode_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "target_volume_access_mode": "Readonly", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.modify_target_volume_access_mode = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modify target volume access mode for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_activity_mode(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "activity_mode": "Inactive", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.inactivate.assert_called() + + def test_modify_activity_mode_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "activity_mode": 
"Active", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(activity_mode="Inactive")) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.activate = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modify activity_mode for replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_pause_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause": True, + "pause_mode": "StopDataTransfer", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details() + ) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.pause.assert_called() + + def test_pause_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause": True, + "pause_mode": "StopDataTransfer", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.pause = \ + 
MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Pause replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_resume_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer")) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.resume.assert_called() + + def test_resume_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer")) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.resume = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Resume replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_freeze_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": 
"test_rcg", "freeze": True, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.freeze.assert_called() + + def test_freeze_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "freeze": True, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.freeze = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Freeze replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_unfreeze_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen") + ) + replication_consistency_group_module_mock.perform_module_operation() + 
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.unfreeze.assert_called() + + def test_unfreeze_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen")) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.unfreeze = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Unfreeze replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \ + + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_rename_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "new_rcg_name": "test_rcg_rename", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.rename_rcg.assert_called() + + def test_rename_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "new_rcg_name": "test_rcg_rename", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + 
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.rename_rcg = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Renaming replication consistency group to test_rcg_rename failed with error" in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_delete_rcg(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "state": "absent"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.delete.assert_called() + + def test_delete_rcg_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "state": "absent"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.delete = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Delete replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID + MockReplicationConsistencyGroupApi.FAIL_MSG in \ + 
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_rcg_as_inconsistent(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "is_consistent": False, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.perform_module_operation() + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.set_as_inconsistent.assert_called() + + def test_modify_rcg_as_consistent_throws_exception(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "is_consistent": True, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details(consistency="InConsistent")) + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.set_as_consistent = \ + MagicMock(side_effect=MockApiException) + replication_consistency_group_module_mock.perform_module_operation() + assert "Modifying consistency of replication consistency group failed with error" in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_pause_rcg_without_pause_mode(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause": True, "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + 
return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.protection_domain.get = MagicMock(return_value=[{"name": "pd_id"}]) + replication_consistency_group_module_mock.perform_module_operation() + assert "Specify pause_mode to perform pause on replication consistency group." in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_rcg_with_invalid_params(self, replication_consistency_group_module_mock): + self.get_module_args.update({ + "rcg_name": "test_rcg", "activity_mode": "active", "state": "present", + "remote_peer": {"hostname": "1.1.1.1", "username": "username", "password": "password", + "verifycert": "verifycert", "port": "port", "protection_domain_name": None, + "protection_domain_id": None}}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=None) + replication_consistency_group_module_mock.perform_module_operation() + assert "Enter remote protection_domain_name or protection_domain_id to create replication consistency group" in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] + + def test_pause_rcg_without_pause(self, replication_consistency_group_module_mock): + self.get_module_args.update({"rcg_name": "test_rcg", "pause_mode": "StopDataTransfer", "state": "present"}) + replication_consistency_group_module_mock.module.params = self.get_module_args + replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock( + return_value=MockReplicationConsistencyGroupApi.get_rcg_details()) + replication_consistency_group_module_mock.powerflex_conn.protection_domain.get = MagicMock(return_value=[{"name": "pd_id"}]) + replication_consistency_group_module_mock.perform_module_operation() + assert "Specify pause as True to pause 
replication consistency group" in \ + replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py new file mode 100644 index 00000000..a2c463f6 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py @@ -0,0 +1,72 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for storage pool module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool + + +class TestPowerflexStoragePool(): + + get_module_args = MockStoragePoolApi.STORAGE_POOL_COMMON_ARGS + + @pytest.fixture + def storagepool_module_mock(self, mocker): + storagepool_module_mock = PowerFlexStoragePool() + storagepool_module_mock.module.check_mode = False + return storagepool_module_mock + + def test_get_storagepool_details(self, storagepool_module_mock): + self.get_module_args.update({ + 
"storage_pool_name": "test_pool", + "state": "present" + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + storagepool_module_mock.perform_module_operation() + storagepool_module_mock.powerflex_conn.storage_pool.get.assert_called() + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called() + + def test_get_storagepool_details_with_exception(self, storagepool_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool" + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + side_effect=MockApiException + ) + storagepool_module_mock.create_storage_pool = MagicMock(return_value=None) + storagepool_module_mock.perform_module_operation() + assert MockStoragePoolApi.get_exception_response('get_details') in storagepool_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py new file mode 100644 index 00000000..53cdcfc0 --- /dev/null +++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py @@ -0,0 +1,81 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for volume module on 
PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.volume import PowerFlexVolume + + +class TestPowerflexVolume(): + + get_module_args = MockVolumeApi.VOLUME_COMMON_ARGS + + @pytest.fixture + def volume_module_mock(self, mocker): + volume_module_mock = PowerFlexVolume() + volume_module_mock.module.check_mode = False + return volume_module_mock + + def test_get_volume_details(self, volume_module_mock): + self.get_module_args.update({ + "vol_name": "testing", + "state": "present" + }) + volume_module_mock.module.params = self.get_module_args + volume_resp = MockVolumeApi.VOLUME_GET_LIST + volume_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_sp_resp = MockVolumeApi.VOLUME_STORAGEPOOL_DETAILS + volume_module_mock.get_storage_pool = MagicMock( + return_value=volume_sp_resp + ) + volume_pd_resp = MockVolumeApi.VOLUME_PD_DETAILS + volume_module_mock.get_protection_domain = MagicMock( + return_value=volume_pd_resp + ) + volume_statistics_resp = MockVolumeApi.VOLUME_STATISTICS + volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock( + return_value=volume_statistics_resp + ) + 
volume_module_mock.perform_module_operation() + volume_module_mock.powerflex_conn.volume.get.assert_called() + volume_module_mock.powerflex_conn.volume.get_statistics.assert_called() + + def test_get_volume_details_with_exception(self, volume_module_mock): + self.get_module_args.update({ + "vol_name": "testing", + "state": "present" + }) + volume_module_mock.module.params = self.get_module_args + volume_resp = MockVolumeApi.VOLUME_GET_LIST + volume_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock( + side_effect=MockApiException + ) + volume_module_mock.create_volume = MagicMock(return_value=None) + volume_module_mock.perform_module_operation() + assert MockVolumeApi.get_exception_response('get_details') in volume_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/unity/CHANGELOG.rst b/ansible_collections/dellemc/unity/CHANGELOG.rst new file mode 100644 index 00000000..4ec2b3c2 --- /dev/null +++ b/ansible_collections/dellemc/unity/CHANGELOG.rst @@ -0,0 +1,146 @@ +=========================== +Dellemc.Unity Change Log +=========================== + +.. contents:: Topics + + +v1.5.0 +====== + +Minor Changes +------------- + +- Updated modules to adhere with ansible community guidelines. + +v1.4.1 +====== + +Minor Changes +------------- + +- Updated the execution environment related files. + +v1.4.0 +====== + +Minor Changes +------------- + +- Added cifsserver module to support create, list and delete CIFS server. +- Added execution environment manifest file to support building an execution environment with ansible-builder. +- Added interface module to support create, list and delete interface. +- Added nfsserver module to support create, list and delete NFS server. +- Check mode is supported for Info. +- Enhance nfs module to support advanced host management option. 
+- Enhanced filesystem module to support create, modify and delete of filesystem replication. +- Enhanced info module to list cifs server, nfs servers, ethernet port and file interface. +- Enhanced nas server module to support create, modify and delete of nas server replication. + +New Modules +----------- + +- dellemc.unity.cifsserver - Manage CIFS server on Unity storage system +- dellemc.unity.interface - Manage Interfaces on Unity storage system +- dellemc.unity.nfsserver - Manage NFS server on Unity storage system + +v1.3.0 +====== + +Minor Changes +------------- + +- Added rotating file handler for logging. +- Bugfix in volume module to retrieve details of non-thin volumes. +- Enhance host module to support add/remove network address to/from a host. +- Enhanced Info module to list disk groups. +- Enhanced Storage Pool module to support listing of drive details of a pool +- Enhanced Storage pool module to support creation of storage pool +- Enhanced consistency group module to support enable/disable replication in consistency group +- Enhanced host module to support both mapping and un-mapping of non-logged-in initiators to host. +- Enhanced host module to support listing of network addresses, FC initiators, ISCSI initiators and allocated volumes of a host +- Removed dellemc.unity prefix from module names. +- Renamed gatherfacts module to info module + +v1.2.1 +====== + +Minor Changes +------------- + +- Added dual licensing +- Documentation updates +- Fixed typo in galaxy.yml +- Updated few samples in modules + +v1.2.0 +====== + +Minor Changes +------------- + +- Added CRUD operations support for Quota tree. +- Added CRUD operations support for User Quota on Filesystem/Quota tree. +- Added support for Application tagging. +- Consistency group module is enhanced to map/unmap hosts to/from a new or existing consistency group. +- Filesystem module is enhanced to associate/dissociate snapshot schedule to/from a Filesystem. 
+- Filesystem module is enhanced to update default quota configuration during create operation. +- Gather facts module is enhanced to list User Quota and Quota tree components. +- Volume module is enhanced to support map/unmap multiple hosts from a volume. + +New Modules +----------- + +- dellemc.unity.tree_quota - Manage quota tree on the Unity storage system +- dellemc.unity.user_quota - Manage user quota on the Unity storage system + +v1.1.0 +====== + +Minor Changes +------------- + +- Added CRUD operations support for Filesystem snapshot. +- Added CRUD operations support for Filesystem. +- Added CRUD operations support for NFS export. +- Added CRUD operations support for SMB share. +- Added support to get/modify operations on NAS server. +- Gather facts module is enhanced to list Filesystem snapshots, NAS servers, File systems, NFS exports, SMB shares. + +New Modules +----------- + +- dellemc.unity.filesystem - Manage filesystem on Unity storage system +- dellemc.unity.filesystem_snapshot - Manage filesystem snapshot on the Unity storage system +- dellemc.unity.nasserver - Manage NAS servers on Unity storage system +- dellemc.unity.nfs - Manage NFS export on Unity storage system +- dellemc.unity.smbshare - Manage SMB shares on Unity storage system + +v1.0.0 +====== + +Major Changes +------------- + +- Added CRUD operations support for Consistency group. +- Added CRUD operations support for Volume. +- Added CRUD operations support for a snapshot schedule. +- Added support for CRUD operations on a host with FC/iSCSI initiators. +- Added support for CRUD operations on a snapshot of a volume. +- Added support for adding/removing volumes to/from a consistency group. +- Added support to add/remove FC/iSCSI initiators to/from a host. +- Added support to create a snapshot for a consistency group. +- Added support to get/modify operations on storage pool. +- Added support to map/unmap a host to/from a snapshot. 
+- Gather facts module is enhanced to list volumes, consistency groups, FC initiators, iSCSI initiators, hosts, snapshot schedules. + +New Modules +----------- + +- dellemc.unity.consistencygroup - Manage consistency groups on Unity storage system +- dellemc.unity.host - Manage Host operations on Unity +- dellemc.unity.info - Gathering information about Unity +- dellemc.unity.snapshot - Manage snapshots on the Unity storage system +- dellemc.unity.snapshotschedule - Manage snapshot schedules on Unity storage system +- dellemc.unity.storagepool - Manage storage pool on Unity +- dellemc.unity.volume - Manage volume on Unity storage system diff --git a/ansible_collections/dellemc/unity/FILES.json b/ansible_collections/dellemc/unity/FILES.json new file mode 100644 index 00000000..b9900e51 --- /dev/null +++ b/ansible_collections/dellemc/unity/FILES.json @@ -0,0 +1,600 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b41c9ab9380851e938601886d427d61ffc094ebe4af44119bad6aa8ecc04c09", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "format": 1 + }, + { + "name": "MODULE-LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4aa53d1139ef2b275ab3fbe08e79a4cbbcec76931cb07e1602783327e39d4ed0", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a53a52d256a06ea2e1f309ba3ef542977f54ef173269c6f639ca35daa570849f", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eee0999f4405cf7aa065ce031b998060099308434b6199b6e5947fe645fa2402", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ad32bcb3927f6676bb3f1501ad872b55c87129d2266412051e57847a65508b1", + "format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/ADOPTERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c81933a41503275063d789f0685472e1603e4614376f3918b42c4bfb210a2c01", + "format": 1 + }, + { + "name": "docs/BRANCHING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae716ec9cdebdf9f24166ba485eba474de335db37bcf51e50d65dad5a6fdde85", + "format": 1 + }, + { + "name": "docs/CODE_OF_CONDUCT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d47343b6cae5e147a16cc1c312461d175f29d89c8c6094e024d6cb885f5cc36", + "format": 1 + }, + { + "name": "docs/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d07b61db5f2a7bb85d630242e70c23bf3d20a8272d8b42ac2113dfeed418ae60", + "format": 1 + }, + { + "name": "docs/INSTALLATION.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9a7a8e350f0dd552b51fe63cfdf2fe00ad2d69b34aef902156af23b2869bde1", + "format": 1 + }, + { + "name": "docs/ISSUE_TRIAGE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffc4402bfc8e68fff1db9aeff684948f4486a3f8cb5d58eebf0da3534747bcf3", + "format": 1 + }, + { + "name": "docs/MAINTAINER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86318bae1344674e30c98ddd4ed0e8b55db6ae5f39edf376274d81d13eb96db5", + "format": 1 + }, + { + "name": "docs/Product Guide.md", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "62295b1840e59bf555810686fef04553762475507d9ea5fb1cb56e72cdeb84c2", + "format": 1 + }, + { + "name": "docs/Release Notes.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16d85dd9544284e9a1602862c114798243a4e7ed5317869b68428e8087a977c4", + "format": 1 + }, + { + "name": "docs/SECURITY.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f97dbe35e85c9d0af79c9cf3229baa56f8f84c383ce43e9708329af68112391", + "format": 1 + }, + { + "name": "docs/SUPPORT.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc16b1e742969c2c208f6ea60211b637878f6278212f2a123b333b6448288733", + "format": 1 + }, + { + "name": "docs/MAINTAINERS.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e748fd39a38ac2a61aa6f48eac2179dffcc5a3e8f261f54042946d969bbfadf6", + "format": 1 + }, + { + "name": "docs/COMMITTER_GUIDE.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b41b38fe09cfcbfb4499c39ed4822a9f8c3f5d562e68dad45b5f2389f18053b5", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fecf89b56560e79560dba9f860201828a8df82323d02c3a6a4e5fbfaa0aed3a", + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ad0fa1a5cceaac69ea46df66d57fe6f290544c8efa6fabd2a2982296e428536", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/unity.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59baa9afd0063cf6ccc64d0be2f93bcdac944d2d53d9a9808654d5715173eab2", + "format": 1 + }, + { + "name": 
"plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/logging_handler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a940654af74df1d2ed5d35691ee46a542190227e414d87d68c471c3210202750", + "format": 1 + }, + { + "name": "plugins/module_utils/storage/dell/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fec20103cba14b247e99885a64d5f447876743d1a2c1aabfa41344fa3b5811a", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/cifsserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5557cf556c549d471c3a8f900598932edb116f83fe001aca27d20aeaabe8674", + "format": 1 + }, + { + "name": "plugins/modules/consistencygroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ef364861ab2603180a7025724892f168c8c5e3740795e3dc70f12d5d496154f", + "format": 1 + }, + { + "name": "plugins/modules/filesystem.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0c366bb29c733eb6a320a535c4d8271d0f0d027ae641b87186a317877358fd3", + "format": 1 + }, + { + "name": "plugins/modules/filesystem_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9dfcad8f1b83a848e82e15a29764bdb2831670467d097bb7d3f4860008c048b0", + "format": 1 + }, + { + "name": "plugins/modules/host.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f46cdb51124e9ffda1653ff5ffc2f20ddcae7fa1221423d46dcb7588d0b6baa", + "format": 1 + }, + { + "name": "plugins/modules/info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5da6a19f981ed6cb95050cb05b46cbe46ce7678de45ba8ed0437e3a717a1e13c", + "format": 1 + }, + { + "name": "plugins/modules/interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0eb4ab5d3132eaed48d560495e10d8a90e28a1e6cc457854c1d02294d205c2c0", + "format": 1 + }, + { + "name": "plugins/modules/nasserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cab2356f33d72c9080323dd4bde8d74ce7e85645075f4b34376cb8fe22a655bd", + "format": 1 + }, + { + "name": "plugins/modules/nfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bb77b56cdbd086ce949f04f62977af609e0c6cba159c651a1adc734fea015bc", + "format": 1 + }, + { + "name": "plugins/modules/nfsserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86e4b7c3383457ac04a53844b15a1bec8ef3e7991a2cc39cf2ff0d38df0cd8a2", + "format": 1 + }, + { + "name": "plugins/modules/smbshare.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84ebf06a630cf8057cd4d380c98f2dac00cfbffb53ff179e7144c2ba18339fc1", + "format": 1 + }, + { + "name": "plugins/modules/snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e186f0ce63a0b4c223341677372f7c1c500073e62dfa6fc63be80352d10b5f1e", + "format": 1 + }, + { + "name": "plugins/modules/snapshotschedule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af48985493c845285965ffcad9a85820d3300a17c527cb356ff2fedd3b9403c0", + "format": 1 + }, + { + "name": "plugins/modules/storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04e838a1151368436f2b10291084d14d42890d7d6102c30a536389a391e7ddea", + "format": 1 + }, + { + "name": "plugins/modules/tree_quota.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "ecdacddb2495e2bd561140796fdc802d9d33a75b9acf3301e9169c9f8e5d98c3", + "format": 1 + }, + { + "name": "plugins/modules/user_quota.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afcf7a3e5ff72c444866bd80a4d93071428d39614b9d78eeccc19cc7713931fe", + "format": 1 + }, + { + "name": "plugins/modules/volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a57a4ba37e699650ea05927c348b9e029e3db0f0be1f52df562e8f2209fbe0e6", + "format": 1 + }, + { + "name": "requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60355bcbb93016c5bff4bc3a5d1eb99d26cd74d0da681517e2a20545b71501d4", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0744d235664baa2ba82f601e70b4cdf68a2224dc8b71bb4c1d2c7989893b03e8", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0320ab54e2eff89b32cff7ed9256208968ae59a75bcc2fe4493dc9a58bd62ade", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0320ab54e2eff89b32cff7ed9256208968ae59a75bcc2fe4493dc9a58bd62ade", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_api_exception.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "a7a0fa0bda40ed6b6bbd4193bb68a8c8402864506aa9031f655fc102ca49bfb6", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_cifsserver_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b864ceef5c5db21f3282a76094c8d9226186dadebf9851432fff4fb57e59cfad", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_consistencygroup_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6df144fe81cfe2a4d59f7ff5c4d0f22925b0c9b9d6fadf56fee25f3b046efe47", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_filesystem_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "171c7ad10cbe32e041c124a91fc1c34fa6c80585da66afe7938c6133031e8202", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_host_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d7e6f5d137f6977389d84485ddffd09099d13b756256a8190b506b86089db1a", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_interface_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c8de82b6b7311a2ec191fc510d0bc06cde5e0627f74e83711de69c629e826fc", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_nasserver_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5f8e78abd2a70ecb4d42b4384b872f2075c9bc91bdca683a42787a0f5ce9851", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_nfs_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cea065ef9996c1ea912f4772c6a420d097db695863dcc06158e838f450f8f4c", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_nfsserver_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03b34a46b5c55696c4741dbd3749b1a654cf401e29e79443908145ef87ff5994", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_sdk_response.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "0813900fa591ec50510b41bfca7960791baf5ed9f08c2b41a09483b7011c21b4", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/mock_storagepool_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7fd67ca3924ba62b179b93f879ff07f16a1ada51742c12cd947e48e0aad26f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_cifsserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7031be0fee19af368f61e6f07404c941ec64139f066514eeca48a5f3c9224749", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_consistencygroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "684935db745c549cae4c337b85fede00ab0f95dea56c129c158abfb006cba901", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_filesystem.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c56c28cf9a4e3b50a4d7031863eb127fda8795da574024e2fc4351e5c002907", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_host.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbc6464fdcbc66df43da1ef26cbf64d86b77406f28ded032dc7c7c4da6034cd0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10476e71c1542a43c01b1c9622dbc54b32ae3512fd1cd3ecd2dbec61b06b373b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_nasserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09ef2b39687911942250b40ba350920a496e00dc864f05b8b46d2a79958769b1", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_nfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d87b5b2a61917c4057561c51921e493d4f4ad7936bbd80b4d8920af1b843fd02", + "format": 1 + }, + { + 
"name": "tests/unit/plugins/modules/test_nfsserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd07766bb20d3eacf1bf1823ecc019e081893ffc0195d5611103a0c62a713628", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_storagepool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63dafda1a4e630b23a637affabacfefd5c14a3231a0fae02d3886b0bf7656525", + "format": 1 + }, + { + "name": "tests/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea96c62d419724d448e0ed0b2099f5242e6a9cc26abca64844f1ed99c082e844", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/unity/LICENSE b/ansible_collections/dellemc/unity/LICENSE new file mode 100644 index 00000000..e72bfdda --- /dev/null +++ b/ansible_collections/dellemc/unity/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/dellemc/unity/MANIFEST.json b/ansible_collections/dellemc/unity/MANIFEST.json new file mode 100644 index 00000000..60dd525d --- /dev/null +++ b/ansible_collections/dellemc/unity/MANIFEST.json @@ -0,0 +1,41 @@ +{ + "collection_info": { + "namespace": "dellemc", + "name": "unity", + "version": "1.5.0", + "authors": [ + "Akash Shendge ", + "Ambuj Dubey ", + "Arindam Datta ", + "P Srinivas Rao ", + "Rajshree Khare ", + "Vivek Soni ", + "Spandita Panigrahi ", + "Ananthu S Kuttattu ", + "Pavan Mudunuri " + ], + "readme": "README.md", + "tags": [ + "storage" + ], + "description": "Ansible modules for Unity", + "license": [ + "GPL-3.0-or-later", + "Apache-2.0" + ], + "license_file": null, + "dependencies": {}, + "repository": "https://github.com/dell/ansible-unity/tree/1.5.0", + "documentation": "https://github.com/dell/ansible-unity/tree/1.5.0/docs", + "homepage": "https://github.com/dell/ansible-unity/tree/1.5.0", + "issues": "https://www.dell.com/community/Automation/bd-p/Automation" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b3628f9c11980e54957b66a4d52d99347ee35903e617762232cfb01288da811", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/dellemc/unity/MODULE-LICENSE b/ansible_collections/dellemc/unity/MODULE-LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/ansible_collections/dellemc/unity/MODULE-LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible_collections/dellemc/unity/README.md b/ansible_collections/dellemc/unity/README.md new file mode 100644 index 00000000..af6d887c --- /dev/null +++ b/ansible_collections/dellemc/unity/README.md @@ -0,0 +1,71 @@ +# Ansible Modules for Dell Technologies Unity + +The Ansible Modules for Dell Technologies (Dell) Unity allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration and management of Dell Unity arrays. + +The capabilities of the Ansible modules are managing consistency groups, filesystem, filesystem snapshots, CIFS server, NAS server, NFS server, NFS export, SMB share, interface, hosts, snapshots, snapshot schedules, storage pools, user quotas, quota trees and volumes. Capabilities also include gathering facts from the array. 
The options available for each are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request. + +## Table of contents + +* [Code of conduct](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CODE_OF_CONDUCT.md) +* [Maintainer guide](https://github.com/dell/ansible-unity/blob/1.5.0/docs/MAINTAINER_GUIDE.md) +* [Committer guide](https://github.com/dell/ansible-unity/blob/1.5.0/docs/COMMITTER_GUIDE.md) +* [Contributing guide](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CONTRIBUTING.md) +* [Branching strategy](https://github.com/dell/ansible-unity/blob/1.5.0/docs/BRANCHING.md) +* [List of adopters](https://github.com/dell/ansible-unity/blob/1.5.0/docs/ADOPTERS.md) +* [Maintainers](https://github.com/dell/ansible-unity/blob/1.5.0/docs/MAINTAINERS.md) +* [Support](https://github.com/dell/ansible-unity/blob/1.5.0/docs/SUPPORT.md) +* [License](#license) +* [Security](https://github.com/dell/ansible-unity/blob/1.5.0/docs/SECURITY.md) +* [Prerequisites](#prerequisites) +* [List of Ansible modules for Dell Unity](#list-of-ansible-modules-for-dell-unity) +* [Installation and execution of Ansible modules for Dell Unity](#installation-and-execution-of-ansible-modules-for-dell-unity) +* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation) + +## License +The Ansible collection for Unity is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-unity/blob/1.5.0/LICENSE) for the full terms. Ansible modules and module utilities that are part of the Ansible collection for Unity are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-unity/blob/1.5.0/MODULE-LICENSE) for the full terms. 
+ +## Supported Platforms + * Dell Unity Arrays version 5.1, 5.2 + +## Prerequisites +This table provides information about the software prerequisites for the Ansible Modules for Dell Unity. + +| **Ansible Modules** | **Python version** | **Storops - Python SDK version** | **Ansible** | +|---------------------|--------------------|----------------------------------|-------------| +| v1.5.0 | 3.9
3.10
3.11 | 1.2.11 | 2.12
2.13
2.14| + +## Idempotency +The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. + +## List of Ansible Modules for Dell Unity + * [Consistency group module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#consistency-group-module) + * [Filesystem module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#filesystem-module) + * [Filesystem snapshot module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#filesystem-snapshot-module) + * [Info module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#info-module) + * [Host module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#host-module) + * [CIFS server module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#cifs-server-module) + * [NAS server module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#nas-server-module) + * [NFS server module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#nfs-server-module) + * [NFS export module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#nfs-module) + * [SMB share module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#smb-share-module) + * [Interface module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#interface-module) + * [Snapshot module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#snapshot-module) + * [Snapshot schedule module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#snapshot-schedule-module) + * [Storage pool module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#storage-pool-module) + * [User quota 
module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#user-quota-module) + * [Quota tree module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#quota-tree-module) + * [Volume module](https://github.com/dell/ansible-unity/blob/1.5.0/docs/Product%20Guide.md#volume-module) + +## Installation and execution of Ansible modules for Dell Unity + +The installation and execution steps of Ansible modules for Dell Unity can be found [here](https://github.com/dell/ansible-unity/blob/1.5.0/docs/INSTALLATION.md). + +## Releasing, Maintenance and Deprecation + +Ansible Modules for Dell Technologies Unity follows [Semantic Versioning](https://semver.org/). + +New versions will be released regularly if significant changes (bug fix or new feature) are made in the collection. + +Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-unity/blob/1.5.0/docs/BRANCHING.md). + +Ansible Modules for Dell Technologies Unity deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). 
diff --git a/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..111a3618 --- /dev/null +++ b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml @@ -0,0 +1,102 @@ +objects: + role: {} +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + cifsserver: + description: Manage CIFS server on Unity storage system + name: cifsserver + namespace: '' + version_added: 1.4.0 + consistencygroup: + description: Manage consistency groups on Unity storage system + name: consistencygroup + namespace: '' + version_added: 1.1.0 + filesystem: + description: Manage filesystem on Unity storage system + name: filesystem + namespace: '' + version_added: 1.1.0 + filesystem_snapshot: + description: Manage filesystem snapshot on the Unity storage system + name: filesystem_snapshot + namespace: '' + version_added: 1.1.0 + host: + description: Manage Host operations on Unity + name: host + namespace: '' + version_added: 1.1.0 + info: + description: Gathering information about Unity + name: info + namespace: '' + version_added: 1.1.0 + interface: + description: Manage Interfaces on Unity storage system + name: interface + namespace: '' + version_added: 1.4.0 + nasserver: + description: Manage NAS servers on Unity storage system + name: nasserver + namespace: '' + version_added: 1.1.0 + nfs: + description: Manage NFS export on Unity storage system + name: nfs + namespace: '' + version_added: 1.1.0 + nfsserver: + description: Manage NFS server on Unity storage system + name: nfsserver + namespace: '' + version_added: 1.4.0 + smbshare: + description: Manage SMB shares on Unity storage system + name: smbshare + namespace: '' + version_added: 1.1.0 + snapshot: + description: Manage snapshots on the Unity storage system + name: snapshot + namespace: '' + version_added: 1.1.0 + snapshotschedule: 
+ description: Manage snapshot schedules on Unity storage system + name: snapshotschedule + namespace: '' + version_added: 1.1.0 + storagepool: + description: Manage storage pool on Unity + name: storagepool + namespace: '' + version_added: 1.1.0 + tree_quota: + description: Manage quota tree on the Unity storage system + name: tree_quota + namespace: '' + version_added: 1.2.0 + user_quota: + description: Manage user quota on the Unity storage system + name: user_quota + namespace: '' + version_added: 1.2.0 + volume: + description: Manage volume on Unity storage system + name: volume + namespace: '' + version_added: 1.1.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 1.5.0 diff --git a/ansible_collections/dellemc/unity/changelogs/changelog.yaml b/ansible_collections/dellemc/unity/changelogs/changelog.yaml new file mode 100644 index 00000000..3e869281 --- /dev/null +++ b/ansible_collections/dellemc/unity/changelogs/changelog.yaml @@ -0,0 +1,152 @@ +ancestor: null +releases: + 1.0.0: + changes: + major_changes: + - Added CRUD operations support for Consistency group. + - Added CRUD operations support for Volume. + - Added CRUD operations support for a snapshot schedule. + - Added support for CRUD operations on a host with FC/iSCSI initiators. + - Added support for CRUD operations on a snapshot of a volume. + - Added support for adding/removing volumes to/from a consistency group. + - Added support to add/remove FC/iSCSI initiators to/from a host. + - Added support to create a snapshot for a consistency group. + - Added support to get/modify operations on storage pool. + - Added support to map/unmap a host to/from a snapshot. + - Gather facts module is enhanced to list volumes, consistency groups, FC initiators, + iSCSI initiators, hosts, snapshot schedules. 
+ modules: + - description: Manage consistency groups on Unity storage system + name: consistencygroup + namespace: '' + - description: Manage Host operations on Unity + name: host + namespace: '' + - description: Gathering information about Unity + name: info + namespace: '' + - description: Manage snapshots on the Unity storage system + name: snapshot + namespace: '' + - description: Manage snapshot schedules on Unity storage system + name: snapshotschedule + namespace: '' + - description: Manage storage pool on Unity + name: storagepool + namespace: '' + - description: Manage volume on Unity storage system + name: volume + namespace: '' + release_date: '2020-06-20' + 1.1.0: + changes: + minor_changes: + - Added CRUD operations support for Filesystem snapshot. + - Added CRUD operations support for Filesystem. + - Added CRUD operations support for NFS export. + - Added CRUD operations support for SMB share. + - Added support to get/modify operations on NAS server. + - Gather facts module is enhanced to list Filesystem snapshots, NAS servers, + File systems, NFS exports, SMB shares. + modules: + - description: Manage filesystem on Unity storage system + name: filesystem + namespace: '' + - description: Manage filesystem snapshot on the Unity storage system + name: filesystem_snapshot + namespace: '' + - description: Manage NAS servers on Unity storage system + name: nasserver + namespace: '' + - description: Manage NFS export on Unity storage system + name: nfs + namespace: '' + - description: Manage SMB shares on Unity storage system + name: smbshare + namespace: '' + release_date: '2020-12-02' + 1.2.0: + changes: + minor_changes: + - Added CRUD operations support for Quota tree. + - Added CRUD operations support for User Quota on Filesystem/Quota tree. + - Added support for Application tagging. + - Consistency group module is enhanced to map/unmap hosts to/from a new or existing + consistency group. 
+ - Filesystem module is enhanced to associate/dissociate snapshot schedule to/from + a Filesystem. + - Filesystem module is enhanced to update default quota configuration during + create operation. + - Gather facts module is enhanced to list User Quota and Quota tree components. + - Volume module is enhanced to support map/unmap multiple hosts from a volume. + modules: + - description: Manage quota tree on the Unity storage system + name: tree_quota + namespace: '' + - description: Manage user quota on the Unity storage system + name: user_quota + namespace: '' + release_date: '2021-06-25' + 1.2.1: + changes: + minor_changes: + - Added dual licensing + - Documentation updates + - Fixed typo in galaxy.yml + - Updated few samples in modules + release_date: '2021-09-28' + 1.3.0: + changes: + minor_changes: + - Added rotating file handler for logging. + - Bugfix in volume module to retrieve details of non-thin volumes. + - Enhance host module to support add/remove network address to/from a host. + - Enhanced Info module to list disk groups. + - Enhanced Storage Pool module to support listing of drive details of a pool + - Enhanced Storage pool module to support creation of storage pool + - Enhanced consistency group module to support enable/disable replication in + consistency group + - Enhanced host module to support both mapping and un-mapping of non-logged-in + initiators to host. + - Enhanced host module to support listing of network addresses, FC initiators, + ISCSI initiators and allocated volumes of a host + - Removed dellemc.unity prefix from module names. + - Renamed gatherfacts module to info module + release_date: '2022-03-25' + 1.4.0: + changes: + minor_changes: + - Added cifsserver module to support create, list and delete CIFS server. + - Added execution environment manifest file to support building an execution + environment with ansible-builder. + - Added interface module to support create, list and delete interface. 
+ - Added nfsserver module to support create, list and delete NFS server. + - Check mode is supported for Info. + - Enhance nfs module to support advanced host management option. + - Enhanced filesystem module to support create, modify and delete of filesystem + replication. + - Enhanced info module to list cifs server, nfs servers, ethernet port and file + interface. + - Enhanced nas server module to support create, modify and delete of nas server + replication. + modules: + - description: Manage CIFS server on Unity storage system + name: cifsserver + namespace: '' + - description: Manage Interfaces on Unity storage system + name: interface + namespace: '' + - description: Manage NFS server on Unity storage system + name: nfsserver + namespace: '' + release_date: '2022-06-28' + 1.4.1: + changes: + minor_changes: + - Updated the execution environment related files. + release_date: '2022-09-27' + 1.5.0: + changes: + minor_changes: + - Updated modules to adhere with ansible community guidelines. 
+ release_date: '2022-12-20' diff --git a/ansible_collections/dellemc/unity/changelogs/config.yaml b/ansible_collections/dellemc/unity/changelogs/config.yaml new file mode 100644 index 00000000..b4bf6e16 --- /dev/null +++ b/ansible_collections/dellemc/unity/changelogs/config.yaml @@ -0,0 +1,33 @@ +--- +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues +title: Dellemc.Unity +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/dellemc/unity/docs/ADOPTERS.md b/ansible_collections/dellemc/unity/docs/ADOPTERS.md new file mode 100644 index 00000000..826b5cd7 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/ADOPTERS.md @@ -0,0 +1,11 @@ + + +# List of adopters diff --git a/ansible_collections/dellemc/unity/docs/BRANCHING.md b/ansible_collections/dellemc/unity/docs/BRANCHING.md new file mode 100644 index 00000000..810a309b --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/BRANCHING.md @@ -0,0 +1,32 @@ + + +# Branching strategy + +Ansible modules for Dell Unity follows a scaled trunk branching strategy where short-lived branches are created off of the main branch. When coding is complete, the branch is merged back into main after being approved in a pull request code review. 
+ +## Branch naming convention + +| Branch Type | Example | Comment | +|--------------|-----------------------------------|-------------------------------------------| +| main | main | | +| Release | release-1.0 | hotfix: release-1.1 patch: release-1.0.1 | +| Feature | feature-9-vol-support | "9" referring to GitHub issue ID | +| Bug Fix | bugfix-110-fix-duplicates-issue | "110" referring to GitHub issue ID | + + +## Steps for working on a release branch + +1. Fork the repository. +2. Create a branch off of the main branch. The branch name should follow [branch naming convention](#branch-naming-convention). +3. Make your changes and commit them to your branch. +4. If other code changes have merged into the upstream main branch, perform a rebase of those changes into your branch. +5. Open a [pull request](https://github.com/dell/ansible-unity/pulls) between your branch and the upstream main branch. +6. Once your pull request has merged, your branch can be deleted. diff --git a/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md b/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..c791055c --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,137 @@ + + +# Code of conduct - contributor covenant + +## Our pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at ansible.team@dell.com +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary ban + +**Community impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent ban + +**Community impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md new file mode 100644 index 00000000..8af0752e --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md @@ -0,0 +1,49 @@ + + +# Committer guidelines + +These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and not necessarily employees of Dell. + +These guidelines apply to everyone and as Committers you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse it. + +If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to not commit or lose these privileges all together. + +## General rules + +### Don't + +* Break the build. +* Commit directly. +* Compromise backward compatibility. 
+* Disrespect your Community Team members. Help them grow. +* Think it is someone else's job to test your code. Write tests for all the code you produce. +* Forget to keep things simple. +* Create technical debt. Fix-in-place and make it the highest priority above everything else. + +### Do + +* Keep it simple. +* Good work, your best every time. +* Keep the design of your software clean and maintainable. +* Squash your commits, avoid merges. +* Be active. Committers that are not active may have their permissions suspended. +* Write tests for all your deliverables. +* Automate everything. +* Maintain a high code coverage. +* Keep an open communication with other Committers. +* Ask questions. +* Document your contributions and remember to keep it simple. + +## People + +| Name | GitHub ID | Nickname | +|-------|-------------|------------| +| | | | diff --git a/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md new file mode 100644 index 00000000..542f8635 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md @@ -0,0 +1,173 @@ + + +# How to contribute + +Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CODE_OF_CONDUCT.md). 
+ +## Table of contents + +* [Become a contributor](#Become-a-contributor) +* [Submitting issues](#Submitting-issues) +* [Triage issues](#Triage-issues) +* [Your first contribution](#Your-first-contribution) +* [Branching](#Branching) +* [Signing your commits](#Signing-your-commits) +* [Pull requests](#Pull-requests) +* [Code reviews](#Code-reviews) +* [TODOs in the code](#TODOs-in-the-code) + +## Become a contributor + +You can contribute to this project in several ways. Here are some examples: + +* Contribute to the Ansible modules for Dell Unity documentation and codebase. +* Report and triage bugs. +* Feature requests. +* Write technical documentation and blog posts, for users and contributors. +* Help others by answering questions about this project. + +## Submitting issues + +All issues related to Ansible modules for Dell Unity, regardless of the service/repository the issue belongs to (see table above), should be submitted [here](https://github.com/dell/ansible-unity/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted. + +### Report bugs + +We aim to track and document everything related to Ansible modules for Dell Unity via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process. + +Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/ansible-unity/issues) for similar issues. + +Report a bug by submitting a [bug report](https://github.com/dell/ansible-unity/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug. + +When opening a Bug please include this information to help with debugging: + +1. 
Version of relevant software: this software, Ansible, Python, SDK, etc. +2. Details of the issue explaining the problem: what, when, where +3. The expected outcome that was not met (if any) +4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__ + +An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue. + +### Feature request + +If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/ansible-unity/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A). + +### Answering questions + +If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/ansible-unity/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A) + +We'd love your help answering questions being asked by other Ansible modules for Dell Unity users. + +## Triage issues + +Triage helps ensure that issues resolve quickly by: + +* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +* Giving a contributor the information they need before they commit to resolving an issue. +* Lowering the issue count by preventing duplicate issues. +* Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell Unity community will thank you for saving them time by spending some of yours. + +Read more about the ways you can [Triage issues](https://github.com/dell/ansible-unity/blob/1.5.0/docs/ISSUE_TRIAGE.md). + +## Your first contribution + +Unsure where to begin contributing? 
Start by browsing issues labeled `beginner friendly` or `help wanted`. + +* [Beginner-friendly](https://github.com/dell/ansible-unity/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete. +* [Help wanted](https://github.com/dell/ansible-unity/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity. + +When you're ready to contribute, it's time to create a pull request. + +## Branching + +* [Branching Strategy for Ansible modules for Dell Unity](https://github.com/dell/ansible-unity/blob/1.5.0/docs/BRANCHING.md) + +## Signing your commits + +We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions. + +GitHub will prevent a pull request from being merged if there are any unsigned commits. + +### Signing a commit + +GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key. + +Make sure you have your user name and e-mail set. This will be required for your signed commit to be properly verified. 
Check these references: + +* Setting up your GitHub user name [reference](https://help.github.com/articles/setting-your-username-in-git/) +* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/) + +Once Git and your GitHub account have been properly configured, you can add the -S flag to the git commits: + +```console +$ git commit -S -m "your commit message" +# Creates a signed commit +``` + +### Commit message format + +Ansible modules for Dell Unity uses the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/) + +## Pull requests + +If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it. + +To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines: + +* Title and description match the implementation. +* Commits within the pull request follow the formatting guidelines. +* The pull request closes one related issue. +* The pull request contains necessary tests that verify the intended behavior. +* If your pull request has conflicts, rebase your branch onto the main branch. + +If the pull request fixes a bug: + +* The pull request description must include `Fixes #`. +* To avoid regressions, the pull request should include tests that replicate the fixed bug. + +The team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become a part of the Git commit body. + +We use the pull request title when we generate change logs for releases. 
As such, we strive to make the title as informative as possible. + +Make sure that the title for your pull request uses the same format as the subject line in the commit message. + +### Quality gates for pull requests + +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-unity/blob/1.5.0/docs/SUPPORT.md). + +#### Code sanitization + +[GitHub action](https://github.com/dell/ansible-unity/actions/workflows/ansible-test.yml) that analyzes source code to flag ansible sanity errors and runs Unit tests. + +## Code reviews + +All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. + +A pull request must satisfy the following for it to be merged: + +* A pull request will require at least 2 maintainer approvals. + +* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document. +* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again. + +## Code style + +Ensure the added code has the required documentation, examples and unit tests. + +### Sanity + +Run ansible-test sanity --docker default on your code to ensure sanity. Ensure the code does not have any Ansible sanity violations and does not break any existing unit test workflows. + +### TODOs in the code + +We don't like TODOs in the code or documentation. 
It is really best if you sort out all issues you can see with the changes before we check the changes in. diff --git a/ansible_collections/dellemc/unity/docs/INSTALLATION.md b/ansible_collections/dellemc/unity/docs/INSTALLATION.md new file mode 100644 index 00000000..5efe10d5 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/INSTALLATION.md @@ -0,0 +1,100 @@ + + +# Installation and execution of Ansible modules for Dell Unity + +## Installation of SDK +* Install the python SDK named [Storops](https://pypi.org/project/storops/). It can be installed using pip, based on appropriate python version. Execute this command: + + pip install storops + +* Alternatively, Other installation ways can be found from [SDK](https://github.com/emc-openstack/storops#readme) page + +## Building collections + * Use this command to build the collection from source code: + + ansible-galaxy collection build + + For more details on how to build a tar ball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball) + +## Installing collections + +#### Online installation of collections + * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/unity): + + ansible-galaxy collection install dellemc.unity -p + +#### Offline installation of collections + + * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/unity) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/unity) and use this command to install the collection anywhere in your system: + + ansible-galaxy collection install dellemc-unity-1.5.0.tar.gz -p + + * Set the environment variable: + + export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS: + +## Using collections + + * In order to use any Ansible module, ensure that the importing of proper 
FQCN (Fully Qualified Collection Name) must be embedded in the playbook. + This example can be referred to: + + collections: + - dellemc.unity + + * In order to use installed collection in a specific task use a proper FQCN (Fully Qualified Collection Name). Refer to this example: + + tasks: + - name: Create volume + dellemc.unity.volume + + * For generating Ansible documentation for a specific module, embed the FQCN before the module name. Refer to this example: + + ansible-doc dellemc.unity.volume + + +## Ansible modules execution + +The Ansible server must be configured with Python library for Unity to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-unity/blob/1.5.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. + +## SSL certificate validation + +* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means. +* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command: + + export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<> +* Import the SSL certificate to host using the command: + + update-ca-trust extract +* If "TLS CA certificate bundle error" occurs, then follow these steps: + + cd /etc/pki/tls/certs/ + openssl x509 -in ca-bundle.crt -text -noout + +## Results +Each module returns the updated state and details of the entity, For example, if you are using the Volume module, all calls will return the updated details of the volume. Sample result is shown in each module's documentation. + +## Ansible execution environment +Ansible can also be installed in a container environment. Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments. 
+* Install the ansible builder package using: + + pip3 install ansible-builder +* Ensure the execution-environment.yml is at the root of collection and create the execution environment using: + + ansible-builder build --tag --container-runtime docker +* After the image is built, run the container using: + + docker run -it /bin/bash +* Verify collection installation using command: + + ansible-galaxy collection list +* The playbook can be run on the container using: + + docker run --rm -v $(pwd):/runner ansible-playbook info_test.yml diff --git a/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md new file mode 100644 index 00000000..e91bde9b --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md @@ -0,0 +1,308 @@ + + +# Triage issues + +The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it. + +> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic. + +The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project. + +Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with triage. 
The community will thank you for saving them time by spending some of yours. + +## 1. Find issues that need triage + +The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label. + +## 2. Ensure the issue contains basic information + +Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors to provide standard information that must be included for each type of template or type of issue. + +### Standard issue information that must be included + +This section describes the various issue templates and the expected content. + +#### Bug reports + +Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or may be related to the reported problem: + + - Ansible Version: [e.g. 2.14] + - Python Version [e.g. 3.10] + - Ansible modules for Dell Unity Version: [e.g. 1.5.0] + - Unity SDK version: [e.g. Unity 1.2.11] + - Any other additional information... + +#### Feature requests + +Should explain what feature the author wants to be added and why that is needed. + +#### Ask a question requests + +In general, if the issue description and title are perceived as a question no more information is needed. + +### Good practices + +To make it easier for everyone to understand and find issues they're searching for it's suggested as a general rule of thumb to: + +- Make sure that issue titles are named to explain the subject of the issue, have correct spelling and don't include irrelevant information and/or sensitive information. +- Make sure that issue descriptions don't include irrelevant information. +- Make sure that issues do not contain sensitive information. +- Make sure that issues have all relevant fields filled in. 
+- Do your best effort to change title and description or request suggested changes by adding a comment. + +> **Note:** Above rules are applicable to both new and existing issues. + +### Dealing with missing information + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label issue with `triage/needs-information`. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label issue with `triage/needs-information`. + +If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +If you receive a notification with additional information provided but you are not anymore on issue triage and you feel you do not have time to handle it, you should delegate it to the current person on issue triage. + +## 3. Categorizing an issue + +### Duplicate issues + +Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. If you think you know there is an existing issue, but can't find it, please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue: + +1. Add a comment `duplicate of #` +2. Add the `triage/duplicate` label + +### Bug reports + +If it's not perfectly clear that it's an actual bug, quickly try to reproduce it. + +**It's a bug/it can be reproduced:** + +1. Add a comment describing detailed steps for how to reproduce it, if applicable. +2. 
If you know that maintainers won't be able to put any resources into it for some time then label the issue with `help wanted` and optionally `beginner friendly` together with pointers on which code to update to fix the bug. This should signal to the community that we would appreciate any help we can get to resolve this. +3. Move on to [prioritizing the issue](#4-prioritization-of-issues). + +**It can't be reproduced:** + +1. Either [ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate it more thoroughly. Provide details in a comment. +2. Either [delegate further investigations](#investigation-of-issues) to someone else. Provide details in a comment. + +**It works as intended/by design:** + +1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue. +2. Label the issue `triage/works-as-intended`. +3. Remove the `needs-triage` label. + +**It does not work as intended/by design:** + +### Feature requests + +1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label and close the issue. +2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label. + +## 4. Prioritization of issues + +In general bugs and feature request issues should be labeled with a priority. + +Adding priority levels can be difficult. Ensure you have the knowledge, context, and the experience before prioritizing any issues. If you have any uncertainty as to which priority level to assign, please ask the maintainers for help. + +The key here is asking for help and discussing issues to understand how more experienced project members think and reason. By doing that you learn more and eventually become more and more comfortable with prioritizing issues. 
+ +In case there is an uncertainty around the prioritization of an issue, please ask the maintainers for help. + +| Label | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority immediately. | +| `priority/high` | Must be worked on soon, ideally in time for the next release. | +| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. | + +### Critical priority + +1. If an issue has been categorized and any of this criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority immediately. + + - Results in any data loss + - Critical security or performance issues + - Problem that makes a feature unusable + - Multiple users experience a severe problem affecting their business, users etc. + +2. Label the issue `priority/critical`. +3. Escalate the problem to the maintainers. +4. Assign or ask a maintainer for help assigning someone to make this issue their top priority immediately. +5. Add the issue to the next upcoming release milestone. + +### High priority + +1. Label the issue `priority/high`. +2. Add the issue to the next upcoming release milestone. +3. Prioritize it or assign someone to work on it now or very soon. +4. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +### Low priority + +1. If the issue is deemed possibly useful but a low priority label the issue `priority/low`. +2. The amount of interest in the issue will determine if the priority elevated. +3. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +## 5. 
Requesting help from the community + +Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that help from the community is appreciated and needed in case an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged. + +In many cases the issue author or community as a whole is more suitable to contribute changes since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and made an effort to get it to work and/or reached out to the community to get the missing information. + +1. Kindly and politely add a comment to alert update subscribers. + - Explain the issue and the need for resolution. Be sure to detail that the issue has not been prioritized and that the issue has not been scheduled for work by the maintainers. + - If possible or applicable, add pointers and references to the code/files that need to be revised. Provide any ideas as to the solution. This will help the maintainers get started on resolving the issue. +2. Label the issue with `help wanted`. +3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on. + +## Investigation of issues + +When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. Depending on the perceived severity and/or number of [upvotes](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments), the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it. 
+ +Even if you don't have the time or knowledge to investigate an issue we highly recommend that you [upvote](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments) the issue if you happen to have the same problem. If you have further details that may help investigating the issue please provide as much information as possible. + +## External pull requests + +Part of issue triage should also be triaging of external PRs. Main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten. + +1. Check new external PRs which do not have a reviewer. +1. Check if there is a link to an existing issue. +1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one. +1. Assign a reviewer based on who was handling the linked issue or what code or feature does the PR touches (look at who was the last to make changes there if all else fails). + +## GitHub issue management workflow + +This section describes the triage workflow for new GitGHub issues that get created. + +### GitHub Issue: Bug + +This workflow starts off with a GitHub issue of type bug being created. + +1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template +2. By default a bug will be created with the `type/bug` and `needs-triage` labels + +The following flow chart outlines the triage process for bugs. + + +``` + +--------------------------+ + | New bug issue opened/more| + | information added | + +-------------|------------+ + | + | + +----------------------------------+ NO +--------------|-------------+ + | label: triage/needs-information --------- All required information | + | | | contained in issue? | + +-----------------------------|----+ +--------------|-------------+ + | | YES + | | + +--------------------------+ | +---------------------+ YES +---------------------------------------+ + |label: | | | Dupicate Issue? 
------- Comment `Duplicate of #` + |triage/needs-investigation| | NO | | | Remove needs-triage label | + +------|-------------------+ | +----------|----------+ | label: triage/duplicate | + | | | NO +-----------------|---------------------+ + YES | | | | + | +---------------|----+ NO +------------|------------+ | + | |Needs investigation?|---------- Can it be reproduced? | | + |------- | +------------|------------+ | + +--------------------+ | YES | + | +----------|----------+ + +-------------------------+ +------------|------------+ | Close Issue | + | Add release-found label |------------------ Works as intended? | | | + | label: release-found/* | NO | | +----------|----------+ + +------------|------------+ +------------|------------+ | + | | | + | | YES | + +-----------------------------+ +----------------|----------------+ | + | Add area label | | Add comment | | + | label: area/* | | Remove needs-triage label ------------------| + +------------|----------------+ | label: triage/works-as-intended | + | +---------------------------------+ + | + +------------|-------------+ +----------+ + | Add priority label | | Done ---------------------------------------- + | label: priority/* | +----|-----+ | + +------------|-------------+ |NO | + | | +------------------|------------------+ + +------------|-------------+ +----|----------------+ YES | Add details to issue | + | ------------ Signal Community? ---------- label: help wanted | + |Remove needs-triage label | | | | label: beginner friendly (optional)| + +--------------------------+ +---------------------+ +-------------------------------------+ + +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +### GitHub issue: feature request + +This workflow starts off with a GitHub issue of type feature request being created. + +1. 
Collaborator or maintainer creates a GitHub feature request using the appropriate GitHub issue template +2. By default a feature request will be created with the `type/feature-request` and `needs-triage` labels + +This flow chart outlines the triage process for feature requests. + + +``` + +---------------------------------+ + |New feature request issue opened/| + |more information added | + +----------------|----------------+ + | + | + +---------------------------------+ NO +-------------|------------+ + | label: triage/needs-information ---------- All required information | + | | | contained in issue? | + +---------------------------------+ +-------------|------------+ + | + | + +---------------------------------------+ | + |Comment `Duplicate of #` | YES +----------|----------+ + |Remove needs-triage label ------- Duplicate issue? | + |label: triage/duplicate | | | + +-----|---------------------------------+ +-----------|---------+ + | |NO + | +-------------------------+ NO +-----------------------------+ + | |Add comment |-------- Does feature request align | + | |Remove needs-triage label| | with product vision? 
| + | +------|------------------+ +--------------|--------------+ + | | | YES + | | +-----------------|----------------+ + | | |Change feature-request to feature | + | | |Remove label: type/feature-request| + | | |Add label: type/feature | + | | +-----------------|----------------+ + | | | + | | +--------------|--------------+ + | | | Add area label | + | | | label: area/* | + | | +--------------|--------------+ + | | | + +-|---------|---+ +--------+ +--------------|--------------+ + | Close issue | | Done --------- Add priority label | + | | | | | label: priority/* | + +---------------+ +--------+ +-----------------------------+ +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +In some cases you may receive a request you do not wish to accept. Perhaps the request doesn't align with the project scope or vision. It is important to tactfully handle contributions that don't meet the project standards. + +1. Acknowledge the person behind the contribution and thank them for their interest and contribution +2. Explain why it didn't fit into the scope of the project or vision +3. Don't leave an unwanted contributions open. 
Immediately close the contribution you do not wish to accept diff --git a/ansible_collections/dellemc/unity/docs/MAINTAINERS.md b/ansible_collections/dellemc/unity/docs/MAINTAINERS.md new file mode 100644 index 00000000..24ab255d --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/MAINTAINERS.md @@ -0,0 +1,19 @@ + + +# Maintainers + +* Ananthu Kuttattu (kuttattz) +* Bhavneet Sharma (Bhavneet-Sharma) +* Jennifer John (Jennifer-John) +* Meenakshi Dembi (meenakshidembi691) +* Pavan Mudunuri (Pavan-Mudunuri) +* Previnkumar G (Previnkumar-G) +* Trisha Datta (trisha-dell) diff --git a/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md new file mode 100644 index 00000000..b5f68501 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md @@ -0,0 +1,38 @@ + + +# Maintainer guidelines + +As a Maintainer of this project you have the responsibility of keeping true to the vision of the project with high-degree quality. Being part of this group is a privilege that requires dedication and time to attend to the daily activities that are associated with the maintenance of this project. + +## Becoming a maintainer + +Most Maintainers started as Contributors that have demonstrated their commitment to the success of the project. Contributors wishing to become Maintainers must demonstrate commitment to the success of the project by contributing code, reviewing others' work, and triaging issues on a regular basis for at least three months. + +The contributions alone don't make you a Maintainer. You need to earn the trust of the current Maintainers and other project Contributors, that your decisions and actions are in the best interest of the project. + +Periodically, the existing Maintainers curate a list of Contributors who have shown regular activity on the project over the prior months. It is from this list that Maintainer candidates are selected. 
+ +After a candidate is selected, the existing Maintainers discuss the candidate over the next 5 business days, provide feedback, and vote. At least 75% of the current Maintainers must vote in the affirmative for a candidate to be moved to the role of Maintainer. + +If a candidate is approved, a Maintainer contacts the candidate to invite them to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a Maintainer once the pull request is merged. + +## Maintainer policies + +* Lead by example +* Follow the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-unity/blob/1.5.0/docs/COMMITTER_GUIDE.md) guides +* Promote a friendly and collaborative environment within our community +* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests +* Criticize code, not people. Ideally, tell the contributor a better way to do what they need. +* Clearly mark optional suggestions as such. Best practice, start your comment with *At your option: …* + +## Project decision making + +All project decisions should contribute to successfully executing on the project roadmap. Project milestones are established for each release. diff --git a/ansible_collections/dellemc/unity/docs/Product Guide.md b/ansible_collections/dellemc/unity/docs/Product Guide.md new file mode 100644 index 00000000..ec90bd97 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/Product Guide.md @@ -0,0 +1,8662 @@ +# Ansible Modules for Dell Technologies Unity +## Product Guide 1.5.0 +© 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell, and other trademarks are trademarks of Dell Inc. or its subsidiaries. Other trademarks may be trademarks of their respective owners. 
+ +-------------- +## Contents +* [CIFS Server Module](#cifs-server-module) + * [Synopsis](#synopsis) + * [Parameters](#parameters) + * [Notes](#notes) + * [Examples](#examples) + * [Return Values](#return-values) + * [Authors](#authors) +* [Consistency Group Module](#consistency-group-module) + * [Synopsis](#synopsis-1) + * [Parameters](#parameters-1) + * [Notes](#notes-1) + * [Examples](#examples-1) + * [Return Values](#return-values-1) + * [Authors](#authors-1) +* [Filesystem Module](#filesystem-module) + * [Synopsis](#synopsis-2) + * [Parameters](#parameters-2) + * [Notes](#notes-2) + * [Examples](#examples-2) + * [Return Values](#return-values-2) + * [Authors](#authors-2) +* [Filesystem Snapshot Module](#filesystem-snapshot-module) + * [Synopsis](#synopsis-3) + * [Parameters](#parameters-3) + * [Notes](#notes-3) + * [Examples](#examples-3) + * [Return Values](#return-values-3) + * [Authors](#authors-3) +* [Host Module](#host-module) + * [Synopsis](#synopsis-4) + * [Parameters](#parameters-4) + * [Notes](#notes-4) + * [Examples](#examples-4) + * [Return Values](#return-values-4) + * [Authors](#authors-4) +* [Info Module](#info-module) + * [Synopsis](#synopsis-5) + * [Parameters](#parameters-5) + * [Notes](#notes-5) + * [Examples](#examples-5) + * [Return Values](#return-values-5) + * [Authors](#authors-5) +* [Interface Module](#interface-module) + * [Synopsis](#synopsis-6) + * [Parameters](#parameters-6) + * [Notes](#notes-6) + * [Examples](#examples-6) + * [Return Values](#return-values-6) + * [Authors](#authors-6) +* [NAS Server Module](#nas-server-module) + * [Synopsis](#synopsis-7) + * [Parameters](#parameters-7) + * [Notes](#notes-7) + * [Examples](#examples-7) + * [Return Values](#return-values-7) + * [Authors](#authors-7) +* [NFS Module](#nfs-module) + * [Synopsis](#synopsis-8) + * [Parameters](#parameters-8) + * [Notes](#notes-8) + * [Examples](#examples-8) + * [Return Values](#return-values-8) + * [Authors](#authors-8) +* [NFS Server 
Module](#nfs-server-module) + * [Synopsis](#synopsis-9) + * [Parameters](#parameters-9) + * [Notes](#notes-9) + * [Examples](#examples-9) + * [Return Values](#return-values-9) + * [Authors](#authors-9) +* [SMB Share Module](#smb-share-module) + * [Synopsis](#synopsis-10) + * [Parameters](#parameters-10) + * [Notes](#notes-10) + * [Examples](#examples-10) + * [Return Values](#return-values-10) + * [Authors](#authors-10) +* [Snapshot Module](#snapshot-module) + * [Synopsis](#synopsis-11) + * [Parameters](#parameters-11) + * [Notes](#notes-11) + * [Examples](#examples-11) + * [Return Values](#return-values-11) + * [Authors](#authors-11) +* [Snapshot Schedule Module](#snapshot-schedule-module) + * [Synopsis](#synopsis-12) + * [Parameters](#parameters-12) + * [Notes](#notes-12) + * [Examples](#examples-12) + * [Return Values](#return-values-12) + * [Authors](#authors-12) +* [Storage Pool Module](#storage-pool-module) + * [Synopsis](#synopsis-13) + * [Parameters](#parameters-13) + * [Notes](#notes-13) + * [Examples](#examples-13) + * [Return Values](#return-values-13) + * [Authors](#authors-13) +* [Quota Tree Module](#quota-tree-module) + * [Synopsis](#synopsis-14) + * [Parameters](#parameters-14) + * [Notes](#notes-14) + * [Examples](#examples-14) + * [Return Values](#return-values-14) + * [Authors](#authors-14) +* [User Quota Module](#user-quota-module) + * [Synopsis](#synopsis-15) + * [Parameters](#parameters-15) + * [Notes](#notes-15) + * [Examples](#examples-15) + * [Return Values](#return-values-15) + * [Authors](#authors-15) +* [Volume Module](#volume-module) + * [Synopsis](#synopsis-16) + * [Parameters](#parameters-16) + * [Notes](#notes-16) + * [Examples](#examples-16) + * [Return Values](#return-values-16) + * [Authors](#authors-16) + +-------------- + +# CIFS Server Module + +Manage CIFS server on Unity storage system + +### Synopsis + Managing the CIFS server on the Unity storage system includes creating CIFS server, getting CIFS server details and deleting 
CIFS server. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
nas_server_name str
Name of the NAS server on which CIFS server will be hosted.
nas_server_id str
ID of the NAS server on which CIFS server will be hosted.
netbios_name str
The computer name of the SMB server in Windows network.
workgroup str
Standalone SMB server workgroup.
local_password str
Standalone SMB server administrator password.
domain str
The domain name where the SMB server is registered in Active Directory.
domain_username str
Active Directory domain user name.
domain_password str
Active Directory domain password.
cifs_server_name str
The name of the CIFS server.
cifs_server_id str
The ID of the CIFS server.
interfaces list
elements: str

List of file IP interfaces that service CIFS protocol of SMB server.
unjoin_cifs_server_account bool
Keep SMB server account unjoined in Active Directory after deletion.
False specifies keep SMB server account joined after deletion.
True specifies unjoin SMB server account from Active Directory before deletion.
state str True
  • absent
  • present

Define whether the CIFS server should exist or not.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create CIFS server belonging to Active Directory + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "test_nas1" + cifs_server_name: "test_cifs" + domain: "ad_domain" + domain_username: "domain_username" + domain_password: "domain_password" + state: "present" + +- name: Get CIFS server details using CIFS server ID + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_37" + state: "present" + +- name: Get CIFS server details using NAS server name + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "test_nas1" + state: "present" + +- name: Delete CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_37" + unjoin_cifs_server_account: True + domain_username: "domain_username" + domain_password: "domain_password" + state: "absent" + +- name: Create standalone CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + netbios_name: "ANSIBLE_CIFS" + workgroup: "ansible" + local_password: "Password123!" 
+ nas_server_name: "test_nas1" + state: "present" + +- name: Get CIFS server details using netbios name + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + netbios_name: "ANSIBLE_CIFS" + state: "present" + +- name: Delete standalone CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_40" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
cifs_server_details dict When CIFS server exists Details of the CIFS server.
  description str success Description of the SMB server.
  domain str success Domain name where SMB server is registered in Active Directory.
  file_interfaces dict success The file interfaces associated with the NAS server.
   UnityFileInterfaceList list success List of file interfaces associated with the NAS server.
    UnityFileInterface dict success Details of file interface associated with the NAS server.
  id str success Unique identifier of the CIFS server instance.
  is_standalone bool success Indicates whether the SMB server is standalone.
  name str success User-specified name for the SMB server.
  nasServer dict success Information about the NAS server in the storage system.
   UnityNasServer dict success Information about the NAS server in the storage system.
    id str success Unique identifier of the NAS server instance.
  netbios_name str success Computer Name of the SMB server in Windows network.
  smb_multi_channel_supported bool success Indicates whether the SMB 3.0+ multichannel feature is supported.
  smb_protocol_versions list success Supported SMB protocols, such as 1.0, 2.0, 2.1, 3.0, and so on.
  smbca_supported bool success Indicates whether the SMB server supports continuous availability.
  workgroup str success Windows network workgroup for the SMB server.
+ +### Authors +* Akash Shendge (@shenda1) + +-------------------------------- +# Consistency Group Module + +Manage consistency groups on Unity storage system + +### Synopsis + Managing the consistency group on the Unity storage system includes creating new consistency group, adding volumes to consistency group, removing volumes from consistency group, mapping hosts to consistency group, unmapping hosts from consistency group, renaming consistency group, modifying attributes of consistency group, enabling replication in consistency group, disabling replication in consistency group and deleting consistency group. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
cg_name str
The name of the consistency group.
It is mandatory for the create operation.
Specify either cg_name or cg_id (but not both) for any operation.
cg_id str
The ID of the consistency group.
It can be used only for get, modify, add/remove volumes, or delete operations.
volumes list
elements: dict

This is a list of volumes.
Either the volume ID or name must be provided for adding/removing existing volumes from consistency group.
If volumes are given, then vol_state should also be specified.
Volumes cannot be added/removed from consistency group, if the consistency group or the volume has snapshots.
  vol_id str
The ID of the volume.
  vol_name str
The name of the volume.
vol_state str
  • present-in-group
  • absent-in-group

String variable, describes the state of volumes inside consistency group.
If volumes are given, then vol_state should also be specified.
new_cg_name str
The new name of the consistency group, used in rename operation.
description str
Description of the consistency group.
snap_schedule str
Snapshot schedule assigned to the consistency group.
Specifying an empty string "" removes the existing snapshot schedule from consistency group.
tiering_policy str
  • AUTOTIER_HIGH
  • AUTOTIER
  • HIGHEST
  • LOWEST

Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
hosts list
elements: dict

This is a list of hosts.
Either the host ID or name must be provided for mapping/unmapping hosts for a consistency group.
If hosts are given, then mapping_state should also be specified.
Hosts cannot be mapped to a consistency group, if the consistency group has no volumes.
When a consistency group is being mapped to the host, users should not use the volume module to map the volumes in the consistency group to hosts.
  host_id str
The ID of the host.
  host_name str
The name of the host.
mapping_state str
  • mapped
  • unmapped

String variable, describes the state of hosts inside the consistency group.
If hosts are given, then mapping_state should also be specified.
replication_params dict
Settings required for enabling replication.
  destination_cg_name str
Name of the destination consistency group.
Default value will be source consistency group name prefixed by 'DR_'.
  replication_mode str True
  • asynchronous
  • manual

The replication mode.
  rpo int
Maximum time to wait before the system syncs the source and destination LUNs.
Option rpo should be specified if the replication_mode is asynchronous.
The value should be in range of 5 to 1440.
  replication_type str local
  • local
  • remote

Type of replication.
  remote_system dict
Details of remote system to which the replication is being configured.
The remote_system option should be specified if the replication_type is remote.
   remote_system_host str True
IP or FQDN for remote Unity unisphere Host.
   remote_system_username str True
User name of remote Unity unisphere Host.
   remote_system_password str True
Password of remote Unity unisphere Host.
   remote_system_verifycert bool True
Boolean variable to specify whether or not to validate SSL certificate of remote Unity unisphere Host.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
   remote_system_port int 443
Port at which remote Unity unisphere is hosted.
  destination_pool_name str
Name of pool to allocate destination Luns.
Mutually exclusive with destination_pool_id.
  destination_pool_id str
Id of pool to allocate destination Luns.
Mutually exclusive with destination_pool_name.
replication_state str
  • enable
  • disable

State of the replication.
state str True
  • absent
  • present

Define whether the consistency group should exist or not.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + cg_name: "{{cg_name}}" + description: "{{description}}" + snap_schedule: "{{snap_schedule1}}" + state: "present" + +- name: Get details of consistency group using id + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + state: "present" + +- name: Add volumes to consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + volumes: + - vol_name: "Ansible_Test-3" + - vol_id: "sv_1744" + vol_state: "{{vol_state_present}}" + state: "present" + +- name: Rename consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{cg_name}}" + new_cg_name: "{{new_cg_name}}" + state: "present" + +- name: Modify consistency group details + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + snap_schedule: "{{snap_schedule2}}" + tiering_policy: "{{tiering_policy1}}" + state: "present" + +- name: Map hosts to a consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + hosts: + - host_name: "10.226.198.248" + - 
host_id: "Host_511" + mapping_state: "mapped" + state: "present" + +- name: Unmap hosts from a consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + hosts: + - host_id: "Host_511" + - host_name: "10.226.198.248" + mapping_state: "unmapped" + state: "present" + +- name: Remove volumes from consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + volumes: + - vol_name: "Ansible_Test-3" + - vol_id: "sv_1744" + vol_state: "{{vol_state_absent}}" + state: "present" + +- name: Delete consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + state: "absent" + +- name: Enable replication for consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "cg_id_1" + replication_params: + destination_cg_name: "destination_cg_1" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "remote" + remote_system: + remote_system_host: '10.1.2.3' + remote_system_verifycert: False + remote_system_username: 'username' + remote_system_password: 'password' + destination_pool_name: "pool_test_1" + replication_state: "enable" + state: "present" + +- name: Disable replication for consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "dis_repl_ans_source" + replication_state: "disable" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
consistency_group_details dict When consistency group exists Details of the consistency group.
  block_host_access dict success Details of hosts mapped to the consistency group.
   UnityBlockHostAccessList list success List of hosts mapped to consistency group.
    UnityBlockHostAccess dict success Details of host.
  cg_replication_enabled bool success Whether or not the replication is enabled.
  id str success The system ID given to the consistency group.
  luns dict success Details of volumes part of consistency group.
   UnityLunList list success List of volumes part of consistency group.
    UnityLun dict success Detail of volume.
  relocation_policy str success FAST VP tiering policy for the consistency group.
  snap_schedule dict success Snapshot schedule applied to consistency group.
   UnitySnapSchedule dict success Snapshot schedule applied to consistency group.
    id str success The system ID given to the snapshot schedule.
    name str success The name of the snapshot schedule.
  snapshots list success List of snapshots of consistency group.
   creation_time str success Date and time on which the snapshot was taken.
   expirationTime str success Date and time after which the snapshot will expire.
   name str success Name of the snapshot.
   storageResource dict success Storage resource for which the snapshot was taken.
    UnityStorageResource dict success Details of the storage resource.
+ +### Authors +* Akash Shendge (@shenda1) + +-------------------------------- +# Filesystem Module + +Manage filesystem on Unity storage system + +### Synopsis + Managing filesystem on Unity storage system includes Create new filesystem, Modify snapschedule attribute of filesystem, Modify filesystem attributes, Display filesystem details, Display filesystem snapshots, Display filesystem snapschedule, Delete snapschedule associated with the filesystem, Delete filesystem, Create new filesystem with quota configuration, Enable, modify and disable replication. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
filesystem_name str
The name of the filesystem. Mandatory only for the create operation. All the operations are supported through filesystem_name.
It is mutually exclusive with filesystem_id.
filesystem_id str
The id of the filesystem.
It can be used only for get, modify, or delete operations.
It is mutually exclusive with filesystem_name.
pool_name str
This is the name of the pool where the filesystem will be created.
Either the pool_name or pool_id must be provided to create a new filesystem.
pool_id str
This is the ID of the pool where the filesystem will be created.
Either the pool_name or pool_id must be provided to create a new filesystem.
size int
The size of the filesystem.
cap_unit str
  • GB
  • TB

The unit of the filesystem size. It defaults to GB, if not specified.
nas_server_name str
Name of the NAS server on which filesystem will be hosted.
nas_server_id str
ID of the NAS server on which filesystem will be hosted.
supported_protocols str
  • NFS
  • CIFS
  • MULTIPROTOCOL

Protocols supported by the file system.
It will be overridden by NAS server configuration if NAS Server is Multiprotocol.
description str
Description about the filesystem.
Description can be removed by passing empty string ("").
smb_properties dict
Advance settings for SMB. It contains optional candidate variables.
  is_smb_sync_writes_enabled bool
Indicates whether the synchronous writes option is enabled on the file system.
  is_smb_notify_on_access_enabled bool
Indicates whether notifications of changes to directory file structure are enabled.
  is_smb_op_locks_enabled bool
Indicates whether opportunistic file locking is enabled on the file system.
  is_smb_notify_on_write_enabled bool
Indicates whether file write notifications are enabled on the file system.
  smb_notify_on_change_dir_depth int
Integer variable, determines the lowest directory level to which the enabled notifications apply.
Minimum value is 1.
data_reduction bool
Boolean variable, specifies whether or not to enable compression. Compression is supported only for thin filesystem.
is_thin bool
Boolean variable, specifies whether or not it is a thin filesystem.
access_policy str
  • NATIVE
  • UNIX
  • WINDOWS

Access policy of a filesystem.
locking_policy str
  • ADVISORY
  • MANDATORY

File system locking policies. These policy choices control whether the NFSv4 range locks must be honored.
tiering_policy str
  • AUTOTIER_HIGH
  • AUTOTIER
  • HIGHEST
  • LOWEST

Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
quota_config dict
Configuration for quota management. It contains optional parameters.
  grace_period int
Grace period set in quota configuration after soft limit is reached.
If grace_period is not set during creation of filesystem, it will be set to 7 days by default.
  grace_period_unit str
  • minutes
  • hours
  • days

Unit of grace period.
Default unit is days.
  default_hard_limit int
Default hard limit for user quotas and tree quotas.
If default_hard_limit is not set while creation of filesystem, it will be set to 0B by default.
  default_soft_limit int
Default soft limit for user quotas and tree quotas.
If default_soft_limit is not set while creation of filesystem, it will be set to 0B by default.
  is_user_quota_enabled bool
Indicates whether the user quota is enabled.
If is_user_quota_enabled is not set while creation of filesystem, it will be set to false by default.
Parameters is_user_quota_enabled and quota_policy are mutually exclusive.
  quota_policy str
  • FILE_SIZE
  • BLOCKS

Quota policy set in quota configuration.
If quota_policy is not set while creation of filesystem, it will be set to FILE_SIZE by default.
Parameters is_user_quota_enabled and quota_policy are mutually exclusive.
  cap_unit str
  • MB
  • GB
  • TB

Unit of default_soft_limit and default_hard_limit size.
Default unit is GB.
state str True
  • absent
  • present

State variable to determine whether filesystem will exist or not.
snap_schedule_name str
This is the name of an existing snapshot schedule which is to be associated with the filesystem.
This is mutually exclusive with snapshot_schedule_id.
snap_schedule_id str
This is the id of an existing snapshot schedule which is to be associated with the filesystem.
This is mutually exclusive with snapshot_schedule_name.
replication_params dict
Settings required for enabling or modifying replication.
  replication_name str
Name of the replication session.
  new_replication_name str
Replication name to rename the session to.
  replication_mode str
  • asynchronous
  • manual

The replication mode.
This is a mandatory field while creating a replication session.
  rpo int
Maximum time to wait before the system syncs the source and destination LUNs.
The rpo option should be specified if the replication_mode is asynchronous.
The value should be in range of 5 to 1440.
  replication_type str
  • local
  • remote

Type of replication.
  remote_system dict
Details of remote system to which the replication is being configured.
The remote_system option should be specified if the replication_type is remote.
   remote_system_host str True
IP or FQDN for remote Unity unisphere Host.
   remote_system_username str True
User name of remote Unity unisphere Host.
   remote_system_password str True
Password of remote Unity unisphere Host.
   remote_system_verifycert bool True
Boolean variable to specify whether or not to validate SSL certificate of remote Unity unisphere Host.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
   remote_system_port int 443
Port at which remote Unity unisphere is hosted.
  destination_pool_id str
ID of pool to allocate destination filesystem.
  destination_pool_name str
Name of pool to allocate destination filesystem.
replication_state str
  • enable
  • disable

State of the replication.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* SMB shares, NFS exports, and snapshots associated with filesystem need to be deleted prior to deleting a filesystem. +* The quota_config parameter can be used to update default hard limit and soft limit values to limit the maximum space that can be used. By default they both are set to 0 during filesystem creation which means unlimited. +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create FileSystem + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + pool_name: "pool_1" + size: 5 + state: "present" + +- name: Create FileSystem with quota configuration + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + pool_name: "pool_1" + size: 5 + quota_config: + grace_period: 8 + grace_period_unit: "days" + default_soft_limit: 10 + is_user_quota_enabled: False + state: "present" + +- name: Expand FileSystem size + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + size: 10 + state: "present" + +- name: Expand FileSystem size + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + size: 10 + state: "present" + +- name: Modify FileSystem smb_properties + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + 
password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + smb_properties: + is_smb_op_locks_enabled: True + smb_notify_on_change_dir_depth: 5 + is_smb_notify_on_access_enabled: True + state: "present" + +- name: Modify FileSystem Snap Schedule + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_141" + snap_schedule_id: "{{snap_schedule_id}}" + state: "{{state_present}}" + +- name: Get details of FileSystem using id + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + state: "present" + +- name: Delete a FileSystem using id + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + state: "absent" + +- name: Enable replication on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_repl" + replication_type: "remote" + replication_mode: "asynchronous" + rpo: 60 + remote_system: + remote_system_host: '0.1.2.3' + remote_system_verifycert: False + remote_system_username: 'username' + remote_system_password: 'password' + destination_pool_name: "pool_test_1" + replication_state: "enable" + state: "present" + +- name: Modify replication on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_repl" + new_replication_name: "test_repl_updated" + 
replication_mode: "asynchronous" + rpo: 50 + replication_state: "enable" + state: "present" + +- name: Disable replication on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_state: "disable" + state: "present" + +- name: Disable replication by specifying replication_name on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_replication" + replication_state: "disable" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
filesystem_details dict When filesystem exists Details of the filesystem.
  cifs_notify_on_change_dir_depth int success Indicates the lowest directory level to which the enabled notifications apply, if any.
  description str success Description about the filesystem.
  id str success The system generated ID given to the filesystem.
  is_cifs_notify_on_access_enabled bool success Indicates whether the system generates a notification when a user accesses the file system.
  is_cifs_notify_on_write_enabled bool success Indicates whether the system generates a notification when the file system is written to.
  is_cifs_op_locks_enabled bool success Indicates whether opportunistic file locks are enabled for the file system.
  is_cifs_sync_writes_enabled bool success Indicates whether the CIFS synchronous writes option is enabled for the file system.
  is_data_reduction_enabled bool success Whether or not compression enabled on this filesystem.
  is_thin_enabled bool success Indicates whether thin provisioning is enabled for this filesystem.
  name str success Name of the filesystem.
  nas_server dict success The NAS Server details on which this filesystem is hosted.
   id str success The system ID given to the NAS Server.
   name str success The name of the NAS Server.
  pool dict success The pool in which this filesystem is allocated.
   id str success The system ID given to the pool.
   name str success The name of the storage pool.
  quota_config dict success Details of quota configuration of the filesystem created.
   default_hard_limit int success Default hard limit for user quotas and tree quotas.
   default_soft_limit int success Default soft limit for user quotas and tree quotas.
   grace_period str success Grace period set in quota configuration after soft limit is reached.
   is_user_quota_enabled bool success Indicates whether the user quota is enabled.
   quota_policy str success Quota policy set in quota configuration.
  replication_sessions dict success List of replication sessions if replication is enabled.
   id str success ID of replication session
   name str success Name of replication session
   remote_system dict success Remote system
    id str success ID of remote system
  size_total_with_unit str success Size of the filesystem with actual unit.
  snap_schedule_id str success Indicates the id of the snap schedule associated with the filesystem.
  snap_schedule_name str success Indicates the name of the snap schedule associated with the filesystem.
  snapshots list success The list of snapshots of this filesystem.
   id str success The system ID given to the filesystem snapshot.
   name str success The name of the filesystem snapshot.
  tiering_policy str success Tiering policy applied to this filesystem.
+ +### Authors +* Arindam Datta (@dattaarindam) +* Meenakshi Dembi (@dembim) +* Spandita Panigrahi (@panigs7) + +-------------------------------- +# Filesystem Snapshot Module + +Manage filesystem snapshot on the Unity storage system + +### Synopsis + Managing Filesystem Snapshot on the Unity storage system includes create filesystem snapshot, get filesystem snapshot, modify filesystem snapshot and delete filesystem snapshot. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
snapshot_name str
The name of the filesystem snapshot.
Mandatory parameter for creating a filesystem snapshot.
For all other operations either snapshot_name or snapshot_id is required.
snapshot_id str
During creation snapshot_id is auto generated.
For all other operations either snapshot_id or snapshot_name is required.
filesystem_name str
The name of the Filesystem for which snapshot is created.
For creation of filesystem snapshot either filesystem_name or filesystem_id is required.
Not required for other operations.
filesystem_id str
The ID of the Filesystem for which snapshot is created.
For creation of filesystem snapshot either filesystem_id or filesystem_name is required.
Not required for other operations.
nas_server_name str
The name of the NAS server in which the Filesystem is created.
For creation of filesystem snapshot either nas_server_name or nas_server_id is required.
Not required for other operations.
nas_server_id str
The ID of the NAS server in which the Filesystem is created.
For creation of filesystem snapshot either nas_server_id or nas_server_name is required.
Not required for other operations.
auto_delete bool
This option specifies whether or not the filesystem snapshot will be automatically deleted.
If set to true, the filesystem snapshot will expire based on the pool auto deletion policy.
If set to false, the filesystem snapshot will not be auto deleted based on the pool auto deletion policy.
Option auto_delete can not be set to True, if expiry_time is specified.
If during creation neither auto_delete nor expiry_time is mentioned then the filesystem snapshot will be created keeping auto_delete as True.
Once the expiry_time is set, then the filesystem snapshot cannot be assigned to the auto delete policy.
expiry_time str
This option is for specifying the date and time after which the filesystem snapshot will expire.
The time is to be mentioned in UTC timezone.
The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
description str
The additional information about the filesystem snapshot can be provided using this option.
The description can be removed by passing an empty string.
fs_access_type str
  • Checkpoint
  • Protocol

Access type of the filesystem snapshot.
Required only during creation of filesystem snapshot.
If not given, snapshot's access type will be Checkpoint.
state str True
  • absent
  • present

The state option is used to mention the existence of the filesystem snapshot.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* Filesystem snapshot cannot be deleted, if it has nfs or smb share. +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Create Filesystem Snapshot + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + filesystem_name: "ansible_test_FS" + nas_server_name: "lglad069" + description: "Created using playbook" + auto_delete: True + fs_access_type: "Protocol" + state: "present" + + - name: Create Filesystem Snapshot with expiry time + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap_1" + filesystem_name: "ansible_test_FS_1" + nas_server_name: "lglad069" + description: "Created using playbook" + expiry_time: "04/15/2021 2:30" + fs_access_type: "Protocol" + state: "present" + + - name: Get Filesystem Snapshot Details using Name + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + state: "present" + + - name: Get Filesystem Snapshot Details using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + state: "present" + + - name: Update Filesystem Snapshot attributes + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + description: "Description updated" + auto_delete: False 
+ expiry_time: "04/15/2021 5:30" + state: "present" + + - name: Update Filesystem Snapshot attributes using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + expiry_time: "04/18/2021 8:30" + state: "present" + + - name: Delete Filesystem Snapshot using Name + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + state: "absent" + + - name: Delete Filesystem Snapshot using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
filesystem_snapshot_details dict When filesystem snapshot exists Details of the filesystem snapshot.
  access_type str success Access type of filesystem snapshot.
  attached_wwn str success Attached WWN details.
  creation_time str success Creation time of filesystem snapshot.
  creator_schedule str success Creator schedule of filesystem snapshot.
  creator_type str success Creator type for filesystem snapshot.
  creator_user str success Creator user for filesystem snapshot.
  description str success Description of the filesystem snapshot.
  expiration_time str success Date and time after which the filesystem snapshot will expire.
  filesystem_id str success Id of the filesystem for which the snapshot exists.
  filesystem_name str success Name of the filesystem for which the snapshot exists.
  id str success Unique identifier of the filesystem snapshot instance.
  is_auto_delete bool success Whether or not the filesystem snapshot is auto deleted.
  name str success The name of the filesystem snapshot.
  nas_server_id str success Id of the NAS server on which filesystem exists.
  nas_server_name str success Name of the NAS server on which filesystem exists.
  size int success Size of the filesystem snapshot.
+ +### Authors +* Rajshree Khare (@kharer5) + +-------------------------------- +# Host Module + +Manage Host operations on Unity + +### Synopsis + The Host module contains the operations Creation of a Host, Addition of initiators to Host, Removal of initiators from Host, Modification of host attributes, Get details of a Host, Deletion of a Host, Addition of network address to Host, Removal of network address from Host. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
host_name str
Name of the host.
Mandatory for host creation.
host_id str
Unique identifier of the host.
Host Id is auto generated during creation.
Except create, all other operations require either host_id or host_name.
description str
Host description.
host_os str
  • AIX
  • Citrix XenServer
  • HP-UX
  • IBM VIOS
  • Linux
  • Mac OS
  • Solaris
  • VMware ESXi
  • Windows Client
  • Windows Server

Operating system running on the host.
new_host_name str
New name for the host.
Only required in rename host operation.
initiators list
elements: str

List of initiators to be added/removed to/from host.
initiator_state str
  • present-in-host
  • absent-in-host

State of the initiator.
network_address str
Network address to be added/removed to/from the host.
Enter valid IPV4 or host name.
network_address_state str
  • present-in-host
  • absent-in-host

State of the Network address.
state str True
  • present
  • absent

State of the host.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create empty Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host" + host_os: "Linux" + description: "ansible-test-host" + state: "present" + +- name: Create Host with Initiators + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-1" + host_os: "Linux" + description: "ansible-test-host-1" + initiators: + - "iqn.1994-05.com.redhat:c38e6e8cfd81" + - "20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D" + initiator_state: "present-in-host" + state: "present" + +- name: Modify Host using host_id + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_id: "Host_253" + new_host_name: "ansible-test-host-2" + host_os: "Mac OS" + description: "Ansible tesing purpose" + state: "present" + +- name: Add Initiators to Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + initiators: + - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C" + initiator_state: "present-in-host" + state: "present" + +- name: Get Host details using host_name + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + state: "present" + +- name: Get Host details using host_id + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: 
"{{password}}" + validate_certs: "{{validate_certs}}" + host_id: "Host_253" + state: "present" + +- name: Delete Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + state: "absent" + +- name: Add network address to Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "{{host_name}}" + network_address: "192.168.1.2" + network_address_state: "present-in-host" + state: "present" + +- name: Delete network address from Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "{{host_name}}" + network_address: "192.168.1.2" + network_address_state: "absent-in-host" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
host_details dict When host exists. Details of the host.
  description str success Description about the host.
  fc_host_initiators list success Details of the FC initiators associated with the host.
   id str success Unique identifier of the FC initiator path.
   name str success FC Qualified Name (WWN) of the initiator.
   paths list success Details of the paths associated with the FC initiator.
    id str success Unique identifier of the path.
    is_logged_in bool success Indicates whether the host initiator is logged into the storage system.
  host_luns list success Details of luns attached to host.
  id str success The system ID given to the host.
  iscsi_host_initiators list success Details of the ISCSI initiators associated with the host.
   id str success Unique identifier of the ISCSI initiator path.
   name str success ISCSI Qualified Name (IQN) of the initiator.
   paths list success Details of the paths associated with the ISCSI initiator.
    id str success Unique identifier of the path.
    is_logged_in bool success Indicates whether the host initiator is logged into the storage system.
  name str success The name of the host.
  network_addresses list success List of network addresses mapped to the host.
  os_type str success Operating system running on the host.
  type str success HostTypeEnum of the host.
+ +### Authors +* Rajshree Khare (@kharer5) + +-------------------------------- +# Info Module + +Gathering information about Unity + +### Synopsis + Gathering information about Unity storage system includes Get the details of Unity array, Get list of Hosts in Unity array, Get list of FC initiators in Unity array, Get list of iSCSI initiators in Unity array, Get list of Consistency groups in Unity array, Get list of Storage pools in Unity array, Get list of Volumes in Unity array, Get list of Snapshot schedules in Unity array, Get list of NAS servers in Unity array, Get list of File systems in Unity array, Get list of Snapshots in Unity array, Get list of SMB shares in Unity array, Get list of NFS exports in Unity array, Get list of User quotas in Unity array, Get list of Quota tree in Unity array, Get list of NFS Servers in Unity array, Get list of CIFS Servers in Unity array. Get list of Ethernet ports in Unity array. Get list of File interfaces used in Unity array. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
gather_subset list
elements: str
  • host
  • fc_initiator
  • iscsi_initiator
  • cg
  • storage_pool
  • vol
  • snapshot_schedule
  • nas_server
  • file_system
  • snapshot
  • nfs_export
  • smb_share
  • user_quota
  • tree_quota
  • disk_group
  • nfs_server
  • cifs_server
  • ethernet_port
  • file_interface

List of string variables to specify the Unity storage system entities for which information is required.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Get detailed list of Unity entities + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - host + - fc_initiator + - iscsi_initiator + - cg + - storage_pool + - vol + - snapshot_schedule + - nas_server + - file_system + - snapshot + - nfs_export + - smb_share + - user_quota + - tree_quota + - disk_group + - nfs_server + - cifs_server + - ethernet_port + - file_interface + + - name: Get information of Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + + - name: Get list of hosts on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - host + + - name: Get list of FC initiators on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - fc_initiator + + - name: Get list of ISCSI initiators on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - iscsi_initiator + + - name: Get list of consistency groups on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - cg + + - name: Get list of storage pools on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + 
validate_certs: "{{validate_certs}}" + gather_subset: + - storage_pool + + - name: Get list of volumes on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + + - name: Get list of snapshot schedules on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - snapshot_schedule + + - name: Get list of NAS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nas_server + + - name: Get list of File Systems on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - file_system + + - name: Get list of Snapshots on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - snapshot + + - name: Get list of NFS exports on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nfs_export + + - name: Get list of SMB shares on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - smb_share + + - name: Get list of user quotas on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - user_quota + + - name: Get list of quota trees on Unity 
array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - tree_quota + + - name: Get list of disk groups on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - disk_group + + - name: Get list of NFS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nfs_server + + - name: Get list of CIFS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - cifs_server + + - name: Get list of ethernet ports on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - ethernet_port + + - name: Get list of file interfaces on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - file_interface +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
Array_Details dict always Details of the Unity Array.
  api_version str success The current api version of the Unity Array.
  earliest_api_version str success The earliest api version of the Unity Array.
  model str success The model of the Unity Array.
  name str success The name of the Unity Array.
  software_version str success The software version of the Unity Array.
CIFS_Servers list When CIFS Servers exist. Details of the CIFS Servers.
  id str success The ID of the CIFS Servers.
  name str success The name of the CIFS server.
Consistency_Groups list When Consistency Groups exist. Details of the Consistency Groups.
  id str success The ID of the Consistency Group.
  name str success The name of the Consistency Group.
Disk_Groups list When disk groups exist. Details of the disk groups.
  id str success The ID of the disk group.
  name str success The name of the disk group.
  tier_type str success The tier type of the disk group.
Ethernet_ports list When ethernet ports exist. Details of the ethernet ports.
  id str success The ID of the ethernet port.
  name str success The name of the ethernet port.
 FC_initiators list When FC initiators exist. Details of the FC initiators.
  WWN str success The WWN of the FC initiator.
  id str success The id of the FC initiator.
File_Systems list When File Systems exist. Details of the File Systems.
  id str success The ID of the File System.
  name str success The name of the File System.
 File_interfaces list When file interfaces exist. Details of the file interfaces.
   id str success The ID of the file interface.
   ip_address str success IP address of the file interface.
   name str success The name of the file interface.
Hosts list When hosts exist. Details of the hosts.
  id str success The ID of the host.
  name str success The name of the host.
ISCSI_initiators list When ISCSI initiators exist. Details of the ISCSI initiators.
  IQN str success The IQN of the ISCSI initiator.
  id str success The id of the ISCSI initiator.
NAS_Servers list When NAS Servers exist. Details of the NAS Servers.
  id str success The ID of the NAS Server.
  name str success The name of the NAS Server.
NFS_Exports list When NFS Exports exist. Details of the NFS Exports.
  id str success The ID of the NFS Export.
  name str success The name of the NFS Export.
NFS_Servers list When NFS Servers exist. Details of the NFS Servers.
   id str success The ID of the NFS server.
SMB_Shares list When SMB Shares exist. Details of the SMB Shares.
  id str success The ID of the SMB Share.
  name str success The name of the SMB Share.
Snapshot_Schedules list When Snapshot Schedules exist. Details of the Snapshot Schedules.
  id str success The ID of the Snapshot Schedule.
  name str success The name of the Snapshot Schedule.
Snapshots list When Snapshots exist. Details of the Snapshots.
  id str success The ID of the Snapshot.
  name str success The name of the Snapshot.
Storage_Pools list When Storage Pools exist. Details of the Storage Pools.
  id str success The ID of the Storage Pool.
  name str success The name of the Storage Pool.
Tree_Quotas list When quota trees exist. Details of the quota trees.
  id str success The ID of the quota tree.
  path str success The path of the quota tree.
User_Quotas list When user quotas exist. Details of the user quotas.
  id str success The ID of the user quota.
  uid str success The UID of the user quota.
Volumes list When Volumes exist. Details of the Volumes.
  id str success The ID of the Volume.
  name str success The name of the Volume.
+ +### Authors +* Rajshree Khare (@kharer5) +* Akash Shendge (@shenda1) +* Meenakshi Dembi (@dembim) + +-------------------------------- +# Interface Module + +Manage Interfaces on Unity storage system + +### Synopsis + Managing the Interfaces on the Unity storage system includes adding Interfaces to NAS Server, getting details of interface and deleting configured interfaces. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
nas_server_name str
Name of the NAS server for which interface will be configured.
nas_server_id str
ID of the NAS server for which interface will be configured.
ethernet_port_name str
Name of the ethernet port.
ethernet_port_id str
ID of the ethernet port.
role str
  • PRODUCTION
  • BACKUP

Indicates whether interface is configured as production or backup.
interface_ip str True
IP of network interface.
netmask str
Netmask of network interface.
prefix_length int
Prefix length is mutually exclusive with netmask.
gateway str
Gateway of network interface.
vlan_id int
Vlan id of the interface.
state str True
  • present
  • absent

Define whether the interface should exist or not.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is supported. +* Modify operation for interface is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Add Interface as Backup to NAS Server + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + ethernet_port_name: "SP A 4-Port Card Ethernet Port 0" + role: "BACKUP" + interface_ip: "xx.xx.xx.xx" + netmask: "xx.xx.xx.xx" + gateway: "xx.xx.xx.xx" + vlan_id: 324 + state: "present" + + - name: Add Interface as Production to NAS Server + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + ethernet_port_name: "SP A 4-Port Card Ethernet Port 0" + role: "PRODUCTION" + interface_ip: "xx.xx.xx.xx" + netmask: "xx.xx.xx.xx" + gateway: "xx.xx.xx.xx" + vlan_id: 324 + state: "present" + + - name: Get interface details + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + interface_ip: "xx.xx.xx.xx" + state: "present" + + - name: Delete Interface + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + interface_ip: "xx.xx.xx.xx" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
interface_details dict When interface is configured for NAS Server. Details of the interface.
  existed bool success Indicates if interface exists.
  gateway str success Gateway of network interface.
  id str success Unique identifier interface.
  ip_address str success IP address of interface.
  ip_port dict success Port on which network interface is configured.
   id str success ID of ip_port.
  ip_protocol_version str success IP protocol version.
  is_disabled bool success Indicates whether interface is disabled.
  is_preferred bool success Indicates whether interface is preferred.
  mac_address bool success Mac address of ip_port.
  name bool success System configured name of interface.
  nas_server dict success Details of NAS server where interface is configured.
   id str success ID of NAS Server.
+ +### Authors +* Meenakshi Dembi (@dembim) + +-------------------------------- +# NAS Server Module + +Manage NAS servers on Unity storage system + +### Synopsis + Managing NAS servers on Unity storage system includes get, modification to the NAS servers. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
nas_server_id str
The ID of the NAS server.
Either nas_server_name or nas_server_id is required to perform the task.
The parameters nas_server_name and nas_server_id are mutually exclusive.
nas_server_name str
The Name of the NAS server.
Either nas_server_name or nas_server_id is required to perform the task.
The parameters nas_server_name and nas_server_id are mutually exclusive.
nas_server_new_name str
The new name of the NAS server.
It can be mentioned during modification of the NAS server.
is_replication_destination bool
It specifies whether the NAS server is a replication destination.
It can be mentioned during modification of the NAS server.
is_backup_only bool
It specifies whether the NAS server is used as backup only.
It can be mentioned during modification of the NAS server.
is_multiprotocol_enabled bool
This parameter indicates whether multiprotocol sharing mode is enabled.
It can be mentioned during modification of the NAS server.
allow_unmapped_user bool
This flag is used to mandatorily disable access in case of any user mapping failure.
If true, then enable access in case of any user mapping failure.
If false, then disable access in case of any user mapping failure.
It can be mentioned during modification of the NAS server.
default_windows_user str
Default windows user name used for granting access in the case of Unix to Windows user mapping failure.
It can be mentioned during modification of the NAS server.
default_unix_user str
Default Unix user name used for granting access in the case of Windows to Unix user mapping failure.
It can be mentioned during modification of the NAS server.
enable_windows_to_unix_username_mapping bool
This parameter indicates whether a Unix to/from Windows user name mapping is enabled.
It can be mentioned during modification of the NAS server.
is_packet_reflect_enabled bool
If the packet has to be reflected, then this parameter has to be set to True.
It can be mentioned during modification of the NAS server.
current_unix_directory_service str
  • NONE
  • NIS
  • LOCAL
  • LDAP
  • LOCAL_THEN_NIS
  • LOCAL_THEN_LDAP

This is the directory service used for querying identity information for UNIX (such as UIDs, GIDs, net groups).
It can be mentioned during modification of the NAS server.
replication_params dict
Settings required for enabling replication.
  destination_nas_server_name str
Name of the destination nas server.
Default value will be source nas server name prefixed by 'DR_'.
  replication_mode str
  • asynchronous
  • manual

The replication mode.
This is mandatory to enable replication.
  rpo int
Maximum time to wait before the system syncs the source and destination LUNs.
The rpo option should be specified if the replication_mode is asynchronous.
The value should be in range of 5 to 1440.
  replication_type str
  • local
  • remote

Type of replication.
  remote_system dict
Details of remote system to which the replication is being configured.
The remote_system option should be specified if the replication_type is remote.
   remote_system_host str True
IP or FQDN for remote Unity unisphere Host.
   remote_system_username str True
User name of remote Unity unisphere Host.
   remote_system_password str True
Password of remote Unity unisphere Host.
   remote_system_verifycert bool True
Boolean variable to specify whether or not to validate SSL certificate of remote Unity unisphere Host.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
   remote_system_port int 443
Port at which remote Unity unisphere is hosted.
  destination_pool_name str
Name of pool to allocate destination Luns.
Mutually exclusive with destination_pool_id.
  destination_pool_id str
Id of pool to allocate destination Luns.
Mutually exclusive with destination_pool_name.
  destination_sp str
  • SPA
  • SPB

 Storage processor of destination NAS server.
  is_backup bool
Indicates if the destination nas server is backup.
  replication_name str
User defined name for replication session.
  new_replication_name str
Replication name to rename the session to.
replication_state str
  • enable
  • disable

State of the replication.
replication_reuse_resource bool
This parameter indicates if existing NAS Server is to be used for replication.
state str True
  • present
  • absent

Define the state of NAS server on the array.
The value present indicates that NAS server should exist on the system after the task is executed.
In this release deletion of NAS server is not supported. Hence, if state is set to absent for any existing NAS server then error will be thrown.
For any non-existing NAS server, if state is set to absent then it will return None.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Get Details of NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "{{nas_server_name}}" + state: "present" + + - name: Modify Details of NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "{{nas_server_name}}" + nas_server_new_name: "updated_sample_nas_server" + is_replication_destination: False + is_backup_only: False + is_multiprotocol_enabled: True + allow_unmapped_user: True + default_unix_user: "default_unix_sample_user" + default_windows_user: "default_windows_sample_user" + enable_windows_to_unix_username_mapping: True + current_unix_directory_service: "LDAP" + is_packet_reflect_enabled: True + state: "present" + + - name: Enable replication for NAS Server on Local System + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_id: "nas_10" + replication_reuse_resource: False + replication_params: + replication_name: "test_replication" + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "local" + destination_pool_name: "Pool_Ansible_Neo_DND" + destination_sp: "SPA" + is_backup: True + replication_state: "enable" + state: "present" + + - name: Enable replication for NAS Server on Remote System + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_reuse_resource: False + 
replication_params: + replication_name: "test_replication" + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "remote" + remote_system: + remote_system_host: '10.10.10.10' + remote_system_verifycert: False + remote_system_username: 'test1' + remote_system_password: 'test1!' + destination_pool_name: "fastVP_pool" + destination_sp: "SPA" + is_backup: True + replication_state: "enable" + state: "present" + + - name: Enable replication for NAS Server on Remote System in existing NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_reuse_resource: True + replication_params: + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "remote" + replication_name: "test_replication" + remote_system: + remote_system_host: '10.10.10.10' + remote_system_verifycert: False + remote_system_username: 'test1' + remote_system_password: 'test1!' 
+ destination_pool_name: "fastVP_pool" + replication_state: "enable" + state: "present" + + - name: Modify replication on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_params: + replication_name: "test_repl" + new_replication_name: "test_repl_updated" + replication_mode: "asynchronous" + rpo: 50 + replication_state: "enable" + state: "present" + + - name: Disable replication on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_state: "disable" + state: "present" + + - name: Disable replication by specifying replication_name on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_params: + replication_name: "test_replication" + replication_state: "disable" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
nas_server_details dict When NAS server exists. The NAS server details.
  allow_unmapped_user bool success Enable/disable access status in case of any user mapping failure.
  current_unix_directory_service str success Directory service used for querying identity information for UNIX (such as UIDs, GIDs, net groups).
  default_unix_user str success Default Unix user name used for granting access in the case of Windows to Unix user mapping failure.
  default_windows_user str success Default windows user name used for granting access in the case of Unix to Windows user mapping failure.
  id str success ID of the NAS server.
  is_backup_only bool success Whether the NAS server is used as backup only.
  is_multi_protocol_enabled bool success Indicates whether multiprotocol sharing mode is enabled.
  is_packet_reflect_enabled bool success If the packet reflect has to be enabled.
  is_replication_destination bool success If the NAS server is a replication destination then True.
  is_windows_to_unix_username_mapping_enabled bool success Indicates whether a Unix to/from Windows user name mapping is enabled.
  name str success Name of the NAS server.
+ +### Authors +* P Srinivas Rao (@srinivas-rao5) + +-------------------------------- +# NFS Module + +Manage NFS export on Unity storage system + +### Synopsis + Managing NFS export on Unity storage system includes- Create new NFS export, Modify NFS export attributes, Display NFS export details, Delete NFS export. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
nfs_export_name str
Name of the nfs export.
Mandatory for create operation.
Specify either nfs_export_name or nfs_export_id (but not both) for any operation.
nfs_export_id str
ID of the nfs export.
This is a unique ID generated by Unity storage system.
filesystem_name str
Name of the filesystem for which NFS export will be created.
Either filesystem or snapshot is required for creation of the NFS.
If filesystem_name is specified, then nas_server is required to uniquely identify the filesystem.
If filesystem parameter is provided, then snapshot cannot be specified.
filesystem_id str
ID of the filesystem.
This is a unique ID generated by Unity storage system.
snapshot_name str
Name of the snapshot for which NFS export will be created.
Either filesystem or snapshot is required for creation of the NFS export.
If snapshot parameter is provided, then filesystem cannot be specified.
snapshot_id str
ID of the snapshot.
This is a unique ID generated by Unity storage system.
nas_server_name str
Name of the NAS server on which filesystem will be hosted.
nas_server_id str
ID of the NAS server on which filesystem will be hosted.
path str
Local path to export relative to the NAS server root.
With NFS, each export of a file_system or file_snap must have a unique local path.
Mandatory while creating NFS export.
description str
Description of the NFS export.
Optional parameter when creating an NFS export.
To modify description, pass the new value in description field.
To remove description, pass the empty value in description field.
host_state str
  • present-in-export
  • absent-in-export

Define whether the hosts can access the NFS export.
Required when adding or removing access of hosts from the export.
anonymous_uid int
Specifies the user ID of the anonymous account.
If not specified at the time of creation, it will be set to 4294967294.
anonymous_gid int
Specifies the group ID of the anonymous account.
If not specified at the time of creation, it will be set to 4294967294.
state str True
  • absent
  • present

State variable to determine whether NFS export will exist or not.
default_access str
  • NO_ACCESS
  • READ_ONLY
  • READ_WRITE
  • ROOT
  • READ_ONLY_ROOT

Default access level for all hosts that can access the NFS export.
For hosts that need different access than the default, they can be configured by adding to the list.
If default_access is not mentioned during creation, then NFS export will be created with NO_ACCESS.
min_security str
  • SYS
  • KERBEROS
  • KERBEROS_WITH_INTEGRITY
  • KERBEROS_WITH_ENCRYPTION

NFS enforced security type for users accessing an NFS export.
If not specified at the time of creation, it will be set to SYS.
adv_host_mgmt_enabled bool
If false, allows you to specify hosts without first having to register them.
Mandatory while adding access hosts.
no_access_hosts list
elements: dict

Hosts with no access to the NFS export.
List of dictionaries. Each dictionary will have any of the keys from host_name, host_id, subnet, netgroup, domain and ip_address.
If adv_host_mgmt_enabled is true then the accepted keys are host_name, host_id and ip_address.
If adv_host_mgmt_enabled is false then the accepted keys are host_name, subnet, netgroup, domain and ip_address.
  host_name str
Name of the host.
  host_id str
ID of the host.
  ip_address str
IP address of the host.
  subnet str
Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
  netgroup str
Netgroup that is defined in NIS or the local netgroup file.
  domain str
DNS domain, where all NFS clients in the domain are included in the host list.
read_only_hosts list
elements: dict

Hosts with read-only access to the NFS export.
List of dictionaries. Each dictionary will have any of the keys from host_name, host_id, subnet, netgroup, domain and ip_address.
If adv_host_mgmt_enabled is true then the accepted keys are host_name, host_id and ip_address.
If adv_host_mgmt_enabled is false then the accepted keys are host_name, subnet, netgroup, domain and ip_address.
  host_name str
Name of the host.
  host_id str
ID of the host.
  ip_address str
IP address of the host.
  subnet str
Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
  netgroup str
Netgroup that is defined in NIS or the local netgroup file.
  domain str
DNS domain, where all NFS clients in the domain are included in the host list.
read_only_root_hosts list
elements: dict

Hosts with read-only for root user access to the NFS export.
List of dictionaries. Each dictionary will have any of the keys from host_name, host_id, subnet, netgroup, domain and ip_address.
If adv_host_mgmt_enabled is true then the accepted keys are host_name, host_id and ip_address.
If adv_host_mgmt_enabled is false then the accepted keys are host_name, subnet, netgroup, domain and ip_address.
  host_name str
Name of the host.
  host_id str
ID of the host.
  ip_address str
IP address of the host.
  subnet str
Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
  netgroup str
Netgroup that is defined in NIS or the local netgroup file.
  domain str
DNS domain, where all NFS clients in the domain are included in the host list.
read_write_hosts list
elements: dict

Hosts with read and write access to the NFS export.
List of dictionaries. Each dictionary will have any of the keys from host_name, host_id, subnet, netgroup, domain and ip_address.
If adv_host_mgmt_enabled is true then the accepted keys are host_name, host_id and ip_address.
If adv_host_mgmt_enabled is false then the accepted keys are host_name, subnet, netgroup, domain and ip_address.
  host_name str
Name of the host.
  host_id str
ID of the host.
  ip_address str
IP address of the host.
  subnet str
Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
  netgroup str
Netgroup that is defined in NIS or the local netgroup file.
  domain str
DNS domain, where all NFS clients in the domain are included in the host list.
read_write_root_hosts list
elements: dict

Hosts with read and write for root user access to the NFS export.
List of dictionaries. Each dictionary will have any of the keys from host_name, host_id, subnet, netgroup, domain and ip_address.
If adv_host_mgmt_enabled is true then the accepted keys are host_name, host_id and ip_address.
If adv_host_mgmt_enabled is false then the accepted keys are host_name, subnet, netgroup, domain and ip_address.
  host_name str
Name of the host.
  host_id str
ID of the host.
  ip_address str
IP address of the host.
  subnet str
Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
  netgroup str
Netgroup that is defined in NIS or the local netgroup file.
  domain str
DNS domain, where all NFS clients in the domain are included in the host list.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create nfs export from filesystem + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + path: '/' + filesystem_id: "fs_377" + state: "present" + +- name: Create nfs export from snapshot + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_snap" + path: '/' + snapshot_name: "ansible_fs_snap" + state: "present" + +- name: Modify nfs export + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + nas_server_id: "nas_3" + description: "" + default_access: "READ_ONLY_ROOT" + anonymous_gid: 4294967290 + anonymous_uid: 4294967290 + state: "present" + +- name: Add host in nfs export with adv_host_mgmt_enabled as true + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: true + no_access_hosts: + - host_id: "Host_1" + read_only_hosts: + - host_id: "Host_2" + read_only_root_hosts: + - host_name: "host_name1" + read_write_hosts: + - host_name: "host_name2" + read_write_root_hosts: + - ip_address: "1.1.1.1" + host_state: "present-in-export" + state: "present" + +- name: Remove host in nfs export with adv_host_mgmt_enabled as true + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + 
nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: true + no_access_hosts: + - host_id: "Host_1" + read_only_hosts: + - host_id: "Host_2" + read_only_root_hosts: + - host_name: "host_name1" + read_write_hosts: + - host_name: "host_name2" + read_write_root_hosts: + - ip_address: "1.1.1.1" + host_state: "absent-in-export" + state: "present" + +- name: Add host in nfs export with adv_host_mgmt_enabled as false + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: false + no_access_hosts: + - domain: "google.com" + read_only_hosts: + - netgroup: "netgroup_admin" + read_only_root_hosts: + - host_name: "host5" + read_write_hosts: + - subnet: "168.159.57.4/255.255.255.0" + read_write_root_hosts: + - ip_address: "10.255.2.4" + host_state: "present-in-export" + state: "present" + +- name: Remove host in nfs export with adv_host_mgmt_enabled as false + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: false + no_access_hosts: + - domain: "google.com" + read_only_hosts: + - netgroup: "netgroup_admin" + read_only_root_hosts: + - host_name: "host5" + read_write_hosts: + - subnet: "168.159.57.4/255.255.255.0" + read_write_root_hosts: + - ip_address: "10.255.2.4" + host_state: "absent-in-export" + state: "present" + +- name: Get nfs details + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_id: "NFSShare_291" + state: "present" + +- name: Delete nfs export by nfs name + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + 
password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_name" + nas_server_name: "ansible_nas_name" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
nfs_share_details dict When nfs export exists. Details of the nfs export.
  anonymous_gid int success Group ID of the anonymous account
  anonymous_uid int success User ID of the anonymous account
  default_access str success Default access level for all hosts that can access export
  description str success Description about the nfs export
  export_paths list success Export paths that can be used to mount and access export
  filesystem dict success Details of the filesystem on which nfs export is present
   UnityFileSystem dict success filesystem details
    id str success ID of the filesystem
    name str success Name of the filesystem
  id str success ID of the nfs export
  min_security str success NFS enforced security type for users accessing an export
  name str success Name of the nfs export
  nas_server dict success Details of the nas server
   UnityNasServer dict success NAS server details
    id str success ID of the nas server
    name str success Name of the nas server
  no_access_hosts_string str success Hosts with no access to the nfs export
  read_only_hosts_string str success Hosts with read-only access to the nfs export
  read_only_root_hosts_string str success Hosts with read-only for root user access to the nfs export
  read_write_hosts_string str success Hosts with read and write access to the nfs export
  read_write_root_hosts_string str success Hosts with read and write for root user access to export
  type str success NFS export type. i.e. filesystem or snapshot
+ +### Authors +* Vivek Soni (@v-soni11) + +-------------------------------- +# NFS Server Module + +Manage NFS server on Unity storage system + +### Synopsis + Managing the NFS server on the Unity storage system includes creating NFS server, getting NFS server details and deleting NFS server attributes. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
nas_server_name str
Name of the NAS server on which NFS server will be hosted.
nas_server_id str
ID of the NAS server on which NFS server will be hosted.
nfs_server_id str
ID of the NFS server.
host_name str
Host name of the NFS server.
nfs_v4_enabled bool
Indicates whether the NFSv4 is enabled on the NAS server.
is_secure_enabled bool
Indicates whether the secure NFS is enabled.
kerberos_domain_controller_type str
  • CUSTOM
  • UNIX
  • WINDOWS

Type of Kerberos Domain Controller used for secure NFS service.
kerberos_domain_controller_username str
Kerberos Domain Controller administrator username.
kerberos_domain_controller_password str
Kerberos Domain Controller administrator password.
is_extended_credentials_enabled bool
Indicates whether support for more than 16 Unix groups in a Unix credential is enabled.
remove_spn_from_kerberos bool True
Indicates whether to remove the SPN from Kerberos Domain Controller.
state str True
  • absent
  • present

Define whether the NFS server should exist or not.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is supported. +* Modify operation for NFS Server is not supported. +* When kerberos_domain_controller_type is UNIX, kdc_type in nfs_server_details output is displayed as null. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Create NFS server with kdctype as Windows + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + host_name: "dummy_nas23" + is_secure_enabled: True + kerberos_domain_controller_type: "WINDOWS" + kerberos_domain_controller_username: "administrator" + kerberos_domain_controller_password: "Password123!" + is_extended_credentials_enabled: True + nfs_v4_enabled: True + state: "present" + + - name: Create NFS server with kdctype as Unix + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + host_name: "dummy_nas23" + is_secure_enabled: True + kerberos_domain_controller_type: "UNIX" + is_extended_credentials_enabled: True + nfs_v4_enabled: True + state: "present" + + - name: Get NFS server details + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + state: "present" + + - name: Delete NFS server + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + kerberos_domain_controller_username: "administrator" + kerberos_domain_controller_password: "Password123!" 
+ unjoin_server_account: False + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
nfs_server_details dict When NFS server exists Details of the NFS server.
  credentials_cache_ttl str success Credential cache refresh timeout. Resolution is in minutes. Default value is 15 minutes.
  existed bool success Indicates if NFS Server exists.
  host_name str success Host name of the NFS server.
  id str success Unique identifier of the NFS Server instance.
  is_extended_credentials_enabled bool success Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential.
  is_secure_enabled bool success Indicates whether secure NFS is enabled on the NFS server.
  kdc_type str success Type of Kerberos Domain Controller used for secure NFS service.
  nfs_v4_enabled bool success Indicates whether NFSv4 is enabled on the NAS server.
  servicee_principal_name str success The Service Principal Name (SPN) for the NFS Server.
+ +### Authors +* Meenakshi Dembi (@dembim) + +-------------------------------- +# SMB Share Module + +Manage SMB shares on Unity storage system + +### Synopsis + Managing SMB Shares on Unity storage system includes create, get, modify, and delete the smb shares. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
share_name str
Name of the SMB share.
Required during creation of the SMB share.
For all other operations either share_name or share_id is required.
share_id str
ID of the SMB share.
Should not be specified during creation. Id is auto generated.
For all other operations either share_name or share_id is required.
If share_id is used then no need to pass nas_server/filesystem/snapshot/path.
path str
Local path to the file system/Snapshot or any existing sub-folder of the file system/Snapshot that is shared over the network.
Path is relative to the root of the filesystem.
Required for creation of the SMB share.
filesystem_id str
The ID of the File System.
Either filesystem_name or filesystem_id is required for creation of the SMB share for filesystem.
If filesystem_name is specified, then nas_server_name/nas_server_id is required to uniquely identify the filesystem.
Options filesystem_name and filesystem_id are mutually exclusive parameters.
snapshot_id str
The ID of the Filesystem Snapshot.
Either snapshot_name or snapshot_id is required for creation of the SMB share for a snapshot.
If snapshot_name is specified, then nas_server_name/nas_server_id is required to uniquely identify the snapshot.
Options snapshot_name and snapshot_id are mutually exclusive parameters.
nas_server_id str
The ID of the NAS Server.
It is not required if share_id is used.
filesystem_name str
The Name of the File System.
Either filesystem_name or filesystem_id is required for creation of the SMB share for filesystem.
If filesystem_name is specified, then nas_server_name/nas_server_id is required to uniquely identify the filesystem.
Options filesystem_name and filesystem_id are mutually exclusive parameters.
snapshot_name str
The Name of the Filesystem Snapshot.
Either snapshot_name or snapshot_id is required for creation of the SMB share for a snapshot.
If snapshot_name is specified, then nas_server_name/nas_server_id is required to uniquely identify the snapshot.
Options snapshot_name and snapshot_id are mutually exclusive parameters.
nas_server_name str
The Name of the NAS Server.
It is not required if share_id is used.
Options nas_server_name and nas_server_id are mutually exclusive parameters.
description str
Description for the SMB share.
Optional parameter when creating a share.
To modify, pass the new value in description field.
is_abe_enabled bool
Indicates whether Access-based Enumeration (ABE) for SMB share is enabled.
During creation, if not mentioned then default is false.
is_branch_cache_enabled bool
Indicates whether Branch Cache optimization for SMB share is enabled.
During creation, if not mentioned then default is false.
is_continuous_availability_enabled bool
Indicates whether continuous availability for SMB 3.0 is enabled.
During creation, if not mentioned then default is false.
is_encryption_enabled bool
Indicates whether encryption for SMB 3.0 is enabled at the shared folder level.
During creation, if not mentioned then default is false.
offline_availability str
  • MANUAL
  • DOCUMENTS
  • PROGRAMS
  • NONE

Defines valid states of Offline Availability.
MANUAL- Only specified files will be available offline.
DOCUMENTS- All files that users open will be available offline.
PROGRAMS- Program will preferably run from the offline cache even when connected to the network. All files that users open will be available offline.
NONE- Prevents clients from storing documents and programs in offline cache.
umask str
The default UNIX umask for new files created on the SMB Share.
state str True
  • absent
  • present

Define whether the SMB share should exist or not.
Value present indicates that the share should exist on the system.
Value absent indicates that the share should not exist on the system.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* When ID/Name of the filesystem/snapshot is passed then nas_server is not required. If passed, then filesystem/snapshot should exist for the mentioned nas_server, else the task will fail. +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create SMB share for a filesystem + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_smb_share" + filesystem_name: "sample_fs" + nas_server_id: "NAS_11" + path: "/sample_fs" + description: "Sample SMB share created" + is_abe_enabled: True + is_branch_cache_enabled: True + offline_availability: "DOCUMENTS" + is_continuous_availability_enabled: True + is_encryption_enabled: True + umask: "777" + state: "present" +- name: Modify Attributes of SMB share for a filesystem + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_smb_share" + nas_server_name: "sample_nas_server" + description: "Sample SMB share attributes updated" + is_abe_enabled: False + is_branch_cache_enabled: False + offline_availability: "MANUAL" + is_continuous_availability_enabled: "False" + is_encryption_enabled: "False" + umask: "022" + state: "present" +- name: Create SMB share for a snapshot + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_snap_smb_share" + snapshot_name: "sample_snapshot" + nas_server_id: "NAS_11" + path: "/sample_snapshot" + description: "Sample SMB share created for snapshot" + is_abe_enabled: True + is_branch_cache_enabled: True + is_continuous_availability_enabled: True + is_encryption_enabled: True + umask: "777" + 
state: "present" +- name: Modify Attributes of SMB share for a snapshot + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_snap_smb_share" + snapshot_name: "sample_snapshot" + description: "Sample SMB share attributes updated for snapshot" + is_abe_enabled: False + is_branch_cache_enabled: False + offline_availability: "MANUAL" + is_continuous_availability_enabled: "False" + is_encryption_enabled: "False" + umask: "022" + state: "present" +- name: Get details of SMB share + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_id: "{{smb_share_id}}" + state: "present" +- name: Delete SMB share + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_id: "{{smb_share_id}}" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
smb_share_details dict When share exists. The SMB share details.
  description str success Additional information about the share.
  filesystem_id str success The ID of the Filesystem.
  filesystem_name str success The Name of the filesystem
  id str success The ID of the SMB share.
  is_abe_enabled bool success Whether Access Based enumeration is enforced or not.
  is_branch_cache_enabled bool success Whether branch cache is enabled or not.
  is_continuous_availability_enabled bool success Whether the share will be available continuously or not.
  is_encryption_enabled bool success Whether encryption is enabled or not.
  name str success Name of the SMB share.
  nas_server_id str success The ID of the nas_server.
  nas_server_name str success The Name of the nas_server.
  snapshot_id str success The ID of the Snapshot.
  snapshot_name str success The Name of the Snapshot.
  umask str success Unix mask for the SMB share.
+ +### Authors +* P Srinivas Rao (@srinivas-rao5) + +-------------------------------- +# Snapshot Module + +Manage snapshots on the Unity storage system + +### Synopsis + Managing snapshots on the Unity storage system includes create snapshot, delete snapshot, update snapshot, get snapshot, map host and unmap host. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
snapshot_name str
The name of the snapshot.
Mandatory parameter for creating a snapshot.
For all other operations either snapshot_name or snapshot_id is required.
vol_name str
The name of the volume for which snapshot is created.
For creation of a snapshot either vol_name or cg_name is required.
Not required for other operations.
cg_name str
The name of the Consistency Group for which snapshot is created.
For creation of a snapshot either vol_name or cg_name is required.
Not required for other operations.
snapshot_id str
The id of the snapshot.
For all operations other than creation either snapshot_name or snapshot_id is required.
auto_delete bool
This option specifies whether the snapshot is auto deleted or not.
If set to true, snapshot will expire based on the pool auto deletion policy.
If set to (false), snapshot will not be auto deleted based on the pool auto deletion policy.
Option auto_delete can not be set to true, if expiry_time is specified.
If during creation neither auto_delete nor expiry_time is mentioned then snapshot will be created keeping auto_delete as true.
Once the expiry_time is set then snapshot cannot be assigned to the auto delete policy.
expiry_time str
This option is for specifying the date and time after which the snapshot will expire.
The time is to be mentioned in UTC timezone.
The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
description str
The additional information about the snapshot can be provided using this option.
new_snapshot_name str
New name for the snapshot.
state str True
  • absent
  • present

The state option is used to mention the existence of the snapshot.
host_name str
The name of the host.
Either host_name or host_id is required to map or unmap a snapshot from a host.
Snapshot can be attached to multiple hosts.
host_id str
The id of the host.
Either host_name or host_id is required to map or unmap a snapshot from a host.
Snapshot can be attached to multiple hosts.
host_state str
  • mapped
  • unmapped

The host_state option is used to mention the existence of the host for snapshot.
It is required when a snapshot is mapped or unmapped from host.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Create a Snapshot for a CG + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cg_name: "{{cg_name}}" + snapshot_name: "{{cg_snapshot_name}}" + description: "{{description}}" + auto_delete: False + state: "present" + + - name: Create a Snapshot for a volume with Host attached + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "{{vol_name}}" + snapshot_name: "{{vol_snapshot_name}}" + description: "{{description}}" + expiry_time: "04/15/2025 16:30" + host_name: "{{host_name}}" + host_state: "mapped" + state: "present" + + - name: Unmap a host for a Snapshot + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + snapshot_name: "{{vol_snapshot_name}}" + host_name: "{{host_name}}" + host_state: "unmapped" + state: "present" + + - name: Map snapshot to a host + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + snapshot_name: "{{vol_snapshot_name}}" + host_name: "{{host_name}}" + host_state: "mapped" + state: "present" + + - name: Update attributes of a Snapshot for a volume + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "{{vol_snapshot_name}}" + new_snapshot_name: "{{new_snapshot_name}}" + description: "{{new_description}}" + host_name: "{{host_name}}" 
+ host_state: "unmapped" + state: "present" + + - name: Delete Snapshot of CG + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "{{cg_snapshot_name}}" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
snapshot_details dict When snapshot exists Details of the snapshot.
  expiration_time str success Date and time after which the snapshot will expire.
  hosts_list dict success Contains the name and id of the associated hosts.
  id str success Unique identifier of the snapshot instance.
  is_auto_delete str success Indicates whether the snapshot is automatically deleted.
  name str success The name of the snapshot.
  storage_resource_id str success Id of the storage resource for which the snapshot exists.
  storage_resource_name str success Name of the storage resource for which the snapshot exists.
+ +### Authors +* P Srinivas Rao (@srinivas-rao5) + +-------------------------------- +# Snapshot Schedule Module + +Manage snapshot schedules on Unity storage system + +### Synopsis + Managing snapshot schedules on Unity storage system includes creating new snapshot schedule, getting details of snapshot schedule, modifying attributes of snapshot schedule, and deleting snapshot schedule. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
name str
The name of the snapshot schedule.
Name is mandatory for a create operation.
Specify either name or id (but not both) for any operation.
id str
The ID of the snapshot schedule.
type str
  • every_n_hours
  • every_day
  • every_n_days
  • every_week
  • every_month

Type of the rule to be included in snapshot schedule.
Type is mandatory for any create or modify operation.
Once the snapshot schedule is created with one type, the rule type cannot be modified; other parameters within the same type can be modified.
interval int
Number of hours between snapshots.
Applicable only when rule type is every_n_hours.
hours_of_day list
elements: int

Hours of the day when the snapshot will be taken.
Applicable only when rule type is every_day.
day_interval int
Number of days between snapshots.
Applicable only when rule type is every_n_days.
days_of_week list
elements: str
  • SUNDAY
  • MONDAY
  • TUESDAY
  • WEDNESDAY
  • THURSDAY
  • FRIDAY
  • SATURDAY

Days of the week for which the snapshot schedule rule applies.
Applicable only when rule type is every_week.
day_of_month int
Day of the month for which the snapshot schedule rule applies.
Applicable only when rule type is every_month.
Value should be [1, 31].
hour int
The hour when the snapshot will be taken.
Applicable for every_n_days, every_week, every_month rule types.
For create operation, if hour parameter is not specified, value will be taken as 0.
Value should be [0, 23].
minute int
Minute offset from the hour when the snapshot will be taken.
Applicable for all rule types.
For a create operation, if minute parameter is not specified, value will be taken as 0.
Value should be [0, 59].
desired_retention int
The number of days/hours for which snapshot will be retained.
When auto_delete is true, desired_retention cannot be specified.
Maximum desired retention supported is 31 days or 744 hours.
retention_unit str hours
  • hours
  • days

The retention unit for the snapshot.
auto_delete bool
Indicates whether the system can automatically delete the snapshot.
state str True
  • absent
  • present

Define whether the snapshot schedule should exist or not.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* Snapshot schedule created through Ansible will have only one rule. +* Modification of rule type is not allowed. Within the same type, other parameters can be modified. +* If an existing snapshot schedule has more than 1 rule in it, only get and delete operation is allowed. +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create snapshot schedule (Rule Type - every_n_hours) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Hours_Testing" + type: "every_n_hours" + interval: 6 + desired_retention: 24 + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_day) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Day_Testing" + type: "every_day" + hours_of_day: + - 8 + - 14 + auto_delete: True + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_n_days) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Day_Testing" + type: "every_n_days" + day_interval: 2 + desired_retention: 16 + retention_unit: "days" + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_week) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Week_Testing" + type: "every_week" + days_of_week: + - MONDAY + - FRIDAY + hour: 12 + minute: 30 + desired_retention: 200 + state: "{{state_present}}" + +- name: Create snapshot 
schedule (Rule Type - every_month) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Month_Testing" + type: "every_month" + day_of_month: 17 + auto_delete: True + state: "{{state_present}}" + +- name: Get snapshot schedule details using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Hours_Testing" + state: "{{state_present}}" + +- name: Get snapshot schedule details using id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + state: "{{state_present}}" + +- name: Modify snapshot schedule details id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + type: "every_n_hours" + interval: 8 + state: "{{state_present}}" + +- name: Modify snapshot schedule using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Day_Testing" + type: "every_day" + desired_retention: 200 + auto_delete: False + state: "{{state_present}}" + +- name: Delete snapshot schedule using id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + state: "{{state_absent}}" + +- name: Delete snapshot schedule using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: 
"Ansible_Every_Day_Testing" + state: "{{state_absent}}" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
snapshot_schedule_details dict When snapshot schedule exists Details of the snapshot schedule.
  id str success The system ID given to the snapshot schedule.
  luns dict success Details of volumes for which snapshot schedule applied.
   UnityLunList list success List of volumes for which snapshot schedule applied.
    UnityLun dict success Detail of volume.
  name str success The name of the snapshot schedule.
  rules list success Details of rules that apply to snapshot schedule.
   days_of_month list success Days of the month for which the snapshot schedule rule applies.
   days_of_week dict success Days of the week for which the snapshot schedule rule applies.
    DayOfWeekEnumList list success Enumeration of days of the week.
   hours list success Hourly frequency for the snapshot schedule rule.
   id str success The system ID of the rule.
   interval int success Number of days or hours between snaps, depending on the rule type.
   is_auto_delete bool success Indicates whether the system can automatically delete the snapshot based on pool automatic-deletion thresholds.
   minute int success Minute frequency for the snapshot schedule rule.
   retention_time int success Period of time in seconds for which to keep the snapshot.
   retention_time_in_hours int success Period of time in hours for which to keep the snapshot.
   rule_type str success Type of the rule applied to snapshot schedule.
  storage_resources dict success Details of storage resources for which snapshot schedule applied.
   UnityStorageResourceList list success List of storage resources for which snapshot schedule applied.
    UnityStorageResource dict success Detail of storage resource.
+ +### Authors +* Akash Shendge (@shenda1) + +-------------------------------- +# Storage Pool Module + +Manage storage pool on Unity + +### Synopsis + Managing storage pool on Unity storage system contains the operations Get details of storage pool, Create a storage pool, Modify storage pool. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
pool_name str
Name of the storage pool, unique in the storage system.
pool_id str
Unique identifier of the pool instance.
new_pool_name str
New name of the storage pool, unique in the storage system.
pool_description str
The description of the storage pool.
fast_cache str
  • enabled
  • disabled

Indicates whether the fast cache is enabled for the storage pool.
Enabled - FAST Cache is enabled for the pool.
Disabled - FAST Cache is disabled for the pool.
fast_vp str
  • enabled
  • disabled

Indicates whether to enable scheduled data relocations for the pool.
Enabled - Enabled scheduled data relocations for the pool.
Disabled - Disabled scheduled data relocations for the pool.
raid_groups dict
Parameters to create RAID group from the disks and add it to the pool.
  disk_group_id str
Id of the disk group.
  disk_num int
Number of disks.
  raid_type str
  • None
  • RAID5
  • RAID0
  • RAID1
  • RAID3
  • RAID10
  • RAID6
  • Mixed
  • Automatic

RAID group types or RAID levels.
  stripe_width str
  • BEST_FIT
  • 2
  • 4
  • 5
  • 6
  • 8
  • 9
  • 10
  • 12
  • 13
  • 14
  • 16

RAID group stripe widths, including parity or mirror disks.
alert_threshold int
Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage.
Minimum threshold limit is 50.
Maximum threshold limit is 84.
is_harvest_enabled bool
Enable/Disable automatic deletion of snapshots based on pool space usage.
pool_harvest_high_threshold float
Max threshold for space used in pool beyond which the system automatically starts deleting snapshots in the pool.
Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
Minimum pool harvest high threshold value is 1.
Maximum pool harvest high threshold value is 99.
pool_harvest_low_threshold float
Min threshold for space used in pool below which the system automatically stops deletion of snapshots in the pool.
Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
Minimum pool harvest low threshold value is 0.
Maximum pool harvest low threshold value is 98.
is_snap_harvest_enabled bool
Enable/Disable automatic deletion of snapshots based on pool space usage.
snap_harvest_high_threshold float
Max threshold for space used in snapshot beyond which the system automatically starts deleting snapshots in the pool.
Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
Minimum snap harvest high threshold value is 1.
Maximum snap harvest high threshold value is 99.
snap_harvest_low_threshold float
Min threshold for space used in snapshot below which the system will stop automatically deleting snapshots in the pool.
Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
Minimum snap harvest low threshold value is 0.
Maximum snap harvest low threshold value is 98.
pool_type str
  • TRADITIONAL
  • DYNAMIC

Indicates storage pool type.
state str True
  • absent
  • present

Define whether the storage pool should exist or not.
Present - indicates that the storage pool should exist on the system.
Absent - indicates that the storage pool should not exist on the system.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* Deletion of storage pool is not allowed through Ansible module. +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Get Storage pool details using pool_name + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "{{pool_name}}" + state: "present" + +- name: Get Storage pool details using pool_id + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_id: "{{pool_id}}" + state: "present" + +- name: Modify Storage pool attributes using pool_name + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "{{pool_name}}" + new_pool_name: "{{new_pool_name}}" + pool_description: "{{pool_description}}" + fast_cache: "{{fast_cache_enabled}}" + fast_vp: "{{fast_vp_enabled}}" + state: "present" + +- name: Modify Storage pool attributes using pool_id + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_id: "{{pool_id}}" + new_pool_name: "{{new_pool_name}}" + pool_description: "{{pool_description}}" + fast_cache: "{{fast_cache_enabled}}" + fast_vp: "{{fast_vp_enabled}}" + state: "present" + +- name: Create a StoragePool + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "Test" + pool_description: "test pool" + raid_groups: + disk_group_id : "dg_16" + disk_num : 2 + raid_type : "RAID10" + stripe_width : "BEST_FIT" + alert_threshold : 50 + is_harvest_enabled : True + 
pool_harvest_high_threshold : 60 + pool_harvest_low_threshold : 40 + is_snap_harvest_enabled : True + snap_harvest_high_threshold : 70 + snap_harvest_low_threshold : 50 + fast_vp: "enabled" + fast_cache: "enabled" + pool_type : "DYNAMIC" + state: "present" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the storage pool has changed.
storage_pool_details dict When storage pool exists. The storage pool details.
  drives list success Indicates information about the drives associated with the storage pool.
   disk_technology str success Indicates disk technology of the drive.
   id str success Unique identifier of the drive.
   name str success Indicates name of the drive.
   size str success Indicates size of the drive.
   tier_type str success Indicates tier type of the drive.
  id str success Pool id, unique identifier of the pool.
  is_fast_cache_enabled bool success Indicates whether the fast cache is enabled for the storage pool. true - FAST Cache is enabled for the pool. false - FAST Cache is disabled for the pool.
  is_fast_vp_enabled bool success Indicates whether to enable scheduled data relocations for the storage pool. true - Enabled scheduled data relocations for the pool. false - Disabled scheduled data relocations for the pool.
  name str success Pool name, unique in the storage system.
  size_free_with_unit str success Indicates size_free with its appropriate unit in human readable form.
  size_subscribed_with_unit str success Indicates size_subscribed with its appropriate unit in human readable form.
  size_total_with_unit str success Indicates size_total with its appropriate unit in human readable form.
  size_used_with_unit str success Indicates size_used with its appropriate unit in human readable form.
  snap_size_subscribed_with_unit str success Indicates snap_size_subscribed with its appropriate unit in human readable form.
  snap_size_used_with_unit str success Indicates snap_size_used with its appropriate unit in human readable form.
+ +### Authors +* Ambuj Dubey (@AmbujDube) + +-------------------------------- +# Quota Tree Module + +Manage quota tree on the Unity storage system + +### Synopsis + Managing Quota tree on the Unity storage system includes Create quota tree, Get quota tree, Modify quota tree and Delete quota tree. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
filesystem_name str
The name of the filesystem for which quota tree is created.
For creation or modification of a quota tree either filesystem_name or filesystem_id is required.
filesystem_id str
The ID of the filesystem for which the quota tree is created.
For creation of a quota tree either filesystem_id or filesystem_name is required.
nas_server_name str
The name of the NAS server in which the filesystem is created.
For creation of a quota tree either nas_server_name or nas_server_id is required.
nas_server_id str
The ID of the NAS server in which the filesystem is created.
For creation of a quota tree either nas_server_id or nas_server_name is required.
tree_quota_id str
The ID of the quota tree.
Either tree_quota_id or path to quota tree is required to view/modify/delete quota tree.
path str
The path to the quota tree.
Either tree_quota_id or path to quota tree is required to create/view/modify/delete a quota tree.
Path must start with a forward slash '/'.
hard_limit int
Hard limitation for a quota tree on the total space available. If exceeded, users in quota tree cannot write data.
Value 0 implies no limit.
One of the values of soft_limit and hard_limit can be 0, however, both cannot be 0 during creation of a quota tree.
soft_limit int
Soft limitation for a quota tree on the total space available. If exceeded, notification will be sent to users in the quota tree for the grace period mentioned, beyond which users cannot use space.
Value 0 implies no limit.
Both soft_limit and hard_limit cannot be 0 during creation of quota tree.
cap_unit str
  • MB
  • GB
  • TB

Unit of soft_limit and hard_limit size.
It defaults to GB if not specified.
description str
Description of a quota tree.
state str True
  • absent
  • present

The state option is used to mention the existence of the filesystem quota tree.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Get quota tree details by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_10" + state: "present" + + - name: Get quota tree details by quota tree path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "fs_2171" + nas_server_id: "nas_21" + path: "/test" + state: "present" + + - name: Create quota tree for a filesystem with filesystem id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + path: "/test_new" + state: "present" + + - name: Create quota tree for a filesystem with filesystem name + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "Test_filesystem" + nas_server_name: "lglad068" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + path: "/test_new" + state: "present" + + - name: Modify quota tree limit usage by quota tree path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + path: "/test_new" + hard_limit: 10 + cap_unit: "TB" + soft_limit: 8 + state: "present" + + - name: Modify quota tree by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + 
filesystem_id: "fs_2171" + tree_quota_id: "treequota_171798700679_10" + hard_limit: 12 + cap_unit: "TB" + soft_limit: 10 + state: "present" + + - name: Delete quota tree by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + tree_quota_id: "treequota_171798700679_10" + state: "absent" + + - name: Delete quota tree by path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/test_new" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
get_tree_quota_details dict When quota tree exists Details of the quota tree.
  description str success Description of the quota tree.
  filesystem dict success Filesystem details for which the quota tree is created.
   UnityFileSystem dict success Filesystem details for which the quota tree is created.
    id str success ID of the filesystem for which the quota tree is created.
  gp_left int success The grace period left after the soft limit for the user quota is exceeded.
  hard_limit int success Hard limit of quota tree. If the quota tree's space usage exceeds the hard limit, users in quota tree cannot write data.
  id str success Quota tree ID.
  path str success Path to quota tree. A valid path must start with a forward slash '/'. It is mandatory while creating a quota tree.
  size_used int success Size of used space in the filesystem by the user files.
  soft_limit int success Soft limit of the quota tree. If the quota tree's space usage exceeds the soft limit, the storage system starts to count down based on the specified grace period.
  state int success State of the quota tree.
+ +### Authors +* Spandita Panigrahi (@panigs7) + +-------------------------------- +# User Quota Module + +Manage user quota on the Unity storage system + +### Synopsis + Managing User Quota on the Unity storage system includes Create user quota, Get user quota, Modify user quota, Delete user quota, Create user quota for quota tree, Modify user quota for quota tree and Delete user quota for quota tree. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
filesystem_name str
The name of the filesystem for which the user quota is created.
For creation of a user quota either filesystem_name or filesystem_id is required.
filesystem_id str
The ID of the filesystem for which the user quota is created.
For creation of a user quota either filesystem_id or filesystem_name is required.
nas_server_name str
The name of the NAS server in which the filesystem is created.
For creation of a user quota either nas_server_name or nas_server_id is required.
nas_server_id str
The ID of the NAS server in which the filesystem is created.
For creation of a user quota either nas_server_id or nas_server_name is required.
hard_limit int
Hard limitation for a user on the total space available. If exceeded, user cannot write data.
Value 0 implies no limit.
One of the values of soft_limit and hard_limit can be 0, however, both cannot be 0 during creation or modification of user quota.
soft_limit int
Soft limitation for a user on the total space available. If exceeded, notification will be sent to the user for the grace period mentioned, beyond which the user cannot use space.
Value 0 implies no limit.
Both soft_limit and hard_limit cannot be 0 during creation or modification of user quota.
cap_unit str
  • MB
  • GB
  • TB

Unit of soft_limit and hard_limit size.
It defaults to GB if not specified.
user_type str
  • Unix
  • Windows

Type of user creating a user quota.
Mandatory while creating or modifying user quota.
win_domain str
Fully qualified or short domain name for Windows user type.
Mandatory when user_type is Windows.
user_name str
User name of the user quota when user_type is Windows or Unix.
Option user_name must be specified along with win_domain when user_type is Windows.
uid str
User ID of the user quota.
user_quota_id str
User quota ID generated after creation of a user quota.
tree_quota_id str
The ID of the quota tree.
Either tree_quota_id or path to quota tree is required to create/modify/delete user quota for a quota tree.
path str
The path to the quota tree.
Either tree_quota_id or path to quota tree is required to create/modify/delete user quota for a quota tree.
Path must start with a forward slash '/'.
state str True
  • absent
  • present

The state option is used to mention the existence of the user quota.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` + - name: Get user quota details by user quota id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + user_quota_id: "userquota_171798700679_0_123" + state: "present" + + - name: Get user quota details by user quota uid/user name + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "fs_2171" + nas_server_id: "nas_21" + user_name: "test" + state: "present" + + - name: Create user quota for a filesystem with filesystem id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + uid: "111" + state: "present" + + - name: Create user quota for a filesystem with filesystem name + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "Test_filesystem" + nas_server_name: "lglad068" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + uid: "111" + state: "present" + + - name: Modify user quota limit usage by user quota id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + user_quota_id: "userquota_171798700679_0_123" + hard_limit: 10 + cap_unit: "TB" + soft_limit: 8 + state: "present" + + - name: Modify user quota by filesystem id and user quota uid/user_name + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + 
password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + hard_limit: 12 + cap_unit: "TB" + soft_limit: 10 + state: "present" + + - name: Delete user quota + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + win_domain: "prod" + user_name: "sample" + state: "absent" + + - name: Create user quota of a quota tree + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + soft_limit: 9 + cap_unit: "TB" + state: "present" + + - name: Create user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + user_type: "Unix" + user_name: "test" + hard_limit: 2 + cap_unit: "TB" + state: "present" + + - name: Modify user quota of a quota tree + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + soft_limit: 10 + cap_unit: "TB" + state: "present" + + - name: Modify user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + hard_limit: 12 + cap_unit: "TB" + state: "present" + + - name: 
Delete user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + win_domain: "prod" + user_name: "sample" + state: "absent" + + - name: Delete user quota of a quota tree by quota tree id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + win_domain: "prod" + user_name: "sample" + state: "absent" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
get_user_quota_details dict When user quota exists Details of the user quota.
  filesystem dict success Filesystem details for which the user quota is created.
   UnityFileSystem dict success Filesystem details for which the user quota is created.
    id str success ID of the filesystem for which the user quota is created.
    name str success Name of filesystem.
    nas_server dict success Nasserver details where filesystem is created.
  gp_left int success The grace period left after the soft limit for the user quota is exceeded.
  hard_limit int success Hard limitation for a user on the total space available. If exceeded, user cannot write data.
  hard_ratio str success The hard ratio is the ratio between the hard limit size of the user quota and the amount of storage actually consumed.
  id str success User quota ID.
  size_used int success Size of used space in the filesystem by the user files.
  soft_limit int success Soft limitation for a user on the total space available. If exceeded, notification will be sent to user for the grace period mentioned, beyond which user cannot use space.
  soft_ratio str success The soft ratio is the ratio between the soft limit size of the user quota and the amount of storage actually consumed.
  state int success State of the user quota.
  tree_quota dict success Quota tree details for which the user quota is created.
   UnityTreeQuota dict success Quota tree details for which the user quota is created.
    id str success ID of the quota tree.
    path str success Path to quota tree.
  uid int success User ID of the user.
  unix_name str success Unix user name for this user quota's uid.
  windows_names str success Windows user name that maps to this quota's uid.
  windows_sids str success Windows SIDs that maps to this quota's uid
+ +### Authors +* Spandita Panigrahi (@panigs7) + +-------------------------------- +# Volume Module + +Manage volume on Unity storage system + +### Synopsis + Managing volume on Unity storage system includes- Create new volume, Modify volume attributes, Map Volume to host, Unmap volume to host, Display volume details, Delete volume. + +### Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterTypeRequiredDefaultChoicesDescription
vol_name str
The name of the volume. Mandatory only for create operation.
vol_id str
The id of the volume.
It can be used only for get, modify, map/unmap host, or delete operation.
pool_name str
This is the name of the pool where the volume will be created.
Either the pool_name or pool_id must be provided to create a new volume.
pool_id str
This is the id of the pool where the volume will be created.
Either the pool_name or pool_id must be provided to create a new volume.
size int
The size of the volume.
cap_unit str
  • GB
  • TB

The unit of the volume size. It defaults to GB, if not specified.
description str
Description about the volume.
Description can be removed by passing empty string ("").
snap_schedule str
Snapshot schedule assigned to the volume.
Add/Remove/Modify the snapshot schedule for the volume.
compression bool
Boolean variable , specifies whether or not to enable compression. Compression is supported only for thin volumes.
is_thin bool
Boolean variable , specifies whether or not it is a thin volume.
The value is set as true by default if not specified.
sp str
  • SPA
  • SPB

Storage Processor for this volume.
io_limit_policy str
IO limit policy associated with this volume. Once it is set, it cannot be removed through ansible module but it can be changed.
host_name str
Name of the host to be mapped/unmapped with this volume.
Either host_name or host_id can be specified in one task along with mapping_state.
host_id str
ID of the host to be mapped/unmapped with this volume.
Either host_name or host_id can be specified in one task along with mapping_state.
hlu int
Host Lun Unit to be mapped/unmapped with this volume.
It is an optional parameter, hlu can be specified along with host_name or host_id and mapping_state.
If hlu is not specified, unity will choose it automatically. The maximum value supported is 255.
mapping_state str
  • mapped
  • unmapped

State of host access for volume.
new_vol_name str
New name of the volume for rename operation.
tiering_policy str
  • AUTOTIER_HIGH
  • AUTOTIER
  • HIGHEST
  • LOWEST

Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
state str True
  • absent
  • present

State variable to determine whether volume will exist or not.
hosts list
elements: dict

Name of hosts for mapping to a volume.
  host_name str
Name of the host.
  host_id str
ID of the host.
  hlu str
Host Lun Unit to be mapped/unmapped with this volume.
It is an optional parameter, hlu can be specified along with host_name or host_id and mapping_state.
If hlu is not specified, unity will choose it automatically. The maximum value supported is 255.
unispherehost str True
IP or FQDN of the Unity management server.
username str True
The username of the Unity management server.
password str True
The password of the Unity management server.
validate_certs bool True
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified.
port int 443
Port number through which communication happens with Unity management server.
+ +### Notes +* The check_mode is not supported. +* The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform. + +### Examples +``` +- name: Create Volume + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + description: "{{description}}" + pool_name: "{{pool}}" + size: 2 + cap_unit: "{{cap_GB}}" + state: "{{state_present}}" + +- name: Expand Volume by volume id + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + size: 5 + cap_unit: "{{cap_GB}}" + state: "{{state_present}}" + +- name: Modify Volume, map host by host_name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + host_name: "{{host_name}}" + hlu: 5 + mapping_state: "{{state_mapped}}" + state: "{{state_present}}" + +- name: Modify Volume, unmap host mapping by host_name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + host_name: "{{host_name}}" + mapping_state: "{{state_unmapped}}" + state: "{{state_present}}" + +- name: Map multiple hosts to a Volume + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + hosts: + - host_name: "10.226.198.248" + hlu: 1 + - host_id: "Host_929" + hlu: 2 + mapping_state: "mapped" + state: "present" + +- name: Modify Volume attributes + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" 
+ vol_name: "{{vol_name}}" + new_vol_name: "{{new_vol_name}}" + tiering_policy: "AUTOTIER" + compression: True + state: "{{state_present}}" + +- name: Delete Volume by vol name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + state: "{{state_absent}}" + +- name: Delete Volume by vol id + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + state: "{{state_absent}}" +``` + +### Return Values + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeyTypeReturnedDescription
changed bool always Whether or not the resource has changed.
volume_details dict When volume exists Details of the volume.
  current_sp str success Current storage processor for this volume.
  description str success Description about the volume.
  host_access list success Host mapped to this volume.
  id str success The system generated ID given to the volume.
  io_limit_policy dict success IO limit policy associated with this volume.
  is_data_reduction_enabled bool success Whether or not compression enabled on this volume.
  is_thin_enabled bool success Indicates whether thin provisioning is enabled for this volume.
  name str success Name of the volume.
  pool dict success The pool in which this volume is allocated.
  size_total_with_unit str success Size of the volume with actual unit.
  snap_schedule dict success Snapshot schedule applied to this volume.
  tiering_policy str success Tiering policy applied to this volume.
  wwn str success The world wide name of this volume.
+ +### Authors +* Arindam Datta (@arindam-emc) + +-------------------------------- diff --git a/ansible_collections/dellemc/unity/docs/Release Notes.md b/ansible_collections/dellemc/unity/docs/Release Notes.md new file mode 100644 index 00000000..b5572899 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/Release Notes.md @@ -0,0 +1,76 @@ +**Ansible Modules for Dell Technologies Unity** +========================================= +### Release Notes 1.5.0 + +> © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell +> and other trademarks are trademarks of Dell Inc. or its +> subsidiaries. Other trademarks may be trademarks of their respective +> owners. + +Content +------- +These release notes contain supplemental information about Ansible +Modules for Dell Technologies (Dell) Unity. + +- Revision History +- Product Description +- New Features & Enhancements +- Known Issues +- Limitations +- Distribution +- Documentation + +Revision history +---------------- +The table in this section lists the revision history of this document. + +Table 1. Revision history + +| Revision | Date | Description | +|----------|----------------|---------------------------------------------------------| +| 01 | December 2022 | Current release of Ansible Modules for Dell Unity 1.5.0 | + +Product Description +------------------- +The Ansible modules for Dell Unity are used to automate and orchestrate the deployment, configuration, and management of Dell Unity Family systems, including Unity, Unity XT, and the UnityVSA. The capabilities of Ansible modules are managing host, consistency group, filesystem, filesystem snapshots, CIFS server, NAS servers, NFS server, NFS export, SMB shares, interface, snapshots, snapshot schedules, storage pool, tree quota, user quota, volumes and obtaining Unity system information. 
The options available for each capability are list, show, create, delete, and modify; except for the NAS server, for which the available options are list and modify, and for the CIFS and NFS servers, for which the available options are create, list, and modify.
+
+New features & enhancements
+---------------------------
+This release has the following changes:
+
+- Updated modules to adhere to Ansible community guidelines.
+
+Known issues
+------------
+Known issues in this release are listed below:
+- Filesystem creation with quota config
+  - Setting quota configuration while creating a filesystem may sometimes cause a delay in fetching the details about the quota config of the new filesystem. The module will throw an error prompting a rerun of the task to see the expected result.
+
+- Mapping and unmapping of hosts for a Consistency group
+  - Interoperability between Ansible Unity playbooks and Unisphere REST API is not supported for the mapping and unmapping of hosts for a consistency group.
+  > **WORKAROUND:** It is recommended to use Ansible Unity modules consistently for all mapping and unmapping of hosts for a consistency group instead of partially/mutually doing it through Unisphere and Ansible modules.
+
+- Unmapping of LUNs from a consistency group after disabling replication fails intermittently
+  - Immediate removal/unmapping of LUNs after disabling replication may fail with the following error message, which indicates that the consistency group has snapshots.
+
+    ``` "The LUN cannot be removed from the Consistency group because there are snapshots of the Consistency group that include the selected LUN. Please remove all snapshots containing the selected LUN and try again. (Error Code:0x6000c16)" ```
+
+  > **NOTE:** It is recommended to avoid immediate removal/unmapping of LUNs after disabling replication.
+
+
+Limitations
+-----------
+There are no known limitations.
+ +Distribution +---------------- +The software package is available for download from the [Ansible Modules +for Unity GitHub](https://github.com/dell/ansible-unity/) page. + +Documentation +------------- +The documentation is available on [Ansible Modules for Unity GitHub](https://github.com/dell/ansible-unity/tree/1.5.0/docs) +page. It includes the following: +- README +- Release Notes (this document) +- Product Guide diff --git a/ansible_collections/dellemc/unity/docs/SECURITY.md b/ansible_collections/dellemc/unity/docs/SECURITY.md new file mode 100644 index 00000000..bcbf644e --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/SECURITY.md @@ -0,0 +1,22 @@ + + +# Security policy + +The Ansible modules for Dell Unity repository are inspected for security vulnerabilities via blackduck scans and static code analysis. + +In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-unity/blob/1.5.0/docs/CONTRIBUTING.md#Pull-requests) for more information. + +## Reporting a vulnerability + +Have you discovered a security vulnerability in this project? +We ask you to alert the maintainers by sending an email, describing the issue, impact, and fix - if applicable. + +You can reach the Ansible modules for Dell Unity maintainers at ansible.team@dell.com. diff --git a/ansible_collections/dellemc/unity/docs/SUPPORT.md b/ansible_collections/dellemc/unity/docs/SUPPORT.md new file mode 100644 index 00000000..78931d07 --- /dev/null +++ b/ansible_collections/dellemc/unity/docs/SUPPORT.md @@ -0,0 +1,12 @@ + + +## Support +For all your support needs you can interact with us on [GitHub](https://github.com/dell/ansible-unity) by creating a [GitHub Issue](https://github.com/dell/ansible-unity/issues) or through the [Ansible Community](https://www.dell.com/community/Automation/bd-p/Automation). 
diff --git a/ansible_collections/dellemc/unity/meta/execution-environment.yml b/ansible_collections/dellemc/unity/meta/execution-environment.yml new file mode 100644 index 00000000..5aa14625 --- /dev/null +++ b/ansible_collections/dellemc/unity/meta/execution-environment.yml @@ -0,0 +1,5 @@ +--- +version: 1 +dependencies: + galaxy: requirements.yml + python: requirements.txt diff --git a/ansible_collections/dellemc/unity/meta/runtime.yml b/ansible_collections/dellemc/unity/meta/runtime.yml new file mode 100644 index 00000000..31f91244 --- /dev/null +++ b/ansible_collections/dellemc/unity/meta/runtime.yml @@ -0,0 +1,79 @@ +--- +requires_ansible: ">=2.12" +plugin_routing: + modules: + dellemc_unity_info: + redirect: dellemc.unity.info + deprecation: + removal_date: "2024-03-31" + warning_text: Use info instead. + dellemc_unity_gatherfacts: + redirect: dellemc.unity.info + deprecation: + removal_date: "2024-03-31" + warning_text: Use info instead. + dellemc_unity_consistencygroup: + redirect: dellemc.unity.consistencygroup + deprecation: + removal_date: "2024-03-31" + warning_text: Use consistencygroup instead. + dellemc_unity_filesystem_snapshot: + redirect: dellemc.unity.filesystem_snapshot + deprecation: + removal_date: "2024-03-31" + warning_text: Use filesystem_snapshot instead. + dellemc_unity_filesystem: + redirect: dellemc.unity.filesystem + deprecation: + removal_date: "2024-03-31" + warning_text: Use filesystem instead. + dellemc_unity_host: + redirect: dellemc.unity.host + deprecation: + removal_date: "2024-03-31" + warning_text: Use host instead. + dellemc_unity_nasserver: + redirect: dellemc.unity.nasserver + deprecation: + removal_date: "2024-03-31" + warning_text: Use nasserver instead. + dellemc_unity_nfs: + redirect: dellemc.unity.nfs + deprecation: + removal_date: "2024-03-31" + warning_text: Use nfs instead. + dellemc_unity_smbshare: + redirect: dellemc.unity.smbshare + deprecation: + removal_date: "2024-03-31" + warning_text: Use smbshare instead. 
+ dellemc_unity_snapshot: + redirect: dellemc.unity.snapshot + deprecation: + removal_date: "2024-03-31" + warning_text: Use snapshot instead. + dellemc_unity_snapshotschedule: + redirect: dellemc.unity.snapshotschedule + deprecation: + removal_date: "2024-03-31" + warning_text: Use snapshotschedule instead. + dellemc_unity_storagepool: + redirect: dellemc.unity.storagepool + deprecation: + removal_date: "2024-03-31" + warning_text: Use storagepool instead. + dellemc_unity_tree_quota: + redirect: dellemc.unity.tree_quota + deprecation: + removal_date: "2024-03-31" + warning_text: Use tree_quota instead. + dellemc_unity_user_quota: + redirect: dellemc.unity.user_quota + deprecation: + removal_date: "2024-03-31" + warning_text: Use user_quota instead. + dellemc_unity_volume: + redirect: dellemc.unity.volume + deprecation: + removal_date: "2024-03-31" + warning_text: Use volume instead. diff --git a/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py new file mode 100644 index 00000000..1ebc7f40 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py @@ -0,0 +1,53 @@ +# Copyright: (c) 2020, Dell Technologies. +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Documentation fragment for Unity (unity) + DOCUMENTATION = r''' + options: + unispherehost: + required: true + description: + - IP or FQDN of the Unity management server. + type: str + username: + type: str + required: true + description: + - The username of the Unity management server. + password: + type: str + required: true + description: + - The password of the Unity management server. 
+ validate_certs: + type: bool + default: true + aliases: + - verifycert + description: + - Boolean variable to specify whether or not to validate SSL + certificate. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be + verified. + port: + description: + - Port number through which communication happens with Unity + management server. + type: int + default: 443 + requirements: + - A Dell Unity Storage device version 5.1 or later. + - Ansible-core 2.12 or later. + - Python 3.9, 3.10 or 3.11. + - Storops Python SDK 1.2.11. + notes: + - The modules present in this collection named as 'dellemc.unity' + are built to support the Dell Unity storage platform. +''' diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py new file mode 100644 index 00000000..232814e5 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py @@ -0,0 +1,25 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Custom rotating file handler for Unity""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from datetime import datetime +from logging.handlers import RotatingFileHandler + + +class CustomRotatingFileHandler(RotatingFileHandler): + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + :param default_name: The default name of the log file. 
+ """ + src_file_name = default_name.split('.') + dest_file_name = "{0}_{1}.{2}.{3}".format( + src_file_name[0], '{0:%Y%m%d}'.format(datetime.now()), + src_file_name[1], src_file_name[2] + ) + return dest_file_name diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py new file mode 100644 index 00000000..c44b2bce --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py @@ -0,0 +1,254 @@ +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +from decimal import Decimal +import re +import traceback +import math +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell.logging_handler \ + import CustomRotatingFileHandler +from ansible.module_utils.basic import missing_required_lib + +try: + import urllib3 + + urllib3.disable_warnings() + HAS_URLLIB3, URLLIB3_IMP_ERR = True, None +except ImportError: + HAS_URLLIB3, URLLIB3_IMP_ERR = False, traceback.format_exc() + +try: + from storops import UnitySystem + from storops.unity.client import UnityClient + from storops.unity.resource import host, cg, snap_schedule, snap, \ + cifs_share, nas_server + from storops.unity.resource.lun import UnityLun + from storops.unity.resource.pool import UnityPool, UnityPoolList, RaidGroupParameter + from storops.unity.resource.filesystem import UnityFileSystem, \ + UnityFileSystemList + from storops.unity.resource.nas_server import UnityNasServer + from storops.unity.resource.nfs_share import UnityNfsShare, \ + UnityNfsShareList + from storops.unity.resource.snap_schedule import UnitySnapScheduleList, \ + UnitySnapSchedule + from storops.unity.resource.replication_session import UnityReplicationSession + from 
storops.unity.enums import HostInitiatorTypeEnum, \ + TieringPolicyEnum, ScheduleTypeEnum, DayOfWeekEnum, NodeEnum, \ + HostLUNAccessEnum, HostTypeEnum, AccessPolicyEnum, \ + FilesystemTypeEnum, FSSupportedProtocolEnum, FSFormatEnum, \ + NFSTypeEnum, NFSShareDefaultAccessEnum, NFSShareSecurityEnum, \ + FilesystemSnapAccessTypeEnum, FSLockingPolicyEnum, \ + CifsShareOfflineAvailabilityEnum, NasServerUnixDirectoryServiceEnum, \ + KdcTypeEnum, NodeEnum, FileInterfaceRoleEnum + from storops.exception import UnityResourceNotFoundError, \ + StoropsConnectTimeoutError, UnityNfsShareNameExistedError + from storops.connection.exceptions import HttpError, HTTPClientError + from storops.unity.resource.user_quota import UnityUserQuota, \ + UnityUserQuotaList + from storops.unity.resource.tree_quota import UnityTreeQuota, \ + UnityTreeQuotaList + from storops.unity.resource.quota_config import UnityQuotaConfig, \ + UnityQuotaConfigList + from storops.unity.resource.storage_resource import UnityStorageResource + from storops.unity.enums import QuotaPolicyEnum, RaidTypeEnum, \ + RaidStripeWidthEnum, StoragePoolTypeEnum + from storops.unity.resource.disk import UnityDisk, \ + UnityDiskList, UnityDiskGroup, UnityDiskGroupList + from storops.unity.resource.cifs_server import UnityCifsServer + from storops.unity.resource.nfs_server import UnityNfsServer + from storops.unity.resource.interface import UnityFileInterface + + HAS_UNITY_SDK, STOROPS_IMP_ERR = True, None +except ImportError: + HAS_UNITY_SDK, STOROPS_IMP_ERR = False, traceback.format_exc() + +try: + from pkg_resources import parse_version + import pkg_resources + + HAS_PKG_RESOURCE, PKG_RESOURCE_IMP_ERR = True, None +except ImportError: + HAS_PKG_RESOURCE, PKG_RESOURCE_IMP_ERR = False, traceback.format_exc() + + +def ensure_required_libs(module): + """Check required libraries""" + + if not HAS_UNITY_SDK: + module.fail_json(msg=missing_required_lib("storops"), + exception=STOROPS_IMP_ERR) + + if not HAS_PKG_RESOURCE: + 
module.fail_json(msg=missing_required_lib("pkg_resources"), + exception=PKG_RESOURCE_IMP_ERR) + + if not HAS_URLLIB3: + module.fail_json(msg=missing_required_lib("urllib3"), + exception=URLLIB3_IMP_ERR) + + min_ver = '1.2.11' + try: + curr_version = pkg_resources.require("storops")[0].version + except Exception as err: + module.fail_json(msg="Failed to get Storops SDK version - " + "{0}".format(str(err))) + + if parse_version(curr_version) < parse_version(min_ver): + module.fail_json(msg="Storops {0} is not supported. " + "Required minimum version is " + "{1}".format(curr_version, min_ver)) + + +def get_unity_management_host_parameters(): + """Provides common access parameters required for the + ansible modules on Unity StorageSystem""" + + return dict( + unispherehost=dict(type='str', required=True, no_log=True), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, + aliases=['verifycert'], default=True), + port=dict(type='int', required=False, default=443, no_log=True) + ) + + +def get_unity_unisphere_connection(module_params, application_type=None): + """Establishes connection with Unity array using storops SDK""" + + if HAS_UNITY_SDK: + conn = UnitySystem(host=module_params['unispherehost'], + port=module_params['port'], + verify=module_params['validate_certs'], + username=module_params['username'], + password=module_params['password'], + application_type=application_type) + return conn + + +def get_logger(module_name, log_file_name='ansible_unity.log', + log_devel=logging.INFO): + """Intializes and returns the logger object + + :param module_name: Name of module to be part of log message + :param log_file_name: Name of file in which the log messages get appended + :param log_devel: Log level + """ + + FORMAT = '%(asctime)-15s %(filename)s %(levelname)s : %(message)s' + max_bytes = 5 * 1024 * 1024 + logging.basicConfig(filename=log_file_name, format=FORMAT) + LOG 
= logging.getLogger(module_name) + LOG.setLevel(log_devel) + handler = CustomRotatingFileHandler(log_file_name, + maxBytes=max_bytes, + backupCount=5) + formatter = logging.Formatter(FORMAT) + handler.setFormatter(formatter) + LOG.addHandler(handler) + LOG.propagate = False + return LOG + + +KB_IN_BYTES = 1024 +MB_IN_BYTES = 1024 * 1024 +GB_IN_BYTES = 1024 * 1024 * 1024 +TB_IN_BYTES = 1024 * 1024 * 1024 * 1024 + + +def get_size_bytes(size, cap_units): + """Convert the given size to bytes""" + + if size is not None and size > 0: + if cap_units in ('kb', 'KB'): + return size * KB_IN_BYTES + elif cap_units in ('mb', 'MB'): + return size * MB_IN_BYTES + elif cap_units in ('gb', 'GB'): + return size * GB_IN_BYTES + elif cap_units in ('tb', 'TB'): + return size * TB_IN_BYTES + else: + return size + else: + return 0 + + +def convert_size_with_unit(size_bytes): + """Convert size in byte with actual unit like KB,MB,GB,TB,PB etc.""" + + if not isinstance(size_bytes, int): + raise ValueError('This method takes Integer type argument only') + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, size_name[i]) + + +def get_size_in_gb(size, cap_units): + """Convert the given size to size in GB, size is restricted to 2 decimal places""" + + size_in_bytes = get_size_bytes(size, cap_units) + size = Decimal(size_in_bytes / GB_IN_BYTES) + size_in_gb = round(size) + return size_in_gb + + +def is_input_empty(item): + """Check whether input string is empty""" + + if item == "" or item.isspace(): + return True + else: + return False + + +def is_size_negative(size): + """Check whether size is negative""" + + if size and size < 0: + return True + else: + return False + + +def has_special_char(value): + """Check whether the string has any special character. 
+ It allows '_' character""" + + regex = re.compile(r'[@!#$%^&*()<>?/\|}{~:]') + if regex.search(value) is None: + return False + else: + return True + + +def is_initiator_valid(value): + """Validate format of the FC or iSCSI initiator""" + + if value.startswith('iqn') or re.match(r"([A-Fa-f0-9]{2}:){15}[A-Fa-f0-9]{2}", value, re.I) is not None: + return True + else: + return False + + +def is_valid_netmask(netmask): + """Validates if ip is valid subnet mask""" + + if netmask: + regexp = re.compile(r'^((128|192|224|240|248|252|254)\.0\.0\.0)|' + r'(255\.(((0|128|192|224|240|248|252|254)\.0\.0)|' + r'(255\.(((0|128|192|224|240|248|252|254)\.0)|' + r'255\.(0|128|192|224|240|248|252|254)))))$') + if not regexp.search(netmask): + return False + return True diff --git a/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py new file mode 100644 index 00000000..15514e3a --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py @@ -0,0 +1,629 @@ +#!/usr/bin/python +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing CIFS server on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +module: cifsserver +version_added: '1.4.0' +short_description: Manage CIFS server on Unity storage system +description: +- Managing the CIFS server on the Unity storage system includes creating CIFS server, getting CIFS server details + and deleting CIFS server. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Akash Shendge (@shenda1) + +options: + nas_server_name: + description: + - Name of the NAS server on which CIFS server will be hosted. + type: str + nas_server_id: + description: + - ID of the NAS server on which CIFS server will be hosted. 
+ type: str + netbios_name: + description: + - The computer name of the SMB server in Windows network. + type: str + workgroup: + description: + - Standalone SMB server workgroup. + type: str + local_password: + description: + - Standalone SMB server administrator password. + type: str + domain: + description: + - The domain name where the SMB server is registered in Active Directory. + type: str + domain_username: + description: + - Active Directory domain user name. + type: str + domain_password: + description: + - Active Directory domain password. + type: str + cifs_server_name: + description: + - The name of the CIFS server. + type: str + cifs_server_id: + description: + - The ID of the CIFS server. + type: str + interfaces: + description: + - List of file IP interfaces that service CIFS protocol of SMB server. + type: list + elements: str + unjoin_cifs_server_account: + description: + - Keep SMB server account unjoined in Active Directory after deletion. + - C(false) specifies keep SMB server account joined after deletion. + - C(true) specifies unjoin SMB server account from Active Directory before deletion. + type: bool + state: + description: + - Define whether the CIFS server should exist or not. + choices: [absent, present] + required: true + type: str +notes: +- The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' +- name: Create CIFS server belonging to Active Directory + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "test_nas1" + cifs_server_name: "test_cifs" + domain: "ad_domain" + domain_username: "domain_username" + domain_password: "domain_password" + state: "present" + +- name: Get CIFS server details using CIFS server ID + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_37" + state: "present" + +- name: Get CIFS server details using NAS server name + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "test_nas1" + state: "present" + +- name: Delete CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_37" + unjoin_cifs_server_account: True + domain_username: "domain_username" + domain_password: "domain_password" + state: "absent" + +- name: Create standalone CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + netbios_name: "ANSIBLE_CIFS" + workgroup: "ansible" + local_password: "Password123!" 
+ nas_server_name: "test_nas1" + state: "present" + +- name: Get CIFS server details using netbios name + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + netbios_name: "ANSIBLE_CIFS" + state: "present" + +- name: Delete standalone CIFS server + dellemc.unity.cifsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cifs_server_id: "cifs_40" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true + +cifs_server_details: + description: Details of the CIFS server. + returned: When CIFS server exists + type: dict + contains: + id: + description: Unique identifier of the CIFS server instance. + type: str + name: + description: User-specified name for the SMB server. + type: str + netbios_name: + description: Computer Name of the SMB server in windows network. + type: str + description: + description: Description of the SMB server. + type: str + domain: + description: Domain name where SMB server is registered in Active Directory. + type: str + workgroup: + description: Windows network workgroup for the SMB server. + type: str + is_standalone: + description: Indicates whether the SMB server is standalone. + type: bool + nasServer: + description: Information about the NAS server in the storage system. + type: dict + contains: + UnityNasServer: + description: Information about the NAS server in the storage system. + type: dict + contains: + id: + description: Unique identifier of the NAS server instance. + type: str + file_interfaces: + description: The file interfaces associated with the NAS server. + type: dict + contains: + UnityFileInterfaceList: + description: List of file interfaces associated with the NAS server. 
+ type: list + contains: + UnityFileInterface: + description: Details of file interface associated with the NAS server. + type: dict + contains: + id: + description: Unique identifier of the file interface. + type: str + smb_multi_channel_supported: + description: Indicates whether the SMB 3.0+ multichannel feature is supported. + type: bool + smb_protocol_versions: + description: Supported SMB protocols, such as 1.0, 2.0, 2.1, 3.0, and so on. + type: list + smbca_supported: + description: Indicates whether the SMB server supports continuous availability. + type: bool + sample: { + "description": null, + "domain": "xxx.xxx.xxx.com", + "existed": true, + "file_interfaces": { + "UnityFileInterfaceList": [ + { + "UnityFileInterface": { + "hash": -9223363258905013637, + "id": "if_43" + } + } + ] + }, + "hash": -9223363258905010379, + "health": { + "UnityHealth": { + "hash": 8777949765559 + } + }, + "id": "cifs_40", + "is_standalone": false, + "last_used_organizational_unit": "ou=Computers,ou=Dell NAS servers", + "name": "ansible_cifs", + "nas_server": { + "UnityNasServer": { + "hash": 8777949765531, + "id": "nas_18" + } + }, + "netbios_name": "ANSIBLE_CIFS", + "smb_multi_channel_supported": true, + "smb_protocol_versions": [ + "1.0", + "2.0", + "2.1", + "3.0" + ], + "smbca_supported": true, + "workgroup": null + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils + +LOG = utils.get_logger('cifsserver') + + +application_type = "Ansible/1.5.0" + + +class CIFSServer(object): + """Class with CIFS server operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_cifs_server_parameters()) + + mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['cifs_server_id', 'cifs_server_name'], + ['cifs_server_id', 'netbios_name']] + required_one_of = 
[['cifs_server_id', 'cifs_server_name', 'netbios_name', 'nas_server_name', 'nas_server_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + LOG.info('Check Mode Flag %s', self.module.check_mode) + + def get_details(self, cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None): + """Get CIFS server details. + :param: cifs_server_id: The ID of the CIFS server + :param: cifs_server_name: The name of the CIFS server + :param: netbios_name: Name of the SMB server in windows network + :param: nas_server_id: The ID of the NAS server + :return: Dict containing CIFS server details if exists + """ + + LOG.info("Getting CIFS server details") + id_or_name = get_id_name(cifs_server_id, cifs_server_name, netbios_name, nas_server_id) + + try: + if cifs_server_id: + cifs_server_details = self.unity_conn.get_cifs_server(_id=cifs_server_id) + return process_response(cifs_server_details) + + if cifs_server_name: + cifs_server_details = self.unity_conn.get_cifs_server(name=cifs_server_name) + return process_response(cifs_server_details) + + if netbios_name: + cifs_server_details = self.unity_conn.get_cifs_server(netbios_name=netbios_name) + if len(cifs_server_details) > 0: + return process_dict(cifs_server_details._get_properties()) + + if nas_server_id: + cifs_server_details = self.unity_conn.get_cifs_server(nas_server=nas_server_id) + if len(cifs_server_details) > 0: + return process_dict(cifs_server_details._get_properties()) + return None + except utils.HttpError as e: + if e.http_status == 401: + msg = "Failed to get CIFS server: %s due to incorrect " \ + "username/password error: %s" % (id_or_name, str(e)) + else: + msg = "Failed to get CIFS server: 
%s with error: %s" % (id_or_name, str(e)) + except utils.UnityResourceNotFoundError: + msg = "CIFS server with ID %s not found" % cifs_server_id + LOG.info(msg) + return None + except utils.StoropsConnectTimeoutError as e: + msg = "Failed to get CIFS server: %s with error: %s. Please check unispherehost IP: %s" % ( + id_or_name, str(e), self.module.params['unispherehost']) + except Exception as e: + msg = "Failed to get details of CIFS server: %s with error: %s" % (id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_cifs_server_instance(self, cifs_server_id): + """Get CIFS server instance. + :param: cifs_server_id: The ID of the CIFS server + :return: Return CIFS server instance if exists + """ + + try: + cifs_server_obj = utils.UnityCifsServer.get(cli=self.unity_conn._cli, _id=cifs_server_id) + return cifs_server_obj + + except Exception as e: + error_msg = "Failed to get the CIFS server %s instance" \ + " with error %s" % (cifs_server_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def delete_cifs_server(self, cifs_server_id, skip_unjoin=None, domain_username=None, domain_password=None): + """Delete CIFS server. + :param: cifs_server_id: The ID of the CIFS server + :param: skip_unjoin: Flag indicating whether to unjoin SMB server account from AD before deletion + :param: domain_username: The domain username + :param: domain_password: The domain password + :return: Return True if CIFS server is deleted + """ + + LOG.info("Deleting CIFS server") + try: + if not self.module.check_mode: + cifs_obj = self.get_cifs_server_instance(cifs_server_id=cifs_server_id) + cifs_obj.delete(skip_domain_unjoin=skip_unjoin, username=domain_username, password=domain_password) + return True + + except Exception as e: + msg = "Failed to delete CIFS server: %s with error: %s" % (cifs_server_id, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_nas_server_id(self, nas_server_name): + """Get NAS server ID. 
+ :param: nas_server_name: The name of NAS server + :return: Return NAS server ID if exists + """ + + LOG.info("Getting NAS server ID") + try: + obj_nas = self.unity_conn.get_nas_server(name=nas_server_name) + return obj_nas.get_id() + + except Exception as e: + msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def is_modify_interfaces(self, cifs_server_details): + """Check if modification is required in existing interfaces + :param: cifs_server_details: CIFS server details + :return: Flag indicating if modification is required + """ + + existing_interfaces = [] + if cifs_server_details['file_interfaces']['UnityFileInterfaceList']: + for interface in cifs_server_details['file_interfaces']['UnityFileInterfaceList']: + existing_interfaces.append(interface['UnityFileInterface']['id']) + + for interface in self.module.params['interfaces']: + if interface not in existing_interfaces: + return True + return False + + def is_modification_required(self, cifs_server_details): + """Check if modification is required in existing CIFS server + :param: cifs_server_details: CIFS server details + :return: Flag indicating if modification is required + """ + + LOG.info("Checking if any modification is required") + param_list = ['netbios_name', 'workgroup'] + for param in param_list: + if self.module.params[param] is not None and cifs_server_details[param] is not None and \ + self.module.params[param].upper() != cifs_server_details[param]: + return True + + # Check for domain + if self.module.params['domain'] is not None and cifs_server_details['domain'] is not None and \ + self.module.params['domain'] != cifs_server_details['domain']: + return True + + # Check file interfaces + if self.module.params['interfaces'] is not None: + return self.is_modify_interfaces(cifs_server_details) + return False + + def create_cifs_server(self, nas_server_id, interfaces=None, netbios_name=None, 
cifs_server_name=None, domain=None, + domain_username=None, domain_password=None, workgroup=None, local_password=None): + """Create CIFS server. + :param: nas_server_id: The ID of NAS server + :param: interfaces: List of file interfaces + :param: netbios_name: Name of the SMB server in windows network + :param: cifs_server_name: Name of the CIFS server + :param: domain: The domain name where the SMB server is registered in Active Directory + :param: domain_username: The domain username + :param: domain_password: The domain password + :param: workgroup: Standalone SMB server workgroup + :param: local_password: Standalone SMB server admin password + :return: Return True if CIFS server is created + """ + + LOG.info("Creating CIFS server") + try: + if not self.module.check_mode: + utils.UnityCifsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, interfaces=interfaces, + netbios_name=netbios_name, name=cifs_server_name, domain=domain, + domain_username=domain_username, domain_password=domain_password, + workgroup=workgroup, local_password=local_password) + return True + except Exception as e: + msg = "Failed to create CIFS server with error: %s" % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_params(self): + """Validate the parameters + """ + + param_list = ['nas_server_id', 'nas_server_name', 'domain', 'cifs_server_id', 'cifs_server_name', + 'local_password', 'netbios_name', 'workgroup', 'domain_username', 'domain_password'] + + msg = "Please provide valid {0}" + for param in param_list: + if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0: + errmsg = msg.format(param) + self.module.fail_json(msg=errmsg) + + def perform_module_operation(self): + """ + Perform different actions on CIFS server module based on parameters + passed in the playbook + """ + cifs_server_id = self.module.params['cifs_server_id'] + cifs_server_name = self.module.params['cifs_server_name'] + nas_server_id = 
self.module.params['nas_server_id'] + nas_server_name = self.module.params['nas_server_name'] + netbios_name = self.module.params['netbios_name'] + workgroup = self.module.params['workgroup'] + local_password = self.module.params['local_password'] + domain = self.module.params['domain'] + domain_username = self.module.params['domain_username'] + domain_password = self.module.params['domain_password'] + interfaces = self.module.params['interfaces'] + unjoin_cifs_server_account = self.module.params['unjoin_cifs_server_account'] + state = self.module.params['state'] + + # result is a dictionary that contains changed status and CIFS server details + result = dict( + changed=False, + cifs_server_details={} + ) + + # Validate the parameters + self.validate_params() + + if nas_server_name is not None: + nas_server_id = self.get_nas_server_id(nas_server_name) + + cifs_server_details = self.get_details(cifs_server_id=cifs_server_id, cifs_server_name=cifs_server_name, + netbios_name=netbios_name, nas_server_id=nas_server_id) + + # Check if modification is required + if cifs_server_details: + if cifs_server_id is None: + cifs_server_id = cifs_server_details['id'] + modify_flag = self.is_modification_required(cifs_server_details) + if modify_flag: + self.module.fail_json(msg="Modification is not supported through Ansible module") + + if not cifs_server_details and state == 'present': + if not nas_server_id: + self.module.fail_json(msg="Please provide nas server id/name to create CIFS server.") + + if any([netbios_name, workgroup, local_password]): + if not all([netbios_name, workgroup, local_password]): + msg = "netbios_name, workgroup and local_password" \ + " are required to create standalone CIFS server." 
def get_id_name(cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None):
    """Return the first identifier provided, for use in log/error messages.

    Priority: CIFS server ID, CIFS server name, NetBIOS name, NAS server ID.

    :param cifs_server_id: The ID of the CIFS server.
    :param cifs_server_name: The name of the CIFS server.
    :param netbios_name: Name of the SMB server in the Windows network.
    :param nas_server_id: The ID of the NAS server.
    :return: The first truthy identifier (may be None if none was given).
    """
    if cifs_server_id:
        return cifs_server_id
    if cifs_server_name:
        return cifs_server_name
    if netbios_name:
        return netbios_name
    return nas_server_id


def process_response(cifs_server_details):
    """Return the property dict of an existing CIFS server object.

    :param cifs_server_details: storops CIFS server object.
    :return: Property dict if the object exists on the array, else None.
    """
    if cifs_server_details.existed:
        return cifs_server_details._get_properties()
    return None


def process_dict(cifs_server_details):
    """Unwrap single-element lists in a CIFS server details dict.

    Each known attribute comes back from the query as a one-element
    list; replace it with its first element so callers get scalars.

    :param cifs_server_details: Dict containing CIFS server details.
    :return: The same dict with known keys unwrapped in place.
    """
    # Bug fix: the original list was missing a comma after 'nas_server',
    # which concatenated it with the following string into the bogus key
    # 'nas_servernetbios_name' -- so 'nas_server' was never unwrapped
    # (and 'netbios_name' was listed twice).
    param_list = ['description', 'domain', 'file_interfaces', 'health', 'id',
                  'is_standalone', 'name', 'nas_server', 'netbios_name',
                  'smb_multi_channel_supported', 'smb_protocol_versions',
                  'smbca_supported', 'workgroup']

    for param in param_list:
        if param in cifs_server_details:
            cifs_server_details[param] = cifs_server_details[param][0]
    return cifs_server_details


def get_cifs_server_parameters():
    """Return the Ansible argument spec for the Unity CIFS server module."""
    return dict(
        cifs_server_id=dict(), cifs_server_name=dict(),
        netbios_name=dict(), workgroup=dict(),
        local_password=dict(no_log=True), domain=dict(),
        domain_username=dict(), domain_password=dict(no_log=True),
        nas_server_name=dict(), nas_server_id=dict(),
        interfaces=dict(type='list', elements='str'),
        unjoin_cifs_server_account=dict(type='bool'),
        state=dict(required=True, type='str', choices=['present', 'absent']),
    )


def main():
    """Create Unity CIFS server object and perform action on it
    based on user input from playbook."""
    obj = CIFSServer()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
+version_added: '1.1.0' +short_description: Manage consistency groups on Unity storage system +description: +- Managing the consistency group on the Unity storage system includes + creating new consistency group, adding volumes to consistency + group, removing volumes from consistency group, mapping hosts to + consistency group, unmapping hosts from consistency group, + renaming consistency group, modifying attributes of consistency group, + enabling replication in consistency group, disabling replication in + consistency group and deleting consistency group. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Akash Shendge (@shenda1) + +options: + cg_name: + description: + - The name of the consistency group. + - It is mandatory for the create operation. + - Specify either I(cg_name) or I(cg_id) (but not both) for any operation. + type: str + cg_id: + description: + - The ID of the consistency group. + - It can be used only for get, modify, add/remove volumes, or delete + operations. + type: str + volumes: + description: + - This is a list of volumes. + - Either the volume ID or name must be provided for adding/removing + existing volumes from consistency group. + - If I(volumes) are given, then I(vol_state) should also be specified. + - Volumes cannot be added/removed from consistency group, if the + consistency group or the volume has snapshots. + type: list + elements: dict + suboptions: + vol_id: + description: + - The ID of the volume. + type: str + vol_name: + description: + - The name of the volume. + type: str + vol_state: + description: + - String variable, describes the state of volumes inside consistency + group. + - If I(volumes) are given, then I(vol_state) should also be specified. + choices: [present-in-group , absent-in-group] + type: str + new_cg_name: + description: + - The new name of the consistency group, used in rename operation. + type: str + description: + description: + - Description of the consistency group. 
+ type: str + snap_schedule: + description: + - Snapshot schedule assigned to the consistency group. + - Specifying an empty string "" removes the existing snapshot schedule + from consistency group. + type: str + tiering_policy: + description: + - Tiering policy choices for how the storage resource data will be + distributed among the tiers available in the pool. + choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST'] + type: str + hosts: + description: + - This is a list of hosts. + - Either the host ID or name must be provided for mapping/unmapping + hosts for a consistency group. + - If I(hosts) are given, then I(mapping_state) should also be specified. + - Hosts cannot be mapped to a consistency group, if the + consistency group has no volumes. + - When a consistency group is being mapped to the host, + users should not use the volume module to map the volumes + in the consistency group to hosts. + type: list + elements: dict + suboptions: + host_id: + description: + - The ID of the host. + type: str + host_name: + description: + - The name of the host. + type: str + mapping_state: + description: + - String variable, describes the state of hosts inside the consistency + group. + - If I(hosts) are given, then I(mapping_state) should also be specified. + choices: [mapped , unmapped] + type: str + replication_params: + description: + - Settings required for enabling replication. + type: dict + suboptions: + destination_cg_name: + description: + - Name of the destination consistency group. + - Default value will be source consistency group name prefixed by 'DR_'. + type: str + replication_mode: + description: + - The replication mode. + type: str + required: true + choices: ['asynchronous', 'manual'] + rpo: + description: + - Maximum time to wait before the system syncs the source and destination LUNs. + - Option I(rpo) should be specified if the I(replication_mode) is C(asynchronous). + - The value should be in range of C(5) to C(1440). 
+ type: int + replication_type: + description: + - Type of replication. + choices: ['local', 'remote'] + default: local + type: str + remote_system: + description: + - Details of remote system to which the replication is being configured. + - The I(remote_system) option should be specified if the I(replication_type) is C(remote). + type: dict + suboptions: + remote_system_host: + required: true + description: + - IP or FQDN for remote Unity unisphere Host. + type: str + remote_system_username: + type: str + required: true + description: + - User name of remote Unity unisphere Host. + remote_system_password: + type: str + required: true + description: + - Password of remote Unity unisphere Host. + remote_system_verifycert: + type: bool + default: true + description: + - Boolean variable to specify whether or not to validate SSL + certificate of remote Unity unisphere Host. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be + verified. + remote_system_port: + description: + - Port at which remote Unity unisphere is hosted. + type: int + default: 443 + destination_pool_name: + description: + - Name of pool to allocate destination Luns. + - Mutually exclusive with I(destination_pool_id). + type: str + destination_pool_id: + description: + - Id of pool to allocate destination Luns. + - Mutually exclusive with I(destination_pool_name). + type: str + replication_state: + description: + - State of the replication. + choices: ['enable', 'disable'] + type: str + state: + description: + - Define whether the consistency group should exist or not. + choices: [absent, present] + required: true + type: str +notes: + - The I(check_mode) is not supported. 
+""" + +EXAMPLES = r""" +- name: Create consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + cg_name: "{{cg_name}}" + description: "{{description}}" + snap_schedule: "{{snap_schedule1}}" + state: "present" + +- name: Get details of consistency group using id + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + state: "present" + +- name: Add volumes to consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + volumes: + - vol_name: "Ansible_Test-3" + - vol_id: "sv_1744" + vol_state: "{{vol_state_present}}" + state: "present" + +- name: Rename consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{cg_name}}" + new_cg_name: "{{new_cg_name}}" + state: "present" + +- name: Modify consistency group details + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + snap_schedule: "{{snap_schedule2}}" + tiering_policy: "{{tiering_policy1}}" + state: "present" + +- name: Map hosts to a consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + hosts: + - host_name: "10.226.198.248" + - host_id: "Host_511" + mapping_state: "mapped" + state: "present" + +- name: Unmap hosts from a consistency group + dellemc.unity.consistencygroup: + unispherehost: 
"{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "{{cg_id}}" + hosts: + - host_id: "Host_511" + - host_name: "10.226.198.248" + mapping_state: "unmapped" + state: "present" + +- name: Remove volumes from consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + volumes: + - vol_name: "Ansible_Test-3" + - vol_id: "sv_1744" + vol_state: "{{vol_state_absent}}" + state: "present" + +- name: Delete consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "{{new_cg_name}}" + state: "absent" + +- name: Enable replication for consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_id: "cg_id_1" + replication_params: + destination_cg_name: "destination_cg_1" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "remote" + remote_system: + remote_system_host: '10.1.2.3' + remote_system_verifycert: False + remote_system_username: 'username' + remote_system_password: 'password' + destination_pool_name: "pool_test_1" + replication_state: "enable" + state: "present" + +- name: Disable replication for consistency group + dellemc.unity.consistencygroup: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + cg_name: "dis_repl_ans_source" + replication_state: "disable" + state: "present" +""" + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true + +consistency_group_details: + description: Details of the consistency group. 
+ returned: When consistency group exists + type: dict + contains: + id: + description: The system ID given to the consistency group. + type: str + relocation_policy: + description: FAST VP tiering policy for the consistency group. + type: str + cg_replication_enabled: + description: Whether or not the replication is enabled.. + type: bool + snap_schedule: + description: Snapshot schedule applied to consistency group. + type: dict + contains: + UnitySnapSchedule: + description: Snapshot schedule applied to consistency + group. + type: dict + contains: + id: + description: The system ID given to the + snapshot schedule. + type: str + name: + description: The name of the snapshot schedule. + type: str + luns: + description: Details of volumes part of consistency group. + type: dict + contains: + UnityLunList: + description: List of volumes part of consistency group. + type: list + contains: + UnityLun: + description: Detail of volume. + type: dict + contains: + id: + description: The system ID given to volume. + type: str + name: + description: The name of the volume. + type: str + snapshots: + description: List of snapshots of consistency group. + type: list + contains: + name: + description: Name of the snapshot. + type: str + creation_time: + description: Date and time on which the snapshot was taken. + type: str + expirationTime: + description: Date and time after which the snapshot will expire. + type: str + storageResource: + description: Storage resource for which the snapshot was + taken. + type: dict + contains: + UnityStorageResource: + description: Details of the storage resource. + type: dict + contains: + id: + description: The id of the storage + resource. + type: str + block_host_access: + description: Details of hosts mapped to the consistency group. + type: dict + contains: + UnityBlockHostAccessList: + description: List of hosts mapped to consistency group. + type: list + contains: + UnityBlockHostAccess: + description: Details of host. 
+ type: dict + contains: + id: + description: The ID of the host. + type: str + name: + description: The name of the host. + type: str + sample: { + "advanced_dedup_status": "DedupStatusEnum.DISABLED", + "block_host_access": null, + "cg_replication_enabled": false, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "data_reduction_status": "DataReductionStatusEnum.DISABLED", + "datastores": null, + "dedup_status": null, + "description": "Ansible testing", + "esx_filesystem_block_size": null, + "esx_filesystem_major_version": null, + "existed": true, + "filesystem": null, + "hash": 8776023812033, + "health": { + "UnityHealth": { + "hash": 8776023811889 + } + }, + "host_v_vol_datastore": null, + "id": "res_7477", + "is_replication_destination": false, + "is_snap_schedule_paused": null, + "luns": null, + "metadata_size": 0, + "metadata_size_allocated": 0, + "name": "Ansible_CG_Testing", + "per_tier_size_used": null, + "pools": null, + "relocation_policy": "TieringPolicyEnum.MIXED", + "replication_type": "ReplicationTypeEnum.NONE", + "size_allocated": 0, + "size_total": 0, + "size_used": null, + "snap_count": 0, + "snap_schedule": null, + "snaps_size_allocated": 0, + "snaps_size_total": 0, + "snapshots": [], + "thin_status": "ThinStatusEnum.FALSE", + "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP", + "virtual_volumes": null, + "vmware_uuid": null + } +''' + +import logging +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('consistencygroup', + log_devel=logging.INFO) + +application_type = "Ansible/1.5.0" + + +class ConsistencyGroup(object): + """Class with consistency group operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_consistencygroup_parameters()) + + 
mutually_exclusive = [['cg_name', 'cg_id']] + required_one_of = [['cg_name', 'cg_id']] + required_together = [['volumes', 'vol_state'], ['hosts', 'mapping_state']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_together=required_together + ) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def return_cg_instance(self, cg_name): + """Return the consistency group instance. + :param cg_name: The name of the consistency group + :return: Instance of the consistency group + """ + + try: + cg_details = self.unity_conn.get_cg(name=cg_name) + cg_id = cg_details.get_id() + cg_obj = utils.cg.UnityConsistencyGroup.get(self.unity_conn._cli, + cg_id) + return cg_obj + + except Exception as e: + msg = "Failed to get the consistency group {0} instance with " \ + "error {1}".format(cg_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_details(self, cg_id=None, cg_name=None): + """Get consistency group details. 
+ :param cg_id: The id of the consistency group + :param cg_name: The name of the consistency group + :return: Dict containing consistency group details if exists + """ + + id_or_name = cg_id if cg_id else cg_name + errormsg = "Failed to get details of consistency group {0} with" \ + " error {1}" + + try: + cg_details = self.unity_conn.get_cg(_id=cg_id, name=cg_name) + if cg_name is None: + cg_name = cg_details.name + + if cg_details.existed: + cg_obj = self.return_cg_instance(cg_name) + snapshots = cg_obj.snapshots + + snapshot_list = [snap._get_properties() for snap in snapshots] + + cg_ret_details = cg_details._get_properties() + + # Append details of host mapped to the consistency group + # in return response + if cg_ret_details['block_host_access']: + for i in range(len(cg_details.block_host_access)): + cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][ + 'id'] = cg_details.block_host_access[i].host.id + cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][ + 'name'] = cg_details.block_host_access[i].host.name + cg_ret_details['snapshots'] = snapshot_list + + # Add volume name to the dict + if cg_ret_details['luns'] is not None: + for i in range(len(cg_details.luns)): + cg_ret_details['luns']['UnityLunList'][i]['UnityLun'][ + 'name'] = cg_details.luns[i].name + + # Add snapshot schedule name to the dict + if cg_ret_details['snap_schedule'] is not None: + cg_ret_details['snap_schedule']['UnitySnapSchedule'][ + 'name'] = cg_details.snap_schedule.name + + # Status of cg replication + cg_ret_details['cg_replication_enabled'] = True if cg_details.check_cg_is_replicated() else False + + return cg_ret_details + else: + LOG.info("Failed to get details of consistency group %s", + id_or_name) + return None + + except utils.HttpError as e: + if e.http_status == 401: + auth_err = "Incorrect username or password, {0}".format( + e.message) + msg = errormsg.format(id_or_name, auth_err) + 
LOG.error(msg) + self.module.fail_json(msg=msg) + else: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except utils.UnityResourceNotFoundError as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + return None + + except Exception as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_host_id_by_name(self, host_name): + """ Get host ID by host name + :param host_name: str + :return: unity host ID + :rtype: str + """ + try: + host_obj = self.unity_conn.get_host(name=host_name) + if host_obj and host_obj.existed: + return host_obj.id + else: + msg = "Host name: %s does not exists" % host_name + LOG.error(msg) + self.module.fail_json(msg=msg) + except Exception as e: + msg = "Failed to get host ID by name: %s error: %s" % ( + host_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_volume_details(self, vol_name=None, vol_id=None): + """Get the details of a volume. 
+ :param vol_name: The name of the volume + :param vol_id: The id of the volume + :return: Dict containing volume details if exists + """ + + id_or_name = vol_id if vol_id else vol_name + + try: + lun = self.unity_conn.get_lun(name=vol_name, _id=vol_id) + + cg = None + if lun.existed: + lunid = lun.get_id() + unitylun = utils.UnityLun.get(self.unity_conn._cli, lunid) + if unitylun.cg is not None: + cg = unitylun.cg + else: + errormsg = "The volume {0} not found.".format(id_or_name) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + cg_details = self.get_details( + cg_id=self.module.params['cg_id'], + cg_name=self.module.params['cg_name']) + + # Check if volume is already part of another consistency group + if cg is None: + return lun._get_properties()['id'] + + errormsg = "The volume {0} is already part of consistency group" \ + " {1}".format(id_or_name, cg.name) + + if cg_details is None: + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if cg.id != cg_details['id']: + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + return lun._get_properties()['id'] + + except Exception as e: + msg = "Failed to get the volume {0} with error {1}".format( + id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def remove_volumes_from_cg(self, cg_name, volumes): + """Remove volumes from consistency group. 
+ :param cg_name: The name of the consistency group + :param volumes: The list of volumes to be removed + :return: Boolean value to indicate if volumes are removed from + consistency group + """ + + cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties() + existing_volumes_in_cg = cg_details['luns'] + existing_vol_ids = [] + + if existing_volumes_in_cg: + existing_vol_ids = [vol['UnityLun']['id'] for vol in + existing_volumes_in_cg['UnityLunList']] + + ids_to_remove = [] + vol_name_list = [] + vol_id_list = [] + + for vol in volumes: + if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list): + vol_id_list.append(vol['vol_id']) + elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list): + vol_name_list.append(vol['vol_name']) + + """remove volume by name""" + for vol in vol_name_list: + ids_to_remove.append(self.get_volume_details(vol_name=vol)) + + vol_id_list = list(set(vol_id_list + ids_to_remove)) + ids_to_remove = list(set(existing_vol_ids).intersection(set(vol_id_list))) + + LOG.info("Volume IDs to remove %s", ids_to_remove) + + if len(ids_to_remove) == 0: + return False + + vol_remove_list = [] + for vol in ids_to_remove: + vol_dict = {"id": vol} + vol_remove_list.append(vol_dict) + + cg_obj = self.return_cg_instance(cg_name) + + try: + cg_obj.modify(lun_remove=vol_remove_list) + return True + except Exception as e: + errormsg = "Remove existing volumes from consistency group {0} " \ + "failed with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def add_volumes_to_cg(self, cg_name, volumes, tiering_policy): + """Add volumes to consistency group. 
+ :param cg_name: The name of the consistency group + :param volumes: The list of volumes to be added to consistency + group + :param tiering_policy: The tiering policy that is to be applied to + consistency group + :return: The boolean value to indicate if volumes are added to + consistency group + """ + + cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties() + existing_volumes_in_cg = cg_details['luns'] + existing_vol_ids = [] + + if existing_volumes_in_cg: + existing_vol_ids = [vol['UnityLun']['id'] for vol in + existing_volumes_in_cg['UnityLunList']] + + ids_to_add = [] + vol_name_list = [] + vol_id_list = [] + all_vol_ids = [] + + for vol in volumes: + if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list): + vol_id_list.append(vol['vol_id']) + elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list): + vol_name_list.append(vol['vol_name']) + + """add volume by name""" + for vol in vol_name_list: + ids_to_add.append(self.get_volume_details(vol_name=vol)) + + """add volume by id""" + for vol in vol_id_list: + """verifying if volume id exists in array""" + ids_to_add.append(self.get_volume_details(vol_id=vol)) + + all_vol_ids = ids_to_add + existing_vol_ids + ids_to_add = list(set(all_vol_ids) - set(existing_vol_ids)) + + LOG.info("Volume IDs to add %s", ids_to_add) + + if len(ids_to_add) == 0: + return False + + vol_add_list = [] + for vol in ids_to_add: + vol_dict = {"id": vol} + vol_add_list.append(vol_dict) + + cg_obj = self.return_cg_instance(cg_name) + + policy_enum = None + if tiering_policy: + if utils.TieringPolicyEnum[tiering_policy]: + policy_enum = utils.TieringPolicyEnum[tiering_policy] + else: + errormsg = "Invalid choice {0} for tiering policy".format( + tiering_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + try: + cg_obj.modify(lun_add=vol_add_list, tiering_policy=policy_enum) + return True + except Exception as e: + errormsg = "Add existing volumes to consistency group {0} " \ + "failed with 
error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def map_hosts_to_cg(self, cg_name, add_hosts): + """Map hosts to consistency group. + :param cg_name: The name of the consistency group + :param add_hosts: List of hosts that are to be mapped to cg + :return: Boolean value to indicate if hosts were mapped to cg + """ + cg_details = self.unity_conn.get_cg(name=cg_name) + existing_volumes_in_cg = cg_details.luns + + existing_hosts_in_cg = cg_details.block_host_access + existing_host_ids = [] + + """Get list of existing hosts in consistency group""" + if existing_hosts_in_cg: + for i in range(len(existing_hosts_in_cg)): + existing_host_ids.append(existing_hosts_in_cg[i].host.id) + + host_id_list = [] + host_name_list = [] + add_hosts_id = [] + host_add_list = [] + all_hosts = [] + + for host in add_hosts: + if 'host_id' in host and not (host['host_id'] in host_id_list): + host_id_list.append(host['host_id']) + elif 'host_name' in host and not (host['host_name'] in host_name_list): + host_name_list.append(host['host_name']) + + """add hosts by name""" + for host_name in host_name_list: + add_hosts_id.append(self.get_host_id_by_name(host_name)) + + all_hosts = host_id_list + existing_host_ids + add_hosts_id + add_hosts_id = list(set(all_hosts) - set(existing_host_ids)) + + if len(add_hosts_id) == 0: + return False + + if existing_volumes_in_cg: + + for host_id in add_hosts_id: + host_dict = {"id": host_id} + host_add_list.append(host_dict) + + LOG.info("List of hosts to be added to consistency group " + "%s ", host_add_list) + cg_obj = self.return_cg_instance(cg_name) + try: + cg_obj.modify(name=cg_name, host_add=host_add_list) + return True + except Exception as e: + errormsg = "Adding host to consistency group {0} " \ + "failed with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def unmap_hosts_to_cg(self, cg_name, remove_hosts): + """Unmap hosts to consistency group. 
+ :param cg_name: The name of the consistency group + :param remove_hosts: List of hosts that are to be unmapped from cg + :return: Boolean value to indicate if hosts were mapped to cg + """ + cg_details = self.unity_conn.get_cg(name=cg_name) + existing_hosts_in_cg = cg_details.block_host_access + existing_host_ids = [] + + """Get host ids existing in consistency group""" + if existing_hosts_in_cg: + for i in range(len(existing_hosts_in_cg)): + existing_host_ids.append(existing_hosts_in_cg[i].host.id) + + host_remove_list = [] + host_id_list = [] + host_name_list = [] + remove_hosts_id = [] + + for host in remove_hosts: + if 'host_id' in host and not (host['host_id'] in host_id_list): + host_id_list.append(host['host_id']) + elif 'host_name' in host and not (host['host_name'] in host_name_list): + host_name_list.append(host['host_name']) + + """remove hosts by name""" + for host in host_name_list: + remove_hosts_id.append(self.get_host_id_by_name(host)) + + host_id_list = list(set(host_id_list + remove_hosts_id)) + remove_hosts_id = list(set(existing_host_ids).intersection(set(host_id_list))) + + if len(remove_hosts_id) == 0: + return False + + for host in remove_hosts_id: + host_dict = {"id": host} + host_remove_list.append(host_dict) + cg_obj = self.return_cg_instance(cg_name) + try: + cg_obj.modify(name=cg_name, host_remove=host_remove_list) + return True + except Exception as e: + errormsg = "Removing host from consistency group {0} " \ + "failed with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def rename_cg(self, cg_name, new_cg_name): + """Rename consistency group. 
+ :param cg_name: The name of the consistency group + :param new_cg_name: The new name of the consistency group + :return: Boolean value to indicate if consistency group renamed + """ + cg_obj = self.return_cg_instance(cg_name) + + try: + cg_obj.modify(name=new_cg_name) + return True + except Exception as e: + errormsg = "Rename operation of consistency group {0} failed " \ + "with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def is_cg_modified(self, cg_details): + """Check if the desired consistency group state is different from + existing consistency group. + :param cg_details: The dict containing consistency group details + :return: Boolean value to indicate if modification is needed + """ + modified = False + + if self.module.params['tiering_policy'] and cg_details['luns'] is \ + None and self.module.params['volumes'] is None: + self.module.fail_json(msg="The system cannot assign a tiering" + " policy to an empty consistency group." 
+ ) + + if self.module.params['hosts'] and cg_details['luns'] is \ + None and self.module.params['volumes'] is None: + self.module.fail_json(msg="The system cannot assign hosts" + " to an empty consistency group.") + + if ((cg_details['description'] is not None and + self.module.params['description'] is not None and + cg_details['description'] != self.module.params['description']) + or (cg_details['description'] is None and + self.module.params['description'] is not None)) or \ + ((cg_details['snap_schedule'] is not None and + self.module.params['snap_schedule'] is not None and + cg_details['snap_schedule']['UnitySnapSchedule']['name'] != + self.module.params['snap_schedule']) or + (cg_details['snap_schedule'] is None and + self.module.params['snap_schedule'])): + modified = True + + if cg_details['relocation_policy']: + tier_policy = cg_details['relocation_policy'].split('.') + if self.module.params['tiering_policy'] is not None and \ + tier_policy[1] != self.module.params['tiering_policy']: + modified = True + + return modified + + def create_cg(self, cg_name, description, snap_schedule): + """Create a consistency group. + :param cg_name: The name of the consistency group + :param description: The description of the consistency group + :param snap_schedule: The name of the snapshot schedule + :return: The boolean value to indicate if consistency group + created and also returns the CG object + """ + + try: + if snap_schedule is not None: + snap_schedule = {"name": snap_schedule} + + cg_obj = utils.cg.UnityConsistencyGroup.create( + self.unity_conn._cli, name=cg_name, description=description, + snap_schedule=snap_schedule) + return True, cg_obj + except Exception as e: + errormsg = "Create operation of consistency group {0} failed" \ + " with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_cg(self, cg_name, description, snap_schedule, tiering_policy): + """Modify consistency group. 
+ :param cg_name: The name of the consistency group + :param description: The description of the consistency group + :param snap_schedule: The name of the snapshot schedule + :param tiering_policy: The tiering policy that is to be applied to + consistency group + :return: The boolean value to indicate if consistency group + modified + """ + cg_obj = self.return_cg_instance(cg_name) + is_snap_schedule_paused = None + + if self.module.params['snap_schedule'] == "": + is_snap_schedule_paused = False + + if snap_schedule is not None: + if snap_schedule == "": + snap_schedule = {"name": None} + else: + snap_schedule = {"name": snap_schedule} + + policy_enum = None + if tiering_policy: + if utils.TieringPolicyEnum[tiering_policy]: + policy_enum = utils.TieringPolicyEnum[tiering_policy] + else: + errormsg = "Invalid choice {0} for tiering policy".format( + tiering_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + try: + cg_obj.modify(description=description, + snap_schedule=snap_schedule, + tiering_policy=policy_enum, + is_snap_schedule_paused=is_snap_schedule_paused) + return True + + except Exception as e: + errormsg = "Modify operation of consistency group {0} failed " \ + "with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_cg(self, cg_name): + """Delete consistency group. + :param cg_name: The name of the consistency group + :return: The boolean value to indicate if consistency group deleted + """ + cg_obj = self.return_cg_instance(cg_name) + + try: + cg_obj.delete() + return True + + except Exception as e: + errormsg = "Delete operation of consistency group {0} failed " \ + "with error {1}".format(cg_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def refine_volumes(self, volumes): + """Refine volumes. 
:param volumes: Volumes that are to be added/removed
+ :param hosts: List of hosts + """ + + for host in hosts: + if ('host_id' in host) and ('host_name' in host): + errormsg = "Both name and id are found for host {0}. No" \ + " action would be taken. Please specify either" \ + " name or id.".format(host) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + elif 'host_id' in host and (len(host['host_id'].strip()) == 0): + errormsg = "host_id is blank. Please specify valid host_id." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + elif 'host_name' in host and (len(host.get('host_name').strip()) == 0): + errormsg = "host_name is blank. Please specify valid host_name." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + elif 'host_name' in host: + self.get_host_id_by_name(host_name=host['host_name']) + elif 'host_id' in host: + host_obj = self.unity_conn.get_host(_id=host['host_id']) + if host_obj is None or host_obj.existed is False: + msg = "Host id: %s does not exists" % host['host_id'] + LOG.error(msg) + self.module.fail_json(msg=msg) + + else: + errormsg = "Expected either host_name or host_id, found" \ + " neither for host {0}".format(host) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def update_replication_params(self, replication): + ''' Update replication params ''' + + if 'replication_type' in replication and replication['replication_type'] == 'remote': + connection_params = { + 'unispherehost': replication['remote_system']['remote_system_host'], + 'username': replication['remote_system']['remote_system_username'], + 'password': replication['remote_system']['remote_system_password'], + 'validate_certs': replication['remote_system']['remote_system_verifycert'], + 'port': replication['remote_system']['remote_system_port'] + } + remote_system_conn = utils.get_unity_unisphere_connection( + connection_params, application_type) + replication['remote_system_name'] = remote_system_conn.name + if replication['destination_pool_name'] is not None: + pool_object = 
remote_system_conn.get_pool(name=replication['destination_pool_name']) + replication['destination_pool_id'] = pool_object.id + else: + if replication['destination_pool_name'] is not None: + pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name']) + replication['destination_pool_id'] = pool_object.id + + def get_destination_cg_luns(self, source_lun_list): + ''' Form destination cg lun list ''' + destination_cg_lun_list = [] + if source_lun_list is not None: + for source_lun in source_lun_list: + destination_cg_lun_info = utils.UnityStorageResource() + destination_cg_lun_info.name = "DR_" + source_lun.name + destination_cg_lun_info.is_thin_enabled = source_lun.is_thin_enabled + destination_cg_lun_info.size_total = source_lun.size_total + destination_cg_lun_info.id = source_lun.id + destination_cg_lun_info.is_data_reduction_enabled = source_lun.is_data_reduction_enabled + destination_cg_lun_list.append(destination_cg_lun_info) + return destination_cg_lun_list + + def enable_cg_replication(self, cg_name, replication): + ''' Add replication to the consistency group ''' + try: + # Validate replication params + self.validate_cg_replication_params(replication) + + # Get cg instance + cg_object = self.return_cg_instance(cg_name) + + # Check if replication is enabled for cg + if cg_object.check_cg_is_replicated(): + return False + + # Update replication params + self.update_replication_params(replication) + + # Get destination pool id + replication_args_list = { + 'dst_pool_id': replication['destination_pool_id'] + } + + # Get replication mode + if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous': + replication_args_list['max_time_out_of_sync'] = replication['rpo'] + else: + replication_args_list['max_time_out_of_sync'] = -1 + + # Get remote system + if 'replication_type' in replication and replication['replication_type'] == 'remote': + remote_system_name = replication['remote_system_name'] + remote_system_list 
= self.unity_conn.get_remote_system() + for remote_system in remote_system_list: + if remote_system.name == remote_system_name: + replication_args_list['remote_system'] = remote_system + break + if 'remote_system' not in replication_args_list.keys(): + errormsg = "Remote system %s is not found" % (remote_system_name) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Form destination LUNs list + source_lun_list = cg_object.luns + replication_args_list['source_luns'] = self.get_destination_cg_luns(source_lun_list) + + # Form destination cg name + if 'destination_cg_name' in replication and replication['destination_cg_name'] is not None: + replication_args_list['dst_cg_name'] = replication['destination_cg_name'] + else: + replication_args_list['dst_cg_name'] = "DR_" + cg_object.name + + LOG.info(("Enabling replication to the consistency group %s", cg_object.name)) + cg_object.replicate_cg_with_dst_resource_provisioning(**replication_args_list) + return True + except Exception as e: + errormsg = "Enabling replication to the consistency group %s failed " \ + "with error %s" % (cg_object.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def disable_cg_replication(self, cg_name): + ''' Remove replication from the consistency group ''' + try: + # Get cg instance + cg_object = self.return_cg_instance(cg_name) + + # Check if replication is enabled for cg + if not cg_object.check_cg_is_replicated(): + return False + + LOG.info(("Disabling replication from the consistency group %s", cg_object.name)) + curr_cg_repl_session = self.unity_conn.get_replication_session(src_resource_id=cg_object.id) + for repl_session in curr_cg_repl_session: + repl_session.delete() + return True + except Exception as e: + errormsg = "Disabling replication to the consistency group %s failed " \ + "with error %s" % (cg_object.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def perform_module_operation(self): + """ + Perform 
different actions on consistency group module based on + parameters chosen in playbook + """ + cg_name = self.module.params['cg_name'] + cg_id = self.module.params['cg_id'] + description = self.module.params['description'] + volumes = self.module.params['volumes'] + snap_schedule = self.module.params['snap_schedule'] + new_cg_name = self.module.params['new_cg_name'] + tiering_policy = self.module.params['tiering_policy'] + vol_state = self.module.params['vol_state'] + hosts = self.module.params['hosts'] + mapping_state = self.module.params['mapping_state'] + replication = self.module.params['replication_params'] + replication_state = self.module.params['replication_state'] + state = self.module.params['state'] + + # result is a dictionary that contains changed status and consistency + # group details + result = dict( + changed=False, + create_cg='', + modify_cg='', + rename_cg='', + add_vols_to_cg='', + remove_vols_from_cg='', + delete_cg='', + add_hosts_to_cg='', + remove_hosts_from_cg='', + consistency_group_details={} + ) + cg_details = self.get_details(cg_id=cg_id, cg_name=cg_name) + + if cg_name is None and cg_details: + cg_id = None + cg_name = cg_details['name'] + if volumes: + volumes = self.refine_volumes(volumes) + self.validate_volumes(volumes) + if hosts: + hosts = self.refine_hosts(hosts) + self.validate_hosts(hosts) + + modified = False + + if cg_details: + modified = self.is_cg_modified(cg_details) + + if vol_state and not volumes: + self.module.fail_json(msg="Please specify volumes along with vol_state") + + if mapping_state and not hosts: + self.module.fail_json(msg="Please specify hosts along with mapping_state") + + if replication and replication_state is None: + self.module.fail_json(msg="Please specify replication_state along with replication_params") + + if state == 'present' and not cg_details: + if not volumes and tiering_policy: + self.module.fail_json(msg="The system cannot assign a" + " tiering policy to an empty" + " consistency group") 
+ if not volumes and hosts: + self.module.fail_json(msg="The system cannot assign" + " hosts to an empty" + " consistency group") + + if not cg_name: + msg = "The parameter cg_name length is 0. It is too short." \ + " The min length is 1." + self.module.fail_json(msg=msg) + + if new_cg_name: + self.module.fail_json(msg="Invalid argument, new_cg_name is" + " not required") + + result['create_cg'], cg_details = self.create_cg( + cg_name, description, snap_schedule) + elif state == 'absent' and cg_details: + if cg_details['cg_replication_enabled']: + self.module.fail_json(msg="Consistency group cannot be deleted" + " because it is participating" + " in a replication session.") + if cg_details['luns']: + self.module.fail_json(msg="Please remove all volumes which" + " are part of consistency group" + " before deleting it.") + result['delete_cg'] = self.delete_cg(cg_name) + + if state == 'present' and vol_state == 'present-in-group' and \ + cg_details and volumes: + result['add_vols_to_cg'] = self.add_volumes_to_cg(cg_name, + volumes, + tiering_policy) + elif state == 'present' and vol_state == 'absent-in-group' and \ + cg_details and volumes: + result['remove_vols_from_cg'] = self.remove_volumes_from_cg( + cg_name, volumes) + + if hosts and mapping_state == 'mapped' and \ + cg_details: + result['add_hosts_to_cg'] = self.map_hosts_to_cg(cg_name, hosts) + + if hosts and mapping_state == 'unmapped' and \ + cg_details: + result['remove_hosts_from_cg'] = self.unmap_hosts_to_cg(cg_name, hosts) + + if state == 'present' and new_cg_name is not None: + if not new_cg_name: + msg = "The parameter new_cg_name length is 0. It is too" \ + " short. The min length is 1." 
+ self.module.fail_json(msg=msg) + + if cg_name != new_cg_name: + result['rename_cg'] = self.rename_cg(cg_name, new_cg_name) + cg_name = new_cg_name + + if state == 'present' and cg_details and modified: + result['modify_cg'] = self.modify_cg(cg_name, description, + snap_schedule, tiering_policy + ) + + if state == 'present' and cg_details and replication_state is not None: + if replication_state == 'enable': + result['changed'] = self.enable_cg_replication(cg_name, replication) + else: + result['changed'] = self.disable_cg_replication(cg_name) + + if result['create_cg'] or result['modify_cg'] or result[ + 'add_vols_to_cg'] or result['remove_vols_from_cg'] or result[ + 'delete_cg'] or result['rename_cg'] or result[ + 'add_hosts_to_cg'] or result['remove_hosts_from_cg']: + result['changed'] = True + + result['consistency_group_details'] = self.get_details(cg_id=cg_id, + cg_name=cg_name + ) + + self.module.exit_json(**result) + + def validate_cg_replication_params(self, replication): + ''' Validate cg replication params ''' + + # Valdiate replication + if replication is None: + errormsg = "Please specify replication_params to enable replication." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # validate destination pool info + if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None: + errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None: + errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Validate replication mode + if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous': + if replication['rpo'] is None: + errormsg = "rpo is required together with 'asynchronous' replication_mode." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if replication['rpo'] < 5 or replication['rpo'] > 1440: + errormsg = "rpo value should be in range of 5 to 1440" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Validate replication type + if replication['replication_type'] == 'remote' and replication['remote_system'] is None: + errormsg = "remote_system is required together with 'remote' replication_type" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Validate destination cg name + if 'destination_cg_name' in replication and replication['destination_cg_name'] is not None: + dst_cg_name_length = len(replication['destination_cg_name']) + if dst_cg_name_length == 0 or dst_cg_name_length > 95: + errormsg = "destination_cg_name value should be in range of 1 to 95" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + +def get_consistencygroup_parameters(): + """This method provide parameters required for the ansible consistency + group module on Unity""" + return dict( + cg_name=dict(required=False, type='str'), + cg_id=dict(required=False, type='str'), + description=dict(required=False, type='str'), + volumes=dict(required=False, type='list', elements='dict', + options=dict( + vol_name=dict(type='str'), + vol_id=dict(type='str') + ) + ), + snap_schedule=dict(required=False, type='str'), + new_cg_name=dict(required=False, type='str'), + tiering_policy=dict(required=False, type='str', choices=[ + 'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']), + vol_state=dict(required=False, type='str', + choices=['present-in-group', 'absent-in-group']), + hosts=dict(required=False, type='list', elements='dict', + options=dict( + host_name=dict(type='str'), + host_id=dict(type='str') + )), + mapping_state=dict(required=False, type='str', + choices=['mapped', 'unmapped']), + replication_params=dict(type='dict', options=dict( + destination_cg_name=dict(type='str'), + replication_mode=dict(type='str', choices=['asynchronous', 
'manual'], required=True), + rpo=dict(type='int'), + replication_type=dict(type='str', choices=['local', 'remote'], default='local'), + remote_system=dict(type='dict', + options=dict( + remote_system_host=dict(type='str', required=True, no_log=True), + remote_system_verifycert=dict(type='bool', required=False, + default=True), + remote_system_username=dict(type='str', required=True), + remote_system_password=dict(type='str', required=True, no_log=True), + remote_system_port=dict(type='int', required=False, default=443, no_log=True) + )), + destination_pool_name=dict(type='str'), + destination_pool_id=dict(type='str') + )), + replication_state=dict(type='str', choices=['enable', 'disable']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity consistency group object and perform action on it + based on user input from playbook""" + obj = ConsistencyGroup() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py new file mode 100644 index 00000000..bc44b514 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py @@ -0,0 +1,1889 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing FileSystem on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" + +module: filesystem +version_added: '1.1.0' +short_description: Manage filesystem on Unity storage system +description: +- Managing filesystem on Unity storage system includes + Create new filesystem, + Modify snapschedule attribute of filesystem, + Modify filesystem attributes, + Display filesystem details, + Display filesystem snapshots, + Display filesystem 
snapschedule, + Delete snapschedule associated with the filesystem, + Delete filesystem, + Create new filesystem with quota configuration, + Enable, modify and disable replication. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Arindam Datta (@dattaarindam) +- Meenakshi Dembi (@dembim) +- Spandita Panigrahi (@panigs7) + +options: + filesystem_name: + description: + - The name of the filesystem. Mandatory only for the create operation. + All the operations are supported through I(filesystem_name). + - It is mutually exclusive with I(filesystem_id). + type: str + filesystem_id: + description: + - The id of the filesystem. + - It can be used only for get, modify, or delete operations. + - It is mutually exclusive with I(filesystem_name). + type: str + pool_name: + description: + - This is the name of the pool where the filesystem will be created. + - Either the I(pool_name) or I(pool_id) must be provided to create a new + filesystem. + type: str + pool_id: + description: + - This is the ID of the pool where the filesystem will be created. + - Either the I(pool_name) or I(pool_id) must be provided to create a new + filesystem. + type: str + size: + description: + - The size of the filesystem. + type: int + cap_unit: + description: + - The unit of the filesystem size. It defaults to C(GB), if not specified. + choices: ['GB' , 'TB'] + type: str + nas_server_name: + description: + - Name of the NAS server on which filesystem will be hosted. + type: str + nas_server_id: + description: + - ID of the NAS server on which filesystem will be hosted. + type: str + supported_protocols: + description: + - Protocols supported by the file system. + - It will be overridden by NAS server configuration if NAS Server is C(Multiprotocol). + type: str + choices: ['NFS', 'CIFS', 'MULTIPROTOCOL'] + description: + description: + - Description about the filesystem. + - Description can be removed by passing empty string (""). 
+ type: str + smb_properties: + description: + - Advance settings for SMB. It contains optional candidate variables. + type: dict + suboptions: + is_smb_sync_writes_enabled: + description: + - Indicates whether the synchronous writes option is enabled on the + file system. + type: bool + is_smb_notify_on_access_enabled: + description: + - Indicates whether notifications of changes to directory file + structure are enabled. + type: bool + is_smb_op_locks_enabled: + description: + - Indicates whether opportunistic file locking is enabled on the file + system. + type: bool + is_smb_notify_on_write_enabled: + description: + - Indicates whether file write notifications are enabled on the file + system. + type: bool + smb_notify_on_change_dir_depth: + description: + - Integer variable, determines the lowest directory level to which + the enabled notifications apply. + - Minimum value is C(1). + type: int + data_reduction: + description: + - Boolean variable, specifies whether or not to enable compression. + Compression is supported only for thin filesystem. + type: bool + is_thin: + description: + - Boolean variable, specifies whether or not it is a thin filesystem. + type: bool + access_policy: + description: + - Access policy of a filesystem. + choices: ['NATIVE', 'UNIX', 'WINDOWS'] + type: str + locking_policy: + description: + - File system locking policies. These policy choices control whether the + NFSv4 range locks must be honored. + type: str + choices: ['ADVISORY', 'MANDATORY'] + tiering_policy: + description: + - Tiering policy choices for how the storage resource data will be + distributed among the tiers available in the pool. + choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST'] + type: str + quota_config: + description: + - Configuration for quota management. It contains optional parameters. + type: dict + suboptions: + grace_period: + description: + - Grace period set in quota configuration after soft limit is reached. 
+ - If I(grace_period) is not set during creation of filesystem, + it will be set to C(7 days) by default. + type: int + grace_period_unit: + description: + - Unit of grace period. + - Default unit is C(days). + type: str + choices: ['minutes', 'hours', 'days'] + default_hard_limit: + description: + - Default hard limit for user quotas and tree quotas. + - If I(default_hard_limit) is not set while creation of filesystem, + it will be set to C(0B) by default. + type: int + default_soft_limit: + description: + - Default soft limit for user quotas and tree quotas. + - If I(default_soft_limit) is not set while creation of filesystem, + it will be set to C(0B) by default. + type: int + is_user_quota_enabled: + description: + - Indicates whether the user quota is enabled. + - If I(is_user_quota_enabled) is not set while creation of filesystem, + it will be set to C(false) by default. + - Parameters I(is_user_quota_enabled) and I(quota_policy) are + mutually exclusive. + type: bool + quota_policy: + description: + - Quota policy set in quota configuration. + - If I(quota_policy) is not set while creation of filesystem, it will + be set to C(FILE_SIZE) by default. + - Parameters I(is_user_quota_enabled) and I(quota_policy) are + mutually exclusive. + choices: ['FILE_SIZE','BLOCKS'] + type: str + cap_unit: + description: + - Unit of I(default_soft_limit) and I(default_hard_limit) size. + - Default unit is C(GB). + choices: ['MB', 'GB', 'TB'] + type: str + state: + description: + - State variable to determine whether filesystem will exist or not. + choices: ['absent', 'present'] + required: true + type: str + snap_schedule_name: + description: + - This is the name of an existing snapshot schedule which is to be associated with the filesystem. + - This is mutually exclusive with I(snapshot_schedule_id). + type: str + snap_schedule_id: + description: + - This is the id of an existing snapshot schedule which is to be associated with the filesystem. 
+ - This is mutually exclusive with I(snapshot_schedule_name). + type: str + replication_params: + description: + - Settings required for enabling or modifying replication. + type: dict + suboptions: + replication_name: + description: + - Name of the replication session. + type: str + new_replication_name: + description: + - Replication name to rename the session to. + type: str + replication_mode: + description: + - The replication mode. + - This is a mandatory field while creating a replication session. + type: str + choices: ['asynchronous', 'manual'] + rpo: + description: + - Maximum time to wait before the system syncs the source and destination LUNs. + - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous). + - The value should be in range of C(5) to C(1440). + type: int + replication_type: + description: + - Type of replication. + choices: ['local', 'remote'] + type: str + remote_system: + description: + - Details of remote system to which the replication is being configured. + - The I(remote_system) option should be specified if the I(replication_type) is C(remote). + type: dict + suboptions: + remote_system_host: + required: true + description: + - IP or FQDN for remote Unity unisphere Host. + type: str + remote_system_username: + type: str + required: true + description: + - User name of remote Unity unisphere Host. + remote_system_password: + type: str + required: true + description: + - Password of remote Unity unisphere Host. + remote_system_verifycert: + type: bool + default: true + description: + - Boolean variable to specify whether or not to validate SSL + certificate of remote Unity unisphere Host. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be + verified. + remote_system_port: + description: + - Port at which remote Unity unisphere is hosted. 
+ type: int + default: 443 + destination_pool_id: + type: str + description: + - ID of pool to allocate destination filesystem. + destination_pool_name: + type: str + description: + - Name of pool to allocate destination filesystem. + replication_state: + description: + - State of the replication. + choices: ['enable', 'disable'] + type: str + +notes: +- SMB shares, NFS exports, and snapshots associated with filesystem need + to be deleted prior to deleting a filesystem. +- The I(quota_config) parameter can be used to update default hard limit + and soft limit values to limit the maximum space that can be used. + By default they both are set to 0 during filesystem + creation which means unlimited. +- The I(check_mode) is not supported. +""" + +EXAMPLES = r""" +- name: Create FileSystem + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + pool_name: "pool_1" + size: 5 + state: "present" + +- name: Create FileSystem with quota configuration + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + pool_name: "pool_1" + size: 5 + quota_config: + grace_period: 8 + grace_period_unit: "days" + default_soft_limit: 10 + is_user_quota_enabled: False + state: "present" + +- name: Expand FileSystem size + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + size: 10 + state: "present" + +- name: Expand FileSystem size + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: 
"{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + size: 10 + state: "present" + +- name: Modify FileSystem smb_properties + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "ansible_test_fs" + nas_server_name: "lglap761" + smb_properties: + is_smb_op_locks_enabled: True + smb_notify_on_change_dir_depth: 5 + is_smb_notify_on_access_enabled: True + state: "present" + +- name: Modify FileSystem Snap Schedule + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_141" + snap_schedule_id: "{{snap_schedule_id}}" + state: "{{state_present}}" + +- name: Get details of FileSystem using id + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + state: "present" + +- name: Delete a FileSystem using id + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + state: "absent" + +- name: Enable replication on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_repl" + replication_type: "remote" + replication_mode: "asynchronous" + rpo: 60 + remote_system: + remote_system_host: '0.1.2.3' + remote_system_verifycert: False + remote_system_username: 'username' + remote_system_password: 'password' + destination_pool_name: "pool_test_1" + replication_state: "enable" + state: "present" + +- name: Modify replication on the fs + dellemc.unity.filesystem: + 
unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_repl" + new_replication_name: "test_repl_updated" + replication_mode: "asynchronous" + rpo: 50 + replication_state: "enable" + state: "present" + +- name: Disable replication on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_state: "disable" + state: "present" + +- name: Disable replication by specifying replication_name on the fs + dellemc.unity.filesystem: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "rs_405" + replication_params: + replication_name: "test_replication" + replication_state: "disable" + state: "present" +""" + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true + +filesystem_details: + description: Details of the filesystem. + returned: When filesystem exists + type: dict + contains: + id: + description: The system generated ID given to the filesystem. + type: str + name: + description: Name of the filesystem. + type: str + description: + description: Description about the filesystem. + type: str + is_data_reduction_enabled: + description: Whether or not compression enabled on this + filesystem. + type: bool + size_total_with_unit: + description: Size of the filesystem with actual unit. + type: str + tiering_policy: + description: Tiering policy applied to this filesystem. + type: str + is_cifs_notify_on_access_enabled: + description: Indicates whether the system generates a + notification when a user accesses the file system. 
+ type: bool + is_cifs_notify_on_write_enabled: + description: Indicates whether the system generates a notification + when the file system is written to. + type: bool + is_cifs_op_locks_enabled: + description: Indicates whether opportunistic file locks are enabled + for the file system. + type: bool + is_cifs_sync_writes_enabled: + description: Indicates whether the CIFS synchronous writes option + is enabled for the file system. + type: bool + cifs_notify_on_change_dir_depth: + description: Indicates the lowest directory level to which the + enabled notifications apply, if any. + type: int + pool: + description: The pool in which this filesystem is allocated. + type: dict + contains: + id: + description: The system ID given to the pool. + type: str + name: + description: The name of the storage pool. + type: str + nas_server: + description: The NAS Server details on which this filesystem is hosted. + type: dict + contains: + id: + description: The system ID given to the NAS Server. + type: str + name: + description: The name of the NAS Server. + type: str + snapshots: + description: The list of snapshots of this filesystem. + type: list + contains: + id: + description: The system ID given to the filesystem + snapshot. + type: str + name: + description: The name of the filesystem snapshot. + type: str + is_thin_enabled: + description: Indicates whether thin provisioning is enabled for + this filesystem. + type: bool + snap_schedule_id: + description: Indicates the id of the snap schedule associated + with the filesystem. + type: str + snap_schedule_name: + description: Indicates the name of the snap schedule associated + with the filesystem. + type: str + quota_config: + description: Details of quota configuration of the filesystem + created. + type: dict + contains: + grace_period: + description: Grace period set in quota configuration + after soft limit is reached. 
+ type: str + default_hard_limit: + description: Default hard limit for user quotas + and tree quotas. + type: int + default_soft_limit: + description: Default soft limit for user quotas + and tree quotas. + type: int + is_user_quota_enabled: + description: Indicates whether the user quota is enabled. + type: bool + quota_policy: + description: Quota policy set in quota configuration. + type: str + replication_sessions: + description: List of replication sessions if replication is enabled. + type: dict + contains: + id: + description: ID of replication session + type: str + name: + description: Name of replication session + type: str + remote_system: + description: Remote system + type: dict + contains: + id: + description: ID of remote system + type: str + sample: { + "access_policy": "AccessPolicyEnum.UNIX", + "cifs_notify_on_change_dir_depth": 512, + "cifs_share": null, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "description": "", + "existed": true, + "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN", + "format": "FSFormatEnum.UFS64", + "hash": 8735427610152, + "health": { + "UnityHealth": { + "hash": 8735427614928 + } + }, + "host_io_size": "HostIOSizeEnum.GENERAL_8K", + "id": "fs_65916", + "is_advanced_dedup_enabled": false, + "is_cifs_notify_on_access_enabled": false, + "is_cifs_notify_on_write_enabled": false, + "is_cifs_op_locks_enabled": false, + "is_cifs_sync_writes_enabled": false, + "is_data_reduction_enabled": false, + "is_read_only": false, + "is_smbca": false, + "is_thin_enabled": true, + "locking_policy": "FSLockingPolicyEnum.MANDATORY", + "metadata_size": 11274289152, + "metadata_size_allocated": 4294967296, + "min_size_allocated": 0, + "name": "test_fs", + "nas_server": { + "id": "nas_18", + "name": "test_nas1" + }, + "nfs_share": null, + "per_tier_size_used": [ + 6979321856, + 0, + 0 + ], + "pool": { + "id": "pool_7", + "name": "pool 7" + }, + "pool_full_policy": 
"ResourcePoolFullPolicyEnum.FAIL_WRITES", + "quota_config": { + "default_hard_limit": "0B", + "default_soft_limit": "0B", + "grace_period": "7.0 days", + "id": "quotaconfig_171798760421_0", + "is_user_quota_enabled": false, + "quota_policy": "QuotaPolicyEnum.FILE_SIZE" + }, + "replication_sessions": { + "current_transfer_est_remain_time": 0, + "id": "***", + "last_sync_time": "2022-05-12 11:20:38+00:00", + "local_role": "ReplicationSessionReplicationRoleEnum.SOURCE", + "max_time_out_of_sync": 60, + "members": null, + "name": "local_repl_new", + "network_status": "ReplicationSessionNetworkStatusEnum.OK", + "remote_system": { + "UnityRemoteSystem": { + "hash": 8735426929707 + } + }, + "replication_resource_type": "ReplicationEndpointResourceTypeEnum.FILESYSTEM", + "src_resource_id": "res_66444", + "src_status": "ReplicationSessionStatusEnum.OK", + "status": "ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED", + "sync_progress": 0, + "sync_state": "ReplicationSessionSyncStateEnum.IDLE" + }, + "size_allocated": 283148288, + "size_allocated_total": 4578148352, + "size_preallocated": 2401173504, + "size_total": 10737418240, + "size_total_with_unit": "10.0 GB", + "size_used": 1620312064, + "snap_count": 2, + "snaps_size": 21474869248, + "snaps_size_allocated": 32768, + "snapshots": [], + "supported_protocols": "FSSupportedProtocolEnum.NFS", + "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH", + "type": "FilesystemTypeEnum.FILESYSTEM" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('filesystem') + +application_type = "Ansible/1.5.0" + + +class Filesystem(object): + """Class with FileSystem operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_filesystem_parameters()) + + mutually_exclusive = 
[['filesystem_name', 'filesystem_id'], + ['pool_name', 'pool_id'], + ['nas_server_name', 'nas_server_id'], + ['snap_schedule_name', 'snap_schedule_id']] + + required_one_of = [['filesystem_name', 'filesystem_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def get_filesystem(self, name=None, id=None, obj_nas_server=None): + """Get the details of a FileSystem. + :param filesystem_name: The name of the filesystem + :param filesystem_id: The id of the filesystem + :param obj_nas_server: NAS Server object instance + :return: instance of the respective filesystem if exist. + """ + + id_or_name = id if id else name + errormsg = "Failed to get the filesystem {0} with error {1}" + + try: + obj_fs = None + if id: + if obj_nas_server: + obj_fs = self.unity_conn.get_filesystem( + _id=id, + nas_server=obj_nas_server) + else: + obj_fs = self.unity_conn.get_filesystem(_id=id) + + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem " + "object %s ", obj_fs) + return obj_fs + elif name: + if not obj_nas_server: + err_msg = "NAS Server is required to get the FileSystem" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + obj_fs = self.unity_conn.get_filesystem( + name=name, + nas_server=obj_nas_server) + if obj_fs: + LOG.info( + "Successfully got the filesystem object %s ", obj_fs) + return obj_fs + else: + LOG.info("Failed to get the filesystem %s", id_or_name) + return None + + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + msg = errormsg.format(id_or_name, cred_err) + self.module.fail_json(msg=msg) + else: + msg = errormsg.format(id_or_name, str(e)) + 
self.module.fail_json(msg=msg) + + except utils.UnityResourceNotFoundError as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + return None + + except Exception as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_nas_server(self, name=None, id=None): + """Get the instance of a NAS Server. + :param name: The NAS Server name + :param id: The NAS Server id + :return: instance of the respective NAS Server if exists. + """ + + errormsg = "Failed to get the NAS Server {0} with error {1}" + id_or_name = name if name else id + + try: + obj_nas = self.unity_conn.get_nas_server(_id=id, name=name) + if id and obj_nas.existed: + LOG.info("Successfully got the nas server object %s", + obj_nas) + return obj_nas + elif name: + LOG.info("Successfully got the nas server object %s ", + obj_nas) + return obj_nas + else: + msg = "Failed to get the nas server with {0}".format( + id_or_name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + msg = errormsg.format(name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_pool(self, pool_name=None, pool_id=None): + """Get the instance of a pool. 
+ :param pool_name: The name of the pool + :param pool_id: The id of the pool + :return: Dict containing pool details if exists + """ + + id_or_name = pool_id if pool_id else pool_name + errormsg = "Failed to get the pool {0} with error {1}" + + try: + obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id) + + if pool_id and obj_pool.existed: + LOG.info("Successfully got the pool object %s", + obj_pool) + return obj_pool + if pool_name: + LOG.info("Successfully got pool %s", obj_pool) + return obj_pool + else: + msg = "Failed to get the pool with {0}".format( + id_or_name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_tiering_policy_enum(self, tiering_policy): + """Get the tiering_policy enum. + :param tiering_policy: The tiering_policy string + :return: tiering_policy enum + """ + + if tiering_policy in utils.TieringPolicyEnum.__members__: + return utils.TieringPolicyEnum[tiering_policy] + else: + errormsg = "Invalid choice {0} for tiering policy".format( + tiering_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_supported_protocol_enum(self, supported_protocol): + """Get the supported_protocol enum. + :param supported_protocol: The supported_protocol string + :return: supported_protocol enum + """ + + supported_protocol = "MULTI_PROTOCOL" if \ + supported_protocol == "MULTIPROTOCOL" else supported_protocol + if supported_protocol in utils.FSSupportedProtocolEnum.__members__: + return utils.FSSupportedProtocolEnum[supported_protocol] + else: + errormsg = "Invalid choice {0} for supported_protocol".format( + supported_protocol) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_locking_policy_enum(self, locking_policy): + """Get the locking_policy enum. 
+ :param locking_policy: The locking_policy string + :return: locking_policy enum + """ + if locking_policy in utils.FSLockingPolicyEnum.__members__: + return utils.FSLockingPolicyEnum[locking_policy] + else: + errormsg = "Invalid choice {0} for locking_policy".format( + locking_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_access_policy_enum(self, access_policy): + """Get the access_policy enum. + :param access_policy: The access_policy string + :return: access_policy enum + """ + if access_policy in utils.AccessPolicyEnum.__members__: + return utils.AccessPolicyEnum[access_policy] + else: + errormsg = "Invalid choice {0} for access_policy".format( + access_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_filesystem(self, name, obj_pool, obj_nas_server, size): + """Create a FileSystem. + :param name: Name of the FileSystem + :param obj_pool: Storage Pool obj instance + :param obj_nas_server: NAS Server obj instance + :param size: Total size of a filesystem in bytes + :return: FileSystem object on successful creation + """ + try: + + supported_protocol = self.module.params['supported_protocols'] + supported_protocol = self.get_supported_protocol_enum( + supported_protocol) if supported_protocol else None + is_thin = self.module.params['is_thin'] + + tiering_policy = self.module.params['tiering_policy'] + tiering_policy = self.get_tiering_policy_enum(tiering_policy) \ + if tiering_policy else None + + obj_fs = utils.UnityFileSystem.create( + self.unity_conn._cli, + pool=obj_pool, + nas_server=obj_nas_server, + name=name, + size=size, + proto=supported_protocol, + is_thin=is_thin, + tiering_policy=tiering_policy) + + LOG.info("Successfully created file system , %s", obj_fs) + return obj_fs + + except Exception as e: + errormsg = "Create filesystem {0} operation failed" \ + " with error {1}".format(name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def 
delete_filesystem(self, id): + """Delete a FileSystem. + :param id: The object instance of the filesystem to be deleted + """ + + try: + obj_fs = self.get_filesystem(id=id) + obj_fs_dict = obj_fs._get_properties() + if obj_fs_dict['cifs_share'] is not None: + errormsg = "The Filesystem has SMB Shares. Hence deleting " \ + "this filesystem is not safe." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if obj_fs_dict['nfs_share'] is not None: + errormsg = "The FileSystem has NFS Exports. Hence deleting " \ + "this filesystem is not safe." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + obj_fs.delete() + return True + + except Exception as e: + errormsg = "Delete operation of FileSystem id:{0} " \ + "failed with error {1}".format(id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def is_modify_required(self, obj_fs, cap_unit): + """Checks if any modify required for filesystem attributes + :param obj_fs: filesystem instance + :param cap_unit: capacity unit + :return: filesystem to update dict + """ + try: + to_update = {} + obj_fs = obj_fs.update() + description = self.module.params['description'] + + if description is not None and description != obj_fs.description: + to_update.update({'description': description}) + + size = self.module.params['size'] + if size and cap_unit: + size_byte = int(utils.get_size_bytes(size, cap_unit)) + if size_byte < obj_fs.size_total: + self.module.fail_json(msg="Filesystem size can be " + "expanded only") + elif size_byte > obj_fs.size_total: + to_update.update({'size': size_byte}) + + tiering_policy = self.module.params['tiering_policy'] + if tiering_policy and self.get_tiering_policy_enum( + tiering_policy) != obj_fs.tiering_policy: + to_update.update({'tiering_policy': + self.get_tiering_policy_enum( + tiering_policy)}) + + is_thin = self.module.params['is_thin'] + if is_thin is not None and is_thin != obj_fs.is_thin_enabled: + to_update.update({'is_thin': is_thin}) + + 
data_reduction = self.module.params['data_reduction'] + if data_reduction is not None and \ + data_reduction != obj_fs.is_data_reduction_enabled: + to_update.update({'is_compression': data_reduction}) + + access_policy = self.module.params['access_policy'] + if access_policy and self.get_access_policy_enum( + access_policy) != obj_fs.access_policy: + to_update.update({'access_policy': + self.get_access_policy_enum(access_policy)}) + + locking_policy = self.module.params['locking_policy'] + if locking_policy and self.get_locking_policy_enum( + locking_policy) != obj_fs.locking_policy: + to_update.update({'locking_policy': + self.get_locking_policy_enum( + locking_policy)}) + + snap_sch = obj_fs.storage_resource.snap_schedule + + if self.snap_sch_id is not None: + if self.snap_sch_id == "": + if snap_sch and snap_sch.id != self.snap_sch_id: + to_update.update({'is_snap_schedule_paused': False}) + elif snap_sch is None or snap_sch.id != self.snap_sch_id: + to_update.update({'snap_sch_id': self.snap_sch_id}) + + smb_properties = self.module.params['smb_properties'] + if smb_properties: + sync_writes_enabled = \ + smb_properties['is_smb_sync_writes_enabled'] + oplocks_enabled = \ + smb_properties['is_smb_op_locks_enabled'] + notify_on_write = \ + smb_properties['is_smb_notify_on_write_enabled'] + notify_on_access = \ + smb_properties['is_smb_notify_on_access_enabled'] + notify_on_change_dir_depth = \ + smb_properties['smb_notify_on_change_dir_depth'] + + if sync_writes_enabled is not None and \ + sync_writes_enabled != obj_fs.is_cifs_sync_writes_enabled: + to_update.update( + {'is_cifs_sync_writes_enabled': sync_writes_enabled}) + + if oplocks_enabled is not None and \ + oplocks_enabled != obj_fs.is_cifs_op_locks_enabled: + to_update.update( + {'is_cifs_op_locks_enabled': oplocks_enabled}) + + if notify_on_write is not None and \ + notify_on_write != \ + obj_fs.is_cifs_notify_on_write_enabled: + to_update.update( + {'is_cifs_notify_on_write_enabled': notify_on_write}) + 
+ if notify_on_access is not None and \ + notify_on_access != \ + obj_fs.is_cifs_notify_on_access_enabled: + to_update.update( + {'is_cifs_notify_on_access_enabled': + notify_on_access}) + + if notify_on_change_dir_depth is not None and \ + notify_on_change_dir_depth != \ + obj_fs.cifs_notify_on_change_dir_depth: + to_update.update( + {'cifs_notify_on_change_dir_depth': + notify_on_change_dir_depth}) + if len(to_update) > 0: + return to_update + else: + return None + + except Exception as e: + errormsg = "Failed to determine if FileSystem id: {0}" \ + " modification required with error {1}".format(obj_fs.id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_filesystem(self, update_dict, obj_fs): + """ modifes attributes for a filesystem instance + :param update_dict: modify dict + :return: True on Success + """ + try: + adv_smb_params = [ + 'is_cifs_sync_writes_enabled', + 'is_cifs_op_locks_enabled', + 'is_cifs_notify_on_write_enabled', + 'is_cifs_notify_on_access_enabled', + 'cifs_notify_on_change_dir_depth'] + + cifs_fs_payload = {} + fs_update_payload = {} + + for smb_param in adv_smb_params: + if smb_param in update_dict.keys(): + cifs_fs_payload.update({smb_param: update_dict[smb_param]}) + + LOG.debug("CIFS Modify Payload: %s", cifs_fs_payload) + + cifs_fs_parameters = obj_fs.prepare_cifs_fs_parameters( + **cifs_fs_payload) + + fs_update_params = [ + 'size', + 'is_thin', + 'tiering_policy', + 'is_compression', + 'access_policy', + 'locking_policy', + 'description', + 'cifs_fs_parameters'] + + for fs_param in fs_update_params: + if fs_param in update_dict.keys(): + fs_update_payload.update({fs_param: update_dict[fs_param]}) + + if cifs_fs_parameters: + fs_update_payload.update( + {'cifs_fs_parameters': cifs_fs_parameters}) + + if "snap_sch_id" in update_dict.keys(): + fs_update_payload.update( + {'snap_schedule_parameters': {'snapSchedule': + {'id': update_dict.get('snap_sch_id')} + }} + ) + elif "is_snap_schedule_paused" in 
update_dict.keys(): + fs_update_payload.update( + {'snap_schedule_parameters': {'isSnapSchedulePaused': False} + }) + + obj_fs = obj_fs.update() + resp = obj_fs.modify(**fs_update_payload) + LOG.info("Successfully modified the FS with response %s", resp) + changed = True if resp else False + + except Exception as e: + errormsg = "Failed to modify FileSystem instance id: {0}" \ + " with error {1}".format(obj_fs.id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_filesystem_display_attributes(self, obj_fs): + """get display filesystem attributes + :param obj_fs: filesystem instance + :return: filesystem dict to display + """ + try: + obj_fs = obj_fs.update() + filesystem_details = obj_fs._get_properties() + filesystem_details['size_total_with_unit'] = utils. \ + convert_size_with_unit(int(filesystem_details['size_total'])) + if obj_fs.pool: + filesystem_details.update( + {'pool': {'name': obj_fs.pool.name, + 'id': obj_fs.pool.id}}) + if obj_fs.nas_server: + filesystem_details.update( + {'nas_server': {'name': obj_fs.nas_server.name, + 'id': obj_fs.nas_server.id}}) + snap_list = [] + if obj_fs.has_snap(): + for snap in obj_fs.snapshots: + d = {'name': snap.name, 'id': snap.id} + snap_list.append(d) + filesystem_details['snapshots'] = snap_list + + if obj_fs.storage_resource.snap_schedule: + filesystem_details['snap_schedule_id'] = obj_fs.storage_resource.snap_schedule.id + filesystem_details['snap_schedule_name'] = obj_fs.storage_resource.snap_schedule.name + + quota_config_obj = self.get_quota_config_details(obj_fs) + + if quota_config_obj: + + hard_limit = utils.convert_size_with_unit( + quota_config_obj.default_hard_limit) + soft_limit = utils.convert_size_with_unit( + quota_config_obj.default_soft_limit) + grace_period = get_time_with_unit( + quota_config_obj.grace_period) + + filesystem_details.update({'quota_config': + {'id': quota_config_obj.id, + 'default_hard_limit': hard_limit, + 'default_soft_limit': soft_limit, + 
'is_user_quota_enabled': + quota_config_obj.is_user_quota_enabled, + 'quota_policy': quota_config_obj._get_properties()[ + 'quota_policy'], + 'grace_period': grace_period} + }) + filesystem_details['replication_sessions'] = [] + fs_repl_sessions = self.get_replication_session(obj_fs) + if fs_repl_sessions: + filesystem_details['replication_sessions'] = \ + fs_repl_sessions._get_properties() + return filesystem_details + + except Exception as e: + errormsg = "Failed to display the filesystem {0} with " \ + "error {1}".format(obj_fs.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_input_string(self): + """ validates the input string checks if it is empty string """ + invalid_string = "" + try: + for key in self.module.params: + val = self.module.params[key] + if key == "description" or key == "snap_schedule_name" \ + or key == "snap_schedule_id": + continue + if isinstance(val, str) \ + and val == invalid_string: + errmsg = 'Invalid input parameter "" for {0}'.format( + key) + self.module.fail_json(msg=errmsg) + if self.module.params['replication_params'] and self.module.params['replication_state'] is None: + self.module.fail_json(msg="Please specify replication_state along with replication_params") + except Exception as e: + errormsg = "Failed to validate the module param with " \ + "error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def resolve_to_snapschedule_id(self, params): + """ Get snapshot id for a give snap schedule name + :param params: snap schedule name or id + :return: snap schedule id after validation + """ + + try: + snap_sch_id = None + snapshot_schedule = {} + if params["name"]: + snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, name=params["name"]) + elif params["id"]: + snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, id=params["id"]) + + if snapshot_schedule: + snap_sch_id = snapshot_schedule.id[0] + + if not 
snap_sch_id: + errormsg = "Failed to find the snapshot schedule id against given name " \ + "or id: {0}".format(params["name"]), (params["id"]) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + return snap_sch_id + + except Exception as e: + errormsg = "Failed to find the snapshot schedules with " \ + "error {0}".format(str(e)) + + def get_quota_config_details(self, obj_fs): + """ + Get the quota config ID mapped to the filesystem + :param obj_fs: Filesystem instance + :return: Quota config object if exists else None + """ + try: + all_quota_config = self.unity_conn.get_quota_config(filesystem=obj_fs) + fs_id = obj_fs.id + + if len(all_quota_config) == 0: + LOG.error("The quota_config object for new filesystem " + "is not updated yet.") + return None + + for quota_config in range(len(all_quota_config)): + if fs_id and all_quota_config[quota_config].filesystem.id == fs_id and \ + not all_quota_config[quota_config].tree_quota: + msg = "Quota config id for filesystem %s is %s" \ + % (fs_id, all_quota_config[quota_config].id) + LOG.info(msg) + return all_quota_config[quota_config] + + except Exception as e: + errormsg = "Failed to fetch quota config for filesystem {0} " \ + " with error {1}".format(fs_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_quota_config(self, quota_config_obj, quota_config_params): + """ + Modify default quota config settings of newly created filesystem. + The default setting of quota config after filesystem creation is: + default_soft_limit and default_hard_limit are 0, + is_user_quota_enabled is false, + grace_period is 7 days and, + quota_policy is FILE_SIZE. 
+ :param quota_config_obj: Quota config instance + :param quota_config_params: Quota config parameters to be modified + :return: Boolean whether quota config is modified + """ + + if quota_config_params: + soft_limit = quota_config_params['default_soft_limit'] + hard_limit = quota_config_params['default_hard_limit'] + is_user_quota_enabled = quota_config_params['is_user_quota_enabled'] + quota_policy = quota_config_params['quota_policy'] + grace_period = quota_config_params['grace_period'] + cap_unit = quota_config_params['cap_unit'] + gp_unit = quota_config_params['grace_period_unit'] + + if soft_limit: + soft_limit_in_bytes = utils.get_size_bytes(soft_limit, cap_unit) + else: + soft_limit_in_bytes = quota_config_obj.default_soft_limit + + if hard_limit: + hard_limit_in_bytes = utils.get_size_bytes(hard_limit, cap_unit) + else: + hard_limit_in_bytes = quota_config_obj.default_hard_limit + + if grace_period: + grace_period_in_sec = get_time_in_seconds(grace_period, gp_unit) + else: + grace_period_in_sec = quota_config_obj.grace_period + + policy_enum = None + policy_enum_val = None + if quota_policy: + if utils.QuotaPolicyEnum[quota_policy]: + policy_enum = utils.QuotaPolicyEnum[quota_policy] + policy_enum_val = \ + utils.QuotaPolicyEnum[quota_policy]._get_properties()['value'] + else: + errormsg = "Invalid choice {0} for quota policy".format( + quota_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Verify if modify is required. 
If not required, return False + if quota_config_obj.default_hard_limit == hard_limit_in_bytes and \ + quota_config_obj.default_soft_limit == soft_limit_in_bytes and \ + quota_config_obj.grace_period == grace_period_in_sec and \ + ((quota_policy is not None and + quota_config_obj.quota_policy == policy_enum) or + quota_policy is None) and \ + (is_user_quota_enabled is None or + (is_user_quota_enabled is not None and + is_user_quota_enabled == quota_config_obj.is_user_quota_enabled)): + return False + + try: + resp = self.unity_conn.modify_quota_config( + quota_config_id=quota_config_obj.id, + grace_period=grace_period_in_sec, + default_hard_limit=hard_limit_in_bytes, + default_soft_limit=soft_limit_in_bytes, + is_user_quota_enabled=is_user_quota_enabled, + quota_policy=policy_enum_val) + LOG.info("Successfully modified the quota config with response %s", resp) + return True + + except Exception as e: + errormsg = "Failed to modify quota config for filesystem {0} " \ + " with error {1}".format(quota_config_obj.filesystem.id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def update_replication_params(self, replication_params): + ''' Update replication params ''' + try: + if replication_params['replication_type'] == 'remote' or \ + (replication_params['replication_type'] is None and + replication_params['remote_system']): + connection_params = { + 'unispherehost': replication_params['remote_system']['remote_system_host'], + 'username': replication_params['remote_system']['remote_system_username'], + 'password': replication_params['remote_system']['remote_system_password'], + 'validate_certs': replication_params['remote_system']['remote_system_verifycert'], + 'port': replication_params['remote_system']['remote_system_port'] + } + remote_system_conn = utils.get_unity_unisphere_connection( + connection_params, application_type) + replication_params['remote_system_name'] = remote_system_conn.name + if replication_params['destination_pool_name'] is 
not None: + pool_object = \ + remote_system_conn.get_pool(name=replication_params['destination_pool_name']) + replication_params['destination_pool_id'] = pool_object.id + else: + if replication_params['destination_pool_name'] is not None: + pool_object = \ + self.unity_conn.get_pool(name=replication_params['destination_pool_name']) + replication_params['destination_pool_id'] = pool_object.id + except Exception as e: + errormsg = "Updating replication params failed" \ + " with error %s" % str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_rpo(self, replication_params): + ''' Validates rpo based on replication mode ''' + if replication_params['replication_mode'] == 'asynchronous' and \ + replication_params['rpo'] is None: + errormsg = "rpo is required together with 'asynchronous' replication_mode." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if (replication_params['rpo'] and (replication_params['rpo'] < 5 or replication_params['rpo'] > 1440)) \ + and (replication_params['replication_mode'] and replication_params['replication_mode'] != 'manual' or + not replication_params['replication_mode'] and replication_params['rpo'] != -1): + errormsg = "rpo value should be in range of 5 to 1440" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_replication_params(self, replication_params): + ''' Validate replication params ''' + if not replication_params: + errormsg = "Please specify replication_params to enable replication." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if replication_params['destination_pool_id'] is not None and \ + replication_params['destination_pool_name'] is not None: + errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + self.validate_rpo(replication_params) + # Validate replication type + if replication_params['replication_type'] == 'remote' and replication_params['remote_system'] is None: + errormsg = "Remote_system is required together with 'remote' replication_type" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_create_replication_params(self, replication_params): + ''' Validate replication params ''' + + if replication_params['destination_pool_id'] is None and \ + replication_params['destination_pool_name'] is None: + errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required to enable replication." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + keys = ['replication_mode', 'replication_type'] + for key in keys: + if replication_params[key] is None: + errormsg = "Please specify %s to enable replication." % key + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_replication_session(self, obj_fs, repl_session, replication_params): + """ Modify the replication session + :param: obj_fs: Filesystem object + :param: repl_session: Replication session to be modified + :param: replication_params: Module input params + :return: True if modification is successful + """ + try: + LOG.info("Modifying replication session of filesystem %s", obj_fs.name) + modify_payload = {} + if replication_params['replication_mode'] and \ + replication_params['replication_mode'] == 'manual': + rpo = -1 + elif replication_params['rpo']: + rpo = replication_params['rpo'] + name = repl_session.name + if replication_params['new_replication_name'] and \ + name != replication_params['new_replication_name']: + name = replication_params['new_replication_name'] + + if repl_session.name != name: + modify_payload['name'] = name + if ((replication_params['replication_mode'] or replication_params['rpo']) and + repl_session.max_time_out_of_sync != rpo): + 
modify_payload['max_time_out_of_sync'] = rpo + + if modify_payload: + repl_session.modify(**modify_payload) + return True + + return False + except Exception as e: + errormsg = "Modifying replication session failed with error %s", e + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def enable_replication(self, obj_fs, replication_params): + """ Enable the replication session + :param: obj_fs: Filesystem object + :param: replication_params: Module input params + :return: True if enabling replication is successful + """ + try: + self.validate_replication_params(replication_params) + self.update_replication_params(replication_params) + + repl_session = \ + self.get_replication_session_on_filter(obj_fs, replication_params, "modify") + if repl_session: + return self.modify_replication_session(obj_fs, repl_session, replication_params) + + self.validate_create_replication_params(replication_params) + replication_args_list = get_replication_args_list(replication_params) + if 'remote_system_name' in replication_params: + remote_system_name = replication_params['remote_system_name'] + remote_system_list = self.unity_conn.get_remote_system() + for remote_system in remote_system_list: + if remote_system.name == remote_system_name: + replication_args_list['remote_system'] = remote_system + break + if 'remote_system' not in replication_args_list.keys(): + errormsg = "Remote system %s is not found" % (remote_system_name) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + LOG.info(("Enabling replication to the filesystem %s", obj_fs.name)) + obj_fs.replicate_with_dst_resource_provisioning(**replication_args_list) + return True + except Exception as e: + errormsg = "Enabling replication to the filesystem %s failed " \ + "with error %s" % (obj_fs.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def disable_replication(self, obj_fs, replication_params): + """ Remove replication from the filesystem + :param: replication_params: 
Module input params + :return: True if disabling replication is successful + """ + try: + LOG.info(("Disabling replication on the filesystem %s", obj_fs.name)) + if replication_params: + self.update_replication_params(replication_params) + repl_session = \ + self.get_replication_session_on_filter(obj_fs, replication_params, "delete") + if repl_session: + repl_session.delete() + return True + return False + except Exception as e: + errormsg = "Disabling replication on the filesystem %s failed " \ + "with error %s" % (obj_fs.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_replication_session_on_filter(self, obj_fs, replication_params, action): + if replication_params and replication_params['remote_system']: + repl_session = \ + self.get_replication_session(obj_fs, filter_key="remote_system_name", + replication_params=replication_params) + elif replication_params and replication_params['replication_name']: + repl_session = \ + self.get_replication_session(obj_fs, filter_key="name", + name=replication_params['replication_name']) + else: + repl_session = self.get_replication_session(obj_fs, action=action) + if repl_session and action and replication_params and \ + replication_params['replication_type'] == 'local' and \ + repl_session.remote_system.name != self.unity_conn.name: + return None + + return repl_session + + def get_replication_session(self, obj_fs, filter_key=None, replication_params=None, name=None, action=None): + """ Retrieves the replication sessions configured for the filesystem + :param: obj_fs: Filesystem object + :param: filter_key: Key to filter replication sessions + :param: replication_params: Module input params + :param: name: Replication session name + :param: action: Specifies modify or delete action on replication session + :return: Replication session details + """ + try: + repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_fs.storage_resource.id) + if not filter_key and 
repl_session: + if len(repl_session) > 1: + if action: + error_msg = 'There are multiple replication sessions for the filesystem.'\ + ' Please specify replication_name in replication_params to %s.' % action + self.module.fail_json(msg=error_msg) + return repl_session + return repl_session[0] + for session in repl_session: + if filter_key == 'remote_system_name' and \ + session.remote_system.name == replication_params['remote_system_name']: + return session + if filter_key == 'name' and session.name == name: + return session + return None + except Exception as e: + errormsg = "Retrieving replication session on the filesystem failed " \ + "with error %s", str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def perform_module_operation(self): + """ + Perform different actions on filesystem module based on parameters + passed in the playbook + """ + filesystem_name = self.module.params['filesystem_name'] + filesystem_id = self.module.params['filesystem_id'] + nas_server_name = self.module.params['nas_server_name'] + nas_server_id = self.module.params['nas_server_id'] + pool_name = self.module.params['pool_name'] + pool_id = self.module.params['pool_id'] + size = self.module.params['size'] + cap_unit = self.module.params['cap_unit'] + quota_config = self.module.params['quota_config'] + replication_params = self.module.params['replication_params'] + replication_state = self.module.params['replication_state'] + state = self.module.params['state'] + snap_schedule_name = self.module.params['snap_schedule_name'] + snap_schedule_id = self.module.params['snap_schedule_id'] + + # result is a dictionary to contain end state and FileSystem details + changed = False + result = dict( + changed=False, + filesystem_details={} + ) + + to_modify_dict = None + filesystem_details = None + quota_config_obj = None + + self.validate_input_string() + + if size is not None and size == 0: + self.module.fail_json(msg="Size can not be 0 (Zero)") + + if size and not cap_unit: + 
cap_unit = 'GB' + + if quota_config: + if (quota_config['default_hard_limit'] is not None + or quota_config['default_soft_limit'] is not None) and \ + not quota_config['cap_unit']: + quota_config['cap_unit'] = 'GB' + + if quota_config['grace_period'] is not None \ + and quota_config['grace_period_unit'] is None: + quota_config['grace_period_unit'] = 'days' + + if quota_config['grace_period'] is not None \ + and quota_config['grace_period'] <= 0: + self.module.fail_json(msg="Invalid grace_period provided. " + "Must be greater than 0.") + + if quota_config['default_soft_limit'] is not None \ + and utils.is_size_negative(quota_config['default_soft_limit']): + self.module.fail_json(msg="Invalid default_soft_limit provided. " + "Must be greater than or equal to 0.") + + if quota_config['default_hard_limit'] is not None \ + and utils.is_size_negative(quota_config['default_hard_limit']): + self.module.fail_json(msg="Invalid default_hard_limit provided. " + "Must be greater than or equal to 0.") + + if (cap_unit is not None) and not size: + self.module.fail_json(msg="cap_unit can be specified along " + "with size") + + nas_server = None + if nas_server_name or nas_server_id: + nas_server = self.get_nas_server( + name=nas_server_name, id=nas_server_id) + + obj_pool = None + if pool_name or pool_id: + obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id) + + obj_fs = None + obj_fs = self.get_filesystem(name=filesystem_name, + id=filesystem_id, + obj_nas_server=nas_server) + + self.snap_sch_id = None + if snap_schedule_name or snap_schedule_id: + snap_schedule_params = { + "name": snap_schedule_name, + "id": snap_schedule_id + } + self.snap_sch_id = self.resolve_to_snapschedule_id(snap_schedule_params) + elif snap_schedule_name == "" or snap_schedule_id == "": + self.snap_sch_id = "" + + if obj_fs: + filesystem_details = obj_fs._get_properties() + filesystem_id = obj_fs.get_id() + to_modify_dict = self.is_modify_required(obj_fs, cap_unit) + LOG.info("From Mod Op, 
to_modify_dict: %s", to_modify_dict) + + if state == 'present' and not filesystem_details: + if not filesystem_name: + msg_noname = "FileSystem with id {0} is not found, unable to " \ + "create a FileSystem without a valid " \ + "filesystem_name".format(filesystem_id) + self.module.fail_json(msg=msg_noname) + + if not pool_name and not pool_id: + self.module.fail_json(msg="pool_id or pool_name is required " + "to create new filesystem") + if not size: + self.module.fail_json(msg="Size is required to create" + " a filesystem") + size = utils.get_size_bytes(size, cap_unit) + + obj_fs = self.create_filesystem(name=filesystem_name, + obj_pool=obj_pool, + obj_nas_server=nas_server, + size=size) + + LOG.debug("Successfully created filesystem , %s", obj_fs) + filesystem_id = obj_fs.id + filesystem_details = obj_fs._get_properties() + to_modify_dict = self.is_modify_required(obj_fs, cap_unit) + LOG.debug("Got filesystem id , %s", filesystem_id) + changed = True + + if state == 'present' and filesystem_details and to_modify_dict: + self.modify_filesystem(update_dict=to_modify_dict, obj_fs=obj_fs) + changed = True + + """ + Set quota configuration + """ + if state == "present" and filesystem_details and quota_config: + quota_config_obj = self.get_quota_config_details(obj_fs) + + if quota_config_obj is not None: + is_quota_config_modified = self.modify_quota_config( + quota_config_obj=quota_config_obj, + quota_config_params=quota_config) + + if is_quota_config_modified: + changed = True + else: + self.module.fail_json(msg="One or more operations related" + " to this task failed because the" + " new object created could not be fetched." 
+ " Please rerun the task for expected result.") + + if state == 'present' and filesystem_details and replication_state is not None: + if replication_state == 'enable': + changed = self.enable_replication(obj_fs, replication_params) + else: + changed = self.disable_replication(obj_fs, replication_params) + + if state == 'absent' and filesystem_details: + changed = self.delete_filesystem(filesystem_id) + filesystem_details = None + + if state == 'present' and filesystem_details: + filesystem_details = self.get_filesystem_display_attributes( + obj_fs=obj_fs) + + result['changed'] = changed + result['filesystem_details'] = filesystem_details + self.module.exit_json(**result) + + +def get_time_in_seconds(time, time_units): + """This method get time is seconds""" + min_in_sec = 60 + hour_in_sec = 60 * 60 + day_in_sec = 24 * 60 * 60 + if time is not None and time > 0: + if time_units in 'minutes': + return time * min_in_sec + elif time_units in 'hours': + return time * hour_in_sec + elif time_units in 'days': + return time * day_in_sec + else: + return time + else: + return 0 + + +def get_time_with_unit(time): + """This method sets seconds in minutes, hours or days.""" + sec_in_min = 60 + sec_in_hour = 60 * 60 + sec_in_day = 24 * 60 * 60 + + if time % sec_in_day == 0: + time = time / sec_in_day + unit = 'days' + + elif time % sec_in_hour == 0: + time = time / sec_in_hour + unit = 'hours' + + else: + time = time / sec_in_min + unit = 'minutes' + return "%s %s" % (time, unit) + + +def get_replication_args_list(replication_params): + """Returns the replication args for payload""" + replication_args_list = { + 'dst_pool_id': replication_params['destination_pool_id'] + } + + if replication_params['replication_name']: + replication_args_list['replication_name'] = replication_params['replication_name'] + if 'replication_mode' in replication_params and \ + replication_params['replication_mode'] == 'asynchronous': + replication_args_list['max_time_out_of_sync'] = 
replication_params['rpo'] + else: + replication_args_list['max_time_out_of_sync'] = -1 + + return replication_args_list + + +def get_filesystem_parameters(): + """This method provide parameters required for the ansible filesystem + module on Unity""" + return dict( + filesystem_name=dict(required=False, type='str'), + filesystem_id=dict(required=False, type='str'), + nas_server_name=dict(required=False, type='str'), + nas_server_id=dict(required=False, type='str'), + description=dict(required=False, type='str'), + pool_name=dict(required=False, type='str'), + pool_id=dict(required=False, type='str'), + size=dict(required=False, type='int'), + cap_unit=dict(required=False, type='str', choices=['GB', 'TB']), + is_thin=dict(required=False, type='bool'), + data_reduction=dict(required=False, type='bool'), + supported_protocols=dict(required=False, type='str', + choices=['NFS', 'CIFS', 'MULTIPROTOCOL']), + smb_properties=dict(type='dict', options=dict( + is_smb_sync_writes_enabled=dict(type='bool'), + is_smb_notify_on_access_enabled=dict(type='bool'), + is_smb_op_locks_enabled=dict(type='bool'), + is_smb_notify_on_write_enabled=dict(type='bool'), + smb_notify_on_change_dir_depth=dict(type='int') + )), + access_policy=dict(required=False, type='str', + choices=['NATIVE', 'UNIX', 'WINDOWS']), + locking_policy=dict(required=False, type='str', + choices=['ADVISORY', 'MANDATORY']), + tiering_policy=dict(required=False, type='str', choices=[ + 'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']), + snap_schedule_name=dict(required=False, type='str'), + snap_schedule_id=dict(required=False, type='str'), + quota_config=dict(required=False, type='dict', options=dict( + grace_period=dict(required=False, type='int'), + grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']), + default_hard_limit=dict(required=False, type='int'), + default_soft_limit=dict(required=False, type='int'), + is_user_quota_enabled=dict(required=False, type='bool'), + 
quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']), + cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']), + ), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]), + replication_params=dict(type='dict', options=dict( + replication_name=dict(type='str'), + new_replication_name=dict(type='str'), + replication_type=dict(type='str', choices=['local', 'remote']), + replication_mode=dict(type='str', choices=['asynchronous', 'manual']), + rpo=dict(type='int'), + remote_system=dict(type='dict', + options=dict( + remote_system_host=dict(type='str', required=True), + remote_system_verifycert=dict(type='bool', required=False, + default=True), + remote_system_username=dict(type='str', required=True), + remote_system_password=dict(type='str', required=True, no_log=True), + remote_system_port=dict(type='int', required=False, default=443) + )), + destination_pool_name=dict(type='str'), + destination_pool_id=dict(type='str') + )), + replication_state=dict(type='str', choices=['enable', 'disable']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity FileSystem object and perform action on it + based on user input from playbook""" + obj = Filesystem() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py new file mode 100644 index 00000000..1bcc9b89 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py @@ -0,0 +1,772 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Filesystem Snapshots on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + 
+DOCUMENTATION = r''' +--- +module: filesystem_snapshot +short_description: Manage filesystem snapshot on the Unity storage system +description: +- Managing Filesystem Snapshot on the Unity storage system includes + create filesystem snapshot, get filesystem snapshot, modify filesystem + snapshot and delete filesystem snapshot. +version_added: '1.1.0' +extends_documentation_fragment: + - dellemc.unity.unity +author: +- Rajshree Khare (@kharer5) +options: + snapshot_name: + description: + - The name of the filesystem snapshot. + - Mandatory parameter for creating a filesystem snapshot. + - For all other operations either I(snapshot_name) or I(snapshot_id) + is required. + type: str + snapshot_id: + description: + - During creation snapshot_id is auto generated. + - For all other operations either I(snapshot_id) or I(snapshot_name) + is required. + type: str + filesystem_name: + description: + - The name of the Filesystem for which snapshot is created. + - For creation of filesystem snapshot either I(filesystem_name) or + I(filesystem_id) is required. + - Not required for other operations. + type: str + filesystem_id: + description: + - The ID of the Filesystem for which snapshot is created. + - For creation of filesystem snapshot either I(filesystem_id) or + I(filesystem_name) is required. + - Not required for other operations. + type: str + nas_server_name: + description: + - The name of the NAS server in which the Filesystem is created. + - For creation of filesystem snapshot either I(nas_server_name) or + I(nas_server_id) is required. + - Not required for other operations. + type: str + nas_server_id: + description: + - The ID of the NAS server in which the Filesystem is created. + - For creation of filesystem snapshot either I(filesystem_id) or + I(filesystem_name) is required. + - Not required for other operations. + type: str + auto_delete: + description: + - This option specifies whether or not the filesystem snapshot will be + automatically deleted. 
+    - If set to C(true), the filesystem snapshot will expire based on the pool
+      auto deletion policy.
+    - If set to C(false), the filesystem snapshot will not be auto deleted
+      based on the pool auto deletion policy.
+    - Option I(auto_delete) cannot be set to C(true), if I(expiry_time) is specified.
+    - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned
+      then the filesystem snapshot will be created keeping I(auto_delete) as
+      C(true).
+    - Once the I(expiry_time) is set, then the filesystem snapshot cannot be
+      assigned to the auto delete policy.
+    type: bool
+  expiry_time:
+    description:
+    - This option is for specifying the date and time after which the
+      filesystem snapshot will expire.
+    - The time is to be mentioned in UTC timezone.
+    - The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+    type: str
+  description:
+    description:
+    - The additional information about the filesystem snapshot can be
+      provided using this option.
+    - The description can be removed by passing an empty string.
+    type: str
+  fs_access_type:
+    description:
+    - Access type of the filesystem snapshot.
+    - Required only during creation of filesystem snapshot.
+    - If not given, snapshot's access type will be C(Checkpoint).
+    type: str
+    choices: ['Checkpoint', 'Protocol']
+  state:
+    description:
+    - The state option is used to mention the existence of the filesystem
+      snapshot.
+    type: str
+    required: true
+    choices: ['absent', 'present']
+notes:
+  - Filesystem snapshot cannot be deleted, if it has nfs or smb share.
+  - The I(check_mode) is not supported.
+''' + +EXAMPLES = r''' + - name: Create Filesystem Snapshot + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + filesystem_name: "ansible_test_FS" + nas_server_name: "lglad069" + description: "Created using playbook" + auto_delete: True + fs_access_type: "Protocol" + state: "present" + + - name: Create Filesystem Snapshot with expiry time + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap_1" + filesystem_name: "ansible_test_FS_1" + nas_server_name: "lglad069" + description: "Created using playbook" + expiry_time: "04/15/2021 2:30" + fs_access_type: "Protocol" + state: "present" + + - name: Get Filesystem Snapshot Details using Name + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + state: "present" + + - name: Get Filesystem Snapshot Details using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + state: "present" + + - name: Update Filesystem Snapshot attributes + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + description: "Description updated" + auto_delete: False + expiry_time: "04/15/2021 5:30" + state: "present" + + - name: Update Filesystem Snapshot attributes using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: 
"{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + expiry_time: "04/18/2021 8:30" + state: "present" + + - name: Delete Filesystem Snapshot using Name + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "ansible_test_FS_snap" + state: "absent" + + - name: Delete Filesystem Snapshot using ID + dellemc.unity.filesystem_snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_id: "10008000403" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true + +filesystem_snapshot_details: + description: Details of the filesystem snapshot. + returned: When filesystem snapshot exists + type: dict + contains: + access_type: + description: Access type of filesystem snapshot. + type: str + attached_wwn: + description: Attached WWN details. + type: str + creation_time: + description: Creation time of filesystem snapshot. + type: str + creator_schedule: + description: Creator schedule of filesystem snapshot. + type: str + creator_type: + description: Creator type for filesystem snapshot. + type: str + creator_user: + description: Creator user for filesystem snapshot. + type: str + description: + description: Description of the filesystem snapshot. + type: str + expiration_time: + description: Date and time after which the filesystem snapshot + will expire. + type: str + is_auto_delete: + description: Is the filesystem snapshot is auto deleted or not. + type: bool + id: + description: Unique identifier of the filesystem snapshot + instance. + type: str + name: + description: The name of the filesystem snapshot. + type: str + size: + description: Size of the filesystem snapshot. 
+ type: int + filesystem_name: + description: Name of the filesystem for which the snapshot exists. + type: str + filesystem_id: + description: Id of the filesystem for which the snapshot exists. + type: str + nas_server_name: + description: Name of the NAS server on which filesystem exists. + type: str + nas_server_id: + description: Id of the NAS server on which filesystem exists. + type: str + sample: { + "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT", + "attached_wwn": null, + "creation_time": "2022-10-21 04:42:53.951000+00:00", + "creator_schedule": null, + "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM", + "creator_user": { + "id": "user_admin" + }, + "description": "Created using playbook", + "existed": true, + "expiration_time": null, + "filesystem_id": "fs_137", + "filesystem_name": "test", + "hash": 8739894572587, + "host_access": null, + "id": "171798721695", + "io_limit_policy": null, + "is_auto_delete": true, + "is_modifiable": false, + "is_modified": false, + "is_read_only": true, + "is_system_snap": false, + "last_writable_time": null, + "lun": null, + "name": "test_FS_snap_1", + "nas_server_id": "nas_1", + "nas_server_name": "lglad072", + "parent_snap": null, + "size": 107374182400, + "snap_group": null, + "state": "SnapStateEnum.READY" + } + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +from datetime import datetime + +LOG = utils.get_logger('filesystem_snapshot') + +application_type = "Ansible/1.5.0" + + +class FilesystemSnapshot(object): + """Class with Filesystem Snapshot operations""" + + def __init__(self): + """ Define all parameters required by this module""" + + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_snapshot_parameters()) + + mutually_exclusive = [['snapshot_name', 'snapshot_id'], + ['filesystem_name', 'filesystem_id'], + ['nas_server_name', 'nas_server_id']] + + 
required_one_of = [['snapshot_name', 'snapshot_id']] + # initialize the ansible module + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + utils.ensure_required_libs(self.module) + + # result is a dictionary that contains changed status and + # filesystem snapshot details + self.result = {"changed": False, + 'filesystem_snapshot_details': {}} + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + self.snap_obj = utils.snap.UnitySnap(self.unity_conn) + LOG.info('Connection established with the Unity Array') + + def validate_expiry_time(self, expiry_time): + """Validates the specified expiry_time""" + try: + datetime.strptime(expiry_time, '%m/%d/%Y %H:%M') + except ValueError: + error_msg = ("expiry_time: %s, not in MM/DD/YYYY HH:MM format." % + expiry_time) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def to_update(self, fs_snapshot, description=None, auto_del=None, + expiry_time=None, fs_access_type=None): + """Determines whether to update the snapshot or not""" + snap_modify_dict = dict() + + if fs_access_type and fs_access_type != fs_snapshot.access_type: + error_message = "Modification of access type is not allowed." + LOG.error(error_message) + self.module.fail_json(msg=error_message) + if expiry_time: + # If the snapshot has is_auto_delete True, + # Check if auto_delete in the input is either None or True + if fs_snapshot.is_auto_delete and (auto_del is None or auto_del): + self.module.fail_json(msg="expiry_time can be assigned when" + " auto delete is False.") + if auto_del is not None: + if fs_snapshot.expiration_time: + error_msg = "expiry_time for filesystem snapshot is set." \ + " Once it is set then snapshot cannot" \ + " be assigned to auto_delete policy." 
+ self.module.fail_json(msg=error_msg) + if auto_del != fs_snapshot.is_auto_delete: + snap_modify_dict['is_auto_delete'] = auto_del + + if description is not None and description != fs_snapshot.description: + snap_modify_dict['description'] = description + + if to_update_expiry_time(fs_snapshot, expiry_time): + snap_modify_dict['expiry_time'] = expiry_time + LOG.info("Snapshot modification details: %s", snap_modify_dict) + return snap_modify_dict + + def update_filesystem_snapshot(self, fs_snapshot, snap_modify_dict): + try: + duration = None + if 'expiry_time' in snap_modify_dict \ + and snap_modify_dict['expiry_time']: + duration = convert_timestamp_to_sec( + snap_modify_dict['expiry_time'], + self.unity_conn.system_time) + if duration and duration <= 0: + self.module.fail_json(msg="expiry_time should be after" + " the current system time.") + if 'is_auto_delete' in snap_modify_dict \ + and snap_modify_dict['is_auto_delete'] is not None: + auto_delete = snap_modify_dict['is_auto_delete'] + else: + auto_delete = None + if 'description' in snap_modify_dict \ + and (snap_modify_dict['description'] + or len(snap_modify_dict['description']) == 0): + description = snap_modify_dict['description'] + else: + description = None + + fs_snapshot.modify(retentionDuration=duration, + isAutoDelete=auto_delete, + description=description) + fs_snapshot.update() + except Exception as e: + error_msg = "Failed to modify filesystem snapshot" \ + " [name: %s , id: %s] with error %s."\ + % (fs_snapshot.name, fs_snapshot.id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def create_filesystem_snapshot(self, snap_name, storage_id, + description=None, auto_del=None, + expiry_time=None, fs_access_type=None): + try: + duration = None + if expiry_time: + duration = convert_timestamp_to_sec( + expiry_time, self.unity_conn.system_time) + if duration <= 0: + self.module.fail_json(msg="expiry_time should be after" + " the current system time.") + + fs_snapshot = 
self.snap_obj.create( + cli=self.unity_conn._cli, storage_resource=storage_id, + name=snap_name, description=description, + is_auto_delete=auto_del, retention_duration=duration, + fs_access_type=fs_access_type) + return fs_snapshot + except Exception as e: + error_msg = "Failed to create filesystem snapshot" \ + " %s with error %s" % (snap_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def is_snap_has_share(self, fs_snap): + try: + obj = self.unity_conn.get_nfs_share(snap=fs_snap) or \ + self.unity_conn.get_cifs_share(snap=fs_snap) + if len(obj) > 0: + LOG.info("Snapshot has %s nfs/smb share/s", len(obj)) + return True + except Exception as e: + msg = "Failed to get nfs/smb share from filesystem snapshot. " \ + "error: %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + return False + + def delete_fs_snapshot(self, fs_snapshot): + try: + # Checking whether nfs/smb share created from fs_snapshot + if self.is_snap_has_share(fs_snapshot): + msg = "Filesystem snapshot cannot be deleted because it has " \ + "nfs/smb share" + LOG.error(msg) + self.module.fail_json(msg=msg) + fs_snapshot.delete() + return None + + except Exception as e: + error_msg = "Failed to delete filesystem snapshot" \ + " [name: %s, id: %s] with error %s." \ + % (fs_snapshot.name, fs_snapshot.id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_fs_snapshot_obj(self, name=None, id=None): + fs_snapshot = id if id else name + msg = "Failed to get details of filesystem snapshot %s with error %s." 
+ try: + fs_snap_obj = self.unity_conn.get_snap(name=name, _id=id) + if fs_snap_obj and fs_snap_obj.existed: + LOG.info("Successfully got the filesystem snapshot object " + "%s.", fs_snap_obj) + else: + fs_snap_obj = None + return fs_snap_obj + + except utils.HttpError as e: + if e.http_status == 401: + cred_err = ("Incorrect username or password , %s" % e.message) + self.module.fail_json(msg=cred_err) + else: + err_msg = msg % (fs_snapshot, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + except utils.UnityResourceNotFoundError as e: + err_msg = msg % (fs_snapshot, str(e)) + LOG.error(err_msg) + return None + + except Exception as e: + err_msg = msg % (fs_snapshot, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def get_filesystem_obj(self, nas_server=None, name=None, id=None): + filesystem = id if id else name + try: + obj_fs = None + if name: + if not nas_server: + err_msg = "NAS Server is required to get the FileSystem." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + obj_fs = self.unity_conn.get_filesystem(name=name, + nas_server=nas_server) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + if id: + if nas_server: + obj_fs = self.unity_conn\ + .get_filesystem(id=id, nas_server=nas_server) + else: + obj_fs = self.unity_conn.get_filesystem(id=id) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + except Exception as e: + error_msg = "Failed to get filesystem %s with error %s."\ + % (filesystem, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_nas_server_obj(self, name=None, id=None): + nas_server = id if id else name + error_msg = ("Failed to get NAS server %s." 
% nas_server) + try: + obj_nas = self.unity_conn.get_nas_server(_id=id, name=name) + if name and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + elif id and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + else: + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + except Exception as e: + error_msg = "Failed to get NAS server %s with error %s."\ + % (nas_server, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def create_fs_snapshot_details_dict(self, fs_snapshot): + """ Add name and id of storage resource to filesystem snapshot + details """ + + snapshot_dict = fs_snapshot._get_properties() + del snapshot_dict['storage_resource'] + + snapshot_dict['filesystem_name'] = fs_snapshot.storage_resource.name + snapshot_dict['filesystem_id'] = fs_snapshot.storage_resource.filesystem.id + + obj_fs = self.unity_conn.\ + get_filesystem(id=fs_snapshot.storage_resource.filesystem.id) + if obj_fs and obj_fs.existed: + snapshot_dict['nas_server_name'] = obj_fs.nas_server[0].name + snapshot_dict['nas_server_id'] = obj_fs.nas_server[0].id + + return snapshot_dict + + def perform_module_operation(self): + """ + Perform different actions on snapshot module based on parameters + chosen in playbook + """ + snapshot_name = self.module.params['snapshot_name'] + snapshot_id = self.module.params['snapshot_id'] + filesystem_name = self.module.params['filesystem_name'] + filesystem_id = self.module.params['filesystem_id'] + nas_server_name = self.module.params['nas_server_name'] + nas_server_id = self.module.params['nas_server_id'] + auto_delete = self.module.params['auto_delete'] + expiry_time = self.module.params['expiry_time'] + description = self.module.params['description'] + fs_access_type = self.module.params['fs_access_type'] + state = self.module.params['state'] + nas_server_resource = None + filesystem_resource = None + changed = False 
+ + LOG.info("Getting Filesystem Snapshot details.") + fs_snapshot = self.get_fs_snapshot_obj(name=snapshot_name, + id=snapshot_id) + + msg = "Filesystem Snapshot details: %s." % str(fs_snapshot) + LOG.info(msg) + + # Get NAS server Object + if nas_server_name is not None: + if nas_server_name == "" or nas_server_name.isspace(): + self.module.fail_json(msg="Invalid nas_server_name given," + " Please provide a valid name.") + nas_server_resource = self\ + .get_nas_server_obj(name=nas_server_name) + elif nas_server_id is not None: + if nas_server_id == "" or nas_server_id.isspace(): + self.module.fail_json(msg="Invalid nas_server_id given," + " Please provide a valid ID.") + nas_server_resource = self.get_nas_server_obj(id=nas_server_id) + + # Get Filesystem Object + if filesystem_name is not None: + if filesystem_name == "" or filesystem_name.isspace(): + self.module.fail_json(msg="Invalid filesystem_name given," + " Please provide a valid name.") + filesystem_resource = self\ + .get_filesystem_obj(nas_server=nas_server_resource, + name=filesystem_name) + fs_res_id = filesystem_resource.storage_resource.id + elif filesystem_id is not None: + if filesystem_id == "" or filesystem_id.isspace(): + self.module.fail_json(msg="Invalid filesystem_id given," + " Please provide a valid ID.") + filesystem_resource = self\ + .get_filesystem_obj(id=filesystem_id) + fs_res_id = filesystem_resource[0].storage_resource.id + + # Check for error, if user tries to create a filesystem snapshot + # with the same name. + if fs_snapshot and filesystem_resource and \ + (fs_snapshot.storage_resource.id + != fs_res_id): + self.module.fail_json( + msg="Snapshot %s is of %s storage resource. Cannot create new" + " snapshot with same name for %s storage resource." 
+ % (fs_snapshot.name, fs_snapshot.storage_resource.name, + filesystem_resource.storage_resource.name)) + + # check for valid expiry_time + if expiry_time is not None and \ + (expiry_time == "" or expiry_time.isspace()): + self.module.fail_json(msg="Please provide valid expiry_time," + " empty expiry_time given.") + if expiry_time: + self.validate_expiry_time(expiry_time) + + # Check if in input auto_delete is True and expiry_time is not None + if expiry_time and auto_delete: + error_msg = "Cannot set expiry_time if auto_delete given as True." + LOG.info(error_msg) + self.module.fail_json(msg=error_msg) + + # check for fs_access_type + if fs_access_type is not None: + if (fs_access_type == "" or fs_access_type.isspace()): + self.module.fail_json(msg="Please provide valid " + "fs_access_type, empty " + "fs_access_type given.") + if fs_access_type == "Checkpoint": + fs_access_type = utils.FilesystemSnapAccessTypeEnum.CHECKPOINT + elif fs_access_type == "Protocol": + fs_access_type = utils.FilesystemSnapAccessTypeEnum.PROTOCOL + + # Check whether to modify the filesystem snapshot or not + fs_snap_modify_dict = dict() + if state == 'present' and fs_snapshot: + fs_snap_modify_dict = self\ + .to_update(fs_snapshot, description=description, + auto_del=auto_delete, expiry_time=expiry_time, + fs_access_type=fs_access_type) + + # Create Filesystem Snapshot + if not fs_snapshot and state == "present": + LOG.info("Creating the filesystem snapshot.") + + if snapshot_id: + self.module.fail_json(msg="Creation of Filesystem Snapshot is" + " allowed using snapshot_name only," + " snapshot_id given.") + if snapshot_name == "" or snapshot_name.isspace(): + self.module.fail_json(msg="snapshot_name is required for" + " creation of the filesystem" + " snapshot, empty snapshot_name" + " given.") + if not filesystem_resource: + self.module.fail_json(msg="filesystem_name or filesystem_id" + " required to create a snapshot.") + + fs_snapshot = self.create_filesystem_snapshot( + 
snapshot_name, + fs_res_id, + description, + auto_delete, + expiry_time, + fs_access_type) + changed = True + + # Update the Snapshot + if fs_snapshot and state == "present" and fs_snap_modify_dict: + LOG.info("Updating the Filesystem Snapshot.") + self.update_filesystem_snapshot(fs_snapshot, fs_snap_modify_dict) + changed = True + + # Delete the Filesystem Snapshot + if state == "absent" and fs_snapshot: + fs_snapshot = self.delete_fs_snapshot(fs_snapshot) + changed = True + + # Add filesystem snapshot details to the result. + if fs_snapshot: + fs_snapshot.update() + self.result["filesystem_snapshot_details"] = \ + self.create_fs_snapshot_details_dict(fs_snapshot) + else: + self.result["filesystem_snapshot_details"] = {} + + self.result["changed"] = changed + self.module.exit_json(**self.result) + + +def to_update_expiry_time(fs_snapshot, expiry_time=None): + """ Check whether to update expiry_time or not""" + if not expiry_time: + return False + if fs_snapshot.expiration_time is None: + return True + if convert_timestamp_to_sec(expiry_time, fs_snapshot.expiration_time)\ + != 0: + return True + return False + + +def convert_timestamp_to_sec(expiry_time, snap_time): + """Converts the time difference to seconds""" + snap_time_str = snap_time.strftime('%m/%d/%Y %H:%M') + snap_timestamp = datetime.strptime(snap_time_str, '%m/%d/%Y %H:%M') + expiry_timestamp = datetime.strptime(expiry_time, "%m/%d/%Y %H:%M") + return int((expiry_timestamp - snap_timestamp).total_seconds()) + + +def get_snapshot_parameters(): + """This method provide parameter required for the ansible filesystem + snapshot module on Unity""" + return dict( + snapshot_name=dict(required=False, type='str'), + snapshot_id=dict(required=False, type='str'), + filesystem_name=dict(required=False, type='str'), + filesystem_id=dict(required=False, type='str'), + nas_server_name=dict(required=False, type='str'), + nas_server_id=dict(required=False, type='str'), + auto_delete=dict(required=False, type='bool'), + 
expiry_time=dict(required=False, type='str'), + description=dict(required=False, type='str'), + fs_access_type=dict(required=False, type='str', + choices=['Checkpoint', 'Protocol']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity Filesystem Snapshot object and perform actions on it + based on user input from playbook""" + obj = FilesystemSnapshot() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/host.py b/ansible_collections/dellemc/unity/plugins/modules/host.py new file mode 100644 index 00000000..7710b5f7 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/host.py @@ -0,0 +1,1025 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing host on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: host + +version_added: '1.1.0' + +short_description: Manage Host operations on Unity + +description: +- The Host module contains the operations + Creation of a Host, + Addition of initiators to Host, + Removal of initiators from Host, + Modification of host attributes, + Get details of a Host, + Deletion of a Host, + Addition of network address to Host, + Removal of network address from Host. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Rajshree Khare (@kharer5) + +options: + host_name: + description: + - Name of the host. + - Mandatory for host creation. + type: str + + host_id: + description: + - Unique identifier of the host. + - Host Id is auto generated during creation. + - Except create, all other operations require either I(host_id) or Ihost_name). + type: str + + description: + description: + - Host description. 
+ type: str + + host_os: + description: + - Operating system running on the host. + choices: ['AIX', 'Citrix XenServer', 'HP-UX', 'IBM VIOS', 'Linux', + 'Mac OS', 'Solaris', 'VMware ESXi', 'Windows Client', 'Windows Server'] + type: str + + new_host_name: + description: + - New name for the host. + - Only required in rename host operation. + type: str + + initiators: + description: + - List of initiators to be added/removed to/from host. + type: list + elements: str + + initiator_state: + description: + - State of the initiator. + choices: [present-in-host , absent-in-host] + type: str + + network_address: + description: + - Network address to be added/removed to/from the host. + - Enter valid IPV4 or host name. + type: str + + network_address_state: + description: + - State of the Network address. + choices: [present-in-host , absent-in-host] + type: str + + state: + description: + - State of the host. + choices: [present , absent] + type: str + required: true + +notes: + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' +- name: Create empty Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host" + host_os: "Linux" + description: "ansible-test-host" + state: "present" + +- name: Create Host with Initiators + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-1" + host_os: "Linux" + description: "ansible-test-host-1" + initiators: + - "iqn.1994-05.com.redhat:c38e6e8cfd81" + - "20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D" + initiator_state: "present-in-host" + state: "present" + +- name: Modify Host using host_id + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_id: "Host_253" + new_host_name: "ansible-test-host-2" + host_os: "Mac OS" + description: "Ansible tesing purpose" + state: "present" + +- name: Add Initiators to Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + initiators: + - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C" + initiator_state: "present-in-host" + state: "present" + +- name: Get Host details using host_name + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + state: "present" + +- name: Get Host details using host_id + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_id: "Host_253" + state: "present" + +- name: Delete Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + 
username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "ansible-test-host-2" + state: "absent" + +- name: Add network address to Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "{{host_name}}" + network_address: "192.168.1.2" + network_address_state: "present-in-host" + state: "present" + +- name: Delete network address from Host + dellemc.unity.host: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + host_name: "{{host_name}}" + network_address: "192.168.1.2" + network_address_state: "absent-in-host" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true + +host_details: + description: Details of the host. + returned: When host exists. + type: dict + contains: + id: + description: The system ID given to the host. + type: str + name: + description: The name of the host. + type: str + description: + description: Description about the host. + type: str + fc_host_initiators: + description: Details of the FC initiators associated with + the host. + type: list + contains: + id: + description: Unique identifier of the FC initiator path. + type: str + name: + description: FC Qualified Name (WWN) of the initiator. + type: str + paths: + description: Details of the paths associated with the FC initiator. + type: list + contains: + id: + description: Unique identifier of the path. + type: str + is_logged_in: + description: Indicates whether the host initiator is logged into the storage system. + type: bool + iscsi_host_initiators: + description: Details of the ISCSI initiators associated + with the host. + type: list + contains: + id: + description: Unique identifier of the ISCSI initiator path. 
+ type: str + name: + description: ISCSI Qualified Name (IQN) of the initiator. + type: str + paths: + description: Details of the paths associated with the ISCSI initiator. + type: list + contains: + id: + description: Unique identifier of the path. + type: str + is_logged_in: + description: Indicates whether the host initiator is logged into the storage system. + type: bool + network_addresses: + description: List of network addresses mapped to the host. + type: list + os_type: + description: Operating system running on the host. + type: str + type: + description: HostTypeEnum of the host. + type: str + host_luns: + description: Details of luns attached to host. + type: list + sample: { + "auto_manage_type": "HostManageEnum.UNKNOWN", + "datastores": null, + "description": "ansible-test-host-1", + "existed": true, + "fc_host_initiators": [ + { + "id": "HostInitiator_1", + "name": "HostName_1", + "paths": [ + { + "id": "HostInitiator_1_Id1", + "is_logged_in": true + }, + { + "id": "HostInitiator_1_Id2", + "is_logged_in": true + } + ] + } + ], + "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "health": { + "UnityHealth": { + "hash": 8764429420954 + } + }, + "host_container": null, + "host_luns": [], + "host_polled_uuid": null, + "host_pushed_uuid": null, + "host_uuid": null, + "host_v_vol_datastore": null, + "id": "Host_2198", + "iscsi_host_initiators": [ + { + "id": "HostInitiator_2", + "name": "HostName_2", + "paths": [ + { + "id": "HostInitiator_2_Id1", + "is_logged_in": true + }, + { + "id": "HostInitiator_2_Id2", + "is_logged_in": true + } + ] + } + ], + "last_poll_time": null, + "name": "ansible-test-host-1", + "network_addresses": [], + "os_type": "Linux", + "registration_type": null, + "storage_resources": null, + "tenant": null, + "type": "HostTypeEnum.HOST_MANUAL", + "vms": null + } +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +import ipaddress + 
+LOG = utils.get_logger('host')
+
+application_type = "Ansible/1.5.0"
+
+
+class Host(object):
+    """Class with Host operations"""
+
+    def __init__(self):
+        """ Define all parameters required by this module"""
+
+        self.module_params = utils.get_unity_management_host_parameters()
+        self.module_params.update(get_host_parameters())
+
+        mutually_exclusive = [['host_name', 'host_id']]
+        required_one_of = [['host_name', 'host_id']]
+        required_together = [['network_address', 'network_address_state']]
+
+        """ initialize the ansible module """
+        self.module = AnsibleModule(argument_spec=self.module_params,
+                                    supports_check_mode=False,
+                                    mutually_exclusive=mutually_exclusive,
+                                    required_together=required_together,
+                                    required_one_of=required_one_of)
+        utils.ensure_required_libs(self.module)
+
+        self.unity = utils.get_unity_unisphere_connection(self.module.params, application_type)
+        LOG.info('Got the unity instance for provisioning on Unity')
+
+    def get_host_count(self, host_name):
+        """ To get the count of hosts with same host_name """
+
+        hosts = []
+        host_count = 0
+        hosts = utils.host.UnityHostList.get(cli=self.unity._cli,
+                                             name=host_name)
+        host_count = len(hosts)
+        return host_count
+
+    def get_host_details(self, host_id=None, host_name=None):
+        """ Get details of a given host """
+
+        host_id_or_name = host_id if host_id else host_name
+        try:
+            LOG.info("Getting host %s details", host_id_or_name)
+            if host_id:
+                host_details = self.unity.get_host(_id=host_id)
+                if host_details.name is None:
+                    return None
+            if host_name:
+
+                ''' get the count of hosts with same host_name '''
+                host_count = self.get_host_count(host_name)
+
+                if host_count < 1:
+                    return None
+                elif host_count > 1:
+                    error_message = "Duplicate hosts found: There are "\
+                                    + str(host_count) + " hosts(s) with the same" \
+                                    " host_name: " + host_name
+                    LOG.error(error_message)
+                    self.module.fail_json(msg=error_message)
+                else:
+                    host_details = self.unity.get_host(name=host_name)
+
+            return host_details
+        except
utils.HttpError as e: + if e.http_status == 401: + msg = 'Incorrect username or password provided.' + LOG.error(msg) + self.module.fail_json(msg=msg) + else: + msg = "Got HTTP Connection Error while getting host " \ + "details %s : Error %s " % (host_id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + except utils.UnityResourceNotFoundError as e: + error_message = "Failed to get details of host " \ + "{0} with error {1}".format(host_id_or_name, + str(e)) + LOG.error(error_message) + return None + except Exception as e: + error_message = "Got error %s while getting details of host %s" \ + % (str(e), host_id_or_name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def create_host(self, host_name): + """ Create a new host """ + try: + description = self.module.params['description'] + host_os = self.module.params['host_os'] + host_type = utils.HostTypeEnum.HOST_MANUAL + initiators = self.module.params['initiators'] + initiator_state = self.module.params['initiator_state'] + empty_initiators_flag = False + + if (initiators and initiator_state == 'absent-in-host'): + error_message = "Incorrect 'initiator_state' given." + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if (initiators is None or len(initiators) == 0 + or not initiator_state + or initiator_state == 'absent-in-host'): + empty_initiators_flag = True + + """ if any of the Initiators is invalid or already mapped """ + if (initiators and initiator_state == 'present-in-host'): + unmapped_initiators \ + = self.get_list_unmapped_initiators(initiators) + if unmapped_initiators is None \ + or len(unmapped_initiators) < len(initiators): + error_message = "Provide valid initiators." 
+ LOG.error(error_message) + self.module.fail_json(msg=error_message) + if not empty_initiators_flag: + self.validate_initiators(initiators) + LOG.info("Creating empty host %s ", host_name) + new_host = utils.host.UnityHost.create(self.unity._cli, name=host_name, desc=description, + os=host_os, host_type=host_type) + if not empty_initiators_flag: + host_details = self.unity.get_host(name=host_name) + LOG.info("Adding initiators to %s host", host_name) + result, new_host \ + = self.add_initiator_to_host(host_details, initiators) + return True, new_host + except Exception as e: + error_message = "Got error %s while creation of host %s" \ + % (str(e), host_name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def validate_initiators(self, initiators): + results = [] + for item in initiators: + results.append(utils.is_initiator_valid(item)) + if False in results: + error_message = "One or more initiator provided is not valid, please provide valid initiators" + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_host_initiators_list(self, host_details): + """ Get the list of existing initiators in host""" + + existing_initiators = [] + if host_details.fc_host_initiators is not None: + fc_len = len(host_details.fc_host_initiators) + if fc_len > 0: + for count in range(fc_len): + """ get initiator 'wwn' id""" + ini_id \ + = host_details.fc_host_initiators.initiator_id[count] + + """ update existing_initiators list with 'wwn' """ + existing_initiators.append(ini_id) + + if host_details.iscsi_host_initiators is not None: + iscsi_len = len(host_details.iscsi_host_initiators) + if iscsi_len > 0: + for count in range(iscsi_len): + """ get initiator 'iqn' id""" + ini_id \ + = host_details.iscsi_host_initiators.\ + initiator_id[count] + + """ update existing_initiators list with 'iqn' """ + existing_initiators.append(ini_id) + return existing_initiators + + def is_host_modified(self, host_details): + """ Determines whether 
the Host details are to be updated or not """ + LOG.info("Checking host attribute values.") + modified_flag = False + + if (self.module.params['description'] is not None + and self.module.params['description'] + != host_details.description) \ + or (self.module.params['host_os'] is not None + and self.module.params['host_os'] != host_details.os_type) \ + or (self.module.params['new_host_name'] is not None + and self.module.params[ + 'new_host_name'] != host_details.name) \ + or (self.module.params['initiators'] is not None + and self.module.params['initiators'] + != self.get_host_initiators_list(host_details)): + LOG.info("Modification required.") + modified_flag = True + + return modified_flag + + def modify_host(self, host_details, new_host_name=None, description=None, + host_os=None): + """ Modify a host """ + try: + hosts = utils.host.UnityHostList.get(cli=self.unity._cli) + host_names_list = hosts.name + for name in host_names_list: + if new_host_name == name: + error_message = "Cannot modify name, new_host_name: " \ + + new_host_name + " already in use." 
+ LOG.error(error_message) + self.module.fail_json(msg=error_message) + host_details.modify(name=new_host_name, desc=description, + os=host_os) + return True + + except Exception as e: + error_message = "Got error %s while modifying host %s" \ + % (str(e), host_details.name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_list_unmapped_initiators(self, initiators, host_id=None): + """ Get the list of those initiators which are + not mapped to any host""" + + unmapped_initiators = [] + for id in initiators: + initiator_details = utils.host.UnityHostInitiatorList \ + .get(cli=self.unity._cli, initiator_id=id) \ + ._get_properties() + + """ if an already existing initiator is passed along with an + unmapped initiator""" + if None in initiator_details["parent_host"]: + unmapped_initiators.append(initiator_details + ["initiator_id"][0]) + elif not initiator_details["parent_host"]: + unmapped_initiators.append(id) + else: + error_message = "Initiator " + id + " mapped to another Host." + LOG.error(error_message) + self.module.fail_json(msg=error_message) + return unmapped_initiators + + def add_initiator_to_host(self, host_details, initiators): + """ Add initiator to host """ + + try: + existing_initiators = self.get_host_initiators_list(host_details) + + """ if current and exisitng initiators are same""" + if initiators \ + and (set(initiators).issubset(set(existing_initiators))): + LOG.info("Initiators are already present in host: %s", + host_details.name) + return False, host_details + + """ get the list of non-mapped initiators out of the + given initiators""" + host_id = host_details.id + unmapped_initiators \ + = self.get_list_unmapped_initiators(initiators, host_id) + + """ if any of the Initiators is invalid or already mapped """ + if unmapped_initiators is None \ + or len(unmapped_initiators) < len(initiators): + error_message = "Provide valid initiators." 
+ LOG.error(error_message) + self.module.fail_json(msg=error_message) + + LOG.info("Adding initiators to host %s", host_details.name) + for id in unmapped_initiators: + host_details.add_initiator(uid=id) + updated_host \ + = self.unity.get_host(name=host_details.name) + return True, updated_host + + except Exception as e: + error_message = "Got error %s while adding initiator to host %s" \ + % (str(e), host_details.name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def remove_initiator_from_host(self, host_details, initiators): + """ Remove initiator from host """ + + try: + existing_initiators = self.get_host_initiators_list(host_details) + + if existing_initiators is None: + LOG.info("No exisiting initiators in host: %s", + host_details.name) + return False, host_details + + if not (set(initiators).issubset(set(existing_initiators))): + LOG.info("Initiators already absent in host: %s", + host_details.name) + return False, host_details + + LOG.info("Removing initiators from host %s", host_details.name) + + if len(initiators) > 1: + self.check_if_initiators_logged_in(initiators) + + for id in initiators: + initiator_details = utils.host.UnityHostInitiatorList \ + .get(cli=self.unity._cli, initiator_id=id) \ + ._get_properties() + + """ if initiator has no active paths, then remove it """ + if initiator_details["paths"][0] is None: + LOG.info("Initiator Path does not exist.") + host_details.delete_initiator(uid=id) + updated_host \ + = self.unity.get_host(name=host_details.name) + + else: + """ Checking for initiator logged_in state """ + for path in initiator_details["paths"][0]["UnityHostInitiatorPathList"]: + path_id = path["UnityHostInitiatorPath"]["id"] + + path_id_obj = utils.host.UnityHostInitiatorPathList \ + .get(cli=self.unity._cli, _id=path_id) + + path_id_details = path_id_obj._get_properties() + + """ if is_logged_in is True, can't remove initiator""" + if (path_id_details["is_logged_in"]): + error_message = "Cannot remove 
initiator "\ + + id + ", as it is logged in " \ + "the with host." + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + elif (not path_id_details["is_logged_in"]): + """ if is_logged_in is False, remove initiator """ + path_id_obj.delete() + + else: + """ if logged_in state does not exist """ + error_message = " logged_in state does not " \ + "exist for initiator " + id + "." + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + host_details.delete_initiator(uid=id) + updated_host \ + = self.unity.get_host(name=host_details.name) + + return True, updated_host + + except Exception as e: + error_message = "Got error %s while removing initiator from " \ + "host %s" \ + % (str(e), host_details.name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def check_if_initiators_logged_in(self, initiators): + """ Checks if any of the initiators is of type logged-in""" + + for item in initiators: + initiator_details = (utils.host.UnityHostInitiatorList + .get(cli=self.unity._cli, initiator_id=item) + ._get_properties()) + if initiator_details["paths"][0] is not None and "UnityHostInitiatorPathList" in initiator_details["paths"][0]: + error_message = "Removal operation cannot be done since host has logged in initiator(s)" + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def delete_host(self, host_details): + """ Delete an existing host """ + + try: + host_details.delete() + return True + except Exception as e: + error_message = "Got error %s while deletion of host %s" \ + % (str(e), host_details.name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_iscsi_host_initiators_details(self, iscsi_host_initiators): + """ Get the details of existing ISCSI initiators in host""" + + iscsi_initiator_list = [] + for iscsi in iscsi_host_initiators: + iscsi_initiator_details = self.unity.get_initiator(_id=iscsi.id) + iscsi_path_list = [] + if 
iscsi_initiator_details.paths is not None: + for path in iscsi_initiator_details.paths: + iscsi_path_list.append({ + 'id': path.id, + 'is_logged_in': path.is_logged_in + }) + iscsi_initiator_list.append({ + 'id': iscsi_initiator_details.id, + 'name': iscsi_initiator_details.initiator_id, + 'paths': iscsi_path_list + }) + return iscsi_initiator_list + + def get_host_network_address_list(self, host_details): + network_address_list = [] + if host_details and host_details.host_ip_ports is not None: + for port in host_details.host_ip_ports: + network_address_list.append(port.address) + return network_address_list + + def manage_network_address(self, host_details, network_address_list, network_address, network_address_state): + try: + is_mapped = False + changed = False + for addr in network_address_list: + if addr.lower() == network_address.lower(): + is_mapped = True + break + if not is_mapped and network_address_state == 'present-in-host': + LOG.info("Adding network address %s to Host %s", network_address, + host_details.name) + host_details.add_ip_port(network_address) + changed = True + elif is_mapped and network_address_state == 'absent-in-host': + LOG.info("Deleting network address %s from Host %s", network_address, + host_details.name) + host_details.delete_ip_port(network_address) + changed = True + + if changed: + updated_host = self.unity.get_host(name=host_details.name) + network_address_list = self.get_host_network_address_list(updated_host) + return network_address_list, changed + except Exception as e: + error_message = "Got error %s while modifying network address %s of host %s" \ + % (str(e), network_address, host_details.name) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_host_lun_list(self, host_details): + """ Get luns attached to host""" + host_luns_list = [] + if host_details and host_details.host_luns is not None: + for lun in host_details.host_luns.lun: + host_lun = {"name": lun.name, "id": lun.id} + 
host_luns_list.append(host_lun) + return host_luns_list + + def get_fc_host_initiators_details(self, fc_host_initiators): + """ Get the details of existing FC initiators in host""" + + fc_initiator_list = [] + for fc in fc_host_initiators: + fc_initiator_details = self.unity.get_initiator(_id=fc.id) + fc_path_list = [] + if fc_initiator_details.paths is not None: + for path in fc_initiator_details.paths: + fc_path_list.append({ + 'id': path.id, + 'is_logged_in': path.is_logged_in + }) + fc_initiator_list.append({ + 'id': fc_initiator_details.id, + 'name': fc_initiator_details.initiator_id, + 'paths': fc_path_list + }) + return fc_initiator_list + + def perform_module_operation(self): + """ Perform different actions on host based on user parameter + chosen in playbook """ + + host_name = self.module.params['host_name'] + host_id = self.module.params['host_id'] + description = self.module.params['description'] + host_os = self.module.params['host_os'] + new_host_name = self.module.params['new_host_name'] + initiator_state = self.module.params['initiator_state'] + initiators = self.module.params['initiators'] + network_address = self.module.params['network_address'] + network_address_state = self.module.params['network_address_state'] + state = self.module.params['state'] + + if host_name and len(host_name) > 255: + err_msg = "'host_name' is greater than 255 characters." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if new_host_name and len(new_host_name) > 255: + err_msg = "'new_host_name' is greater than 255 characters." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if description and len(description) > 255: + err_msg = "'description' is greater than 255 characters." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if not initiators and initiator_state: + err_msg = "'initiator_state' is given, " \ + "'initiators' are not specified" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if not initiator_state and initiators: + err_msg = "'initiators' are given, " \ + "'initiator_state' is not specified" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + # result is a dictionary that contains changed status and + # host details + result = dict( + changed=False, + host_details={} + ) + + ''' Get host details based on host_name/host_id''' + host_details = self.get_host_details(host_id, host_name) + if not host_details and state == 'present': + if host_id: + err_msg = "Invalid argument 'host_id' while " \ + "creating a host" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + if not host_name: + err_msg = "host_name is required to create a host" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + if new_host_name: + err_msg = "Invalid argument 'new_host_name' while " \ + "creating a host" + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if (initiators and initiator_state == 'absent-in-host'): + error_message = "Incorrect 'initiator_state' given." 
+ LOG.error(error_message) + self.module.fail_json(msg=error_message) + + # Create new host + LOG.info("Creating host: %s", host_name) + result['changed'], host_details \ + = self.create_host(host_name) + result['host_details'] = host_details._get_properties() + + # Modify host (Attributes and ADD/REMOVE Initiators) + elif (state == 'present' and host_details): + modified_flag = self.is_host_modified(host_details) + if modified_flag: + + # Modify host + result['changed'] = self.modify_host(host_details, + new_host_name, + description, + host_os) + if new_host_name: + host_details = self.get_host_details(host_id, + new_host_name) + else: + host_details = self.get_host_details(host_id, host_name) + result['host_details'] = host_details._get_properties() + + # Add Initiators to host + if (initiator_state == 'present-in-host' and initiators + and len(initiators) > 0): + LOG.info("Adding Initiators to Host %s", + host_details.name) + result['changed'], host_details \ + = self.add_initiator_to_host(host_details, initiators) + result['host_details'] = host_details._get_properties() + + else: + LOG.info('Host modification is not applicable, ' + 'as none of the attributes has changed.') + result['changed'] = False + result['host_details'] = host_details._get_properties() + + # Remove initiators from host + if (host_details and initiator_state == 'absent-in-host' + and initiators and len(initiators) > 0): + LOG.info("Removing Initiators from Host %s", + host_details.name) + result['changed'], host_details \ + = self.remove_initiator_from_host(host_details, + initiators) + result['host_details'] = host_details._get_properties() + + """ display WWN/IQN w.r.t. 
initiators mapped to host, + if host exists """ + if host_details and host_details.fc_host_initiators is not None: + host_details.fc_host_initiators = self.get_fc_host_initiators_details(host_details.fc_host_initiators) + result['host_details'] = host_details._get_properties() + if host_details and host_details.iscsi_host_initiators is not None: + host_details.iscsi_host_initiators = self.get_iscsi_host_initiators_details(host_details.iscsi_host_initiators) + result['host_details'] = host_details._get_properties() + + ''' Get host luns details and network addresses''' + if result['host_details']: + result['host_details']['host_luns'] = self.get_host_lun_list(host_details) + result['host_details']['network_addresses'] = self.get_host_network_address_list(host_details) + if 'host_ip_ports' in result['host_details']: + del result['host_details']['host_ip_ports'] + + # manage network address + if host_details is not None and network_address_state is not None: + self.validate_network_address_params(network_address) + network_address_list, changed = self.manage_network_address( + host_details, + result['host_details']['network_addresses'], + network_address, + network_address_state) + result['host_details']['network_addresses'] = network_address_list + result['changed'] = changed + + # Delete a host + if state == 'absent': + if host_details: + LOG.info("Deleting host %s", host_details.name) + result['changed'] = self.delete_host(host_details) + else: + result['changed'] = False + result['host_details'] = [] + + self.module.exit_json(**result) + + def validate_network_address_params(self, network_address): + if '.' in network_address and not is_valid_ip(network_address): + err_msg = 'Please enter valid IPV4 address for network address' + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if len(network_address) < 1 or len(network_address) > 63: + err_msg = "'network_address' should be in range of 1 to 63 characters." 
+ LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + if utils.has_special_char(network_address) or ' ' in network_address: + err_msg = 'Please enter valid IPV4 address or host name for network address' + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + +def is_valid_ip(address): + try: + ipaddress.ip_address(address) + return True + except ValueError: + return False + + +def get_host_parameters(): + """This method provides parameters required for the ansible host + module on Unity""" + return dict( + host_name=dict(required=False, type='str'), + host_id=dict(required=False, type='str'), + description=dict(required=False, type='str'), + host_os=dict(required=False, type='str', + choices=['AIX', 'Citrix XenServer', 'HP-UX', + 'IBM VIOS', 'Linux', 'Mac OS', 'Solaris', + 'VMware ESXi', 'Windows Client', + 'Windows Server']), + new_host_name=dict(required=False, type='str'), + initiators=dict(required=False, type='list', elements='str'), + initiator_state=dict(required=False, type='str', + choices=['present-in-host', + 'absent-in-host']), + network_address=dict(required=False, type='str'), + network_address_state=dict(required=False, type='str', + choices=['present-in-host', + 'absent-in-host']), + state=dict(required=True, type='str', + choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity host object and perform action on it + based on user input from playbook""" + obj = Host() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/info.py b/ansible_collections/dellemc/unity/plugins/modules/info.py new file mode 100644 index 00000000..3f48b84c --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/info.py @@ -0,0 +1,1784 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for Gathering information about 
Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: info + +version_added: '1.1.0' + +short_description: Gathering information about Unity + +description: +- Gathering information about Unity storage system includes + Get the details of Unity array, + Get list of Hosts in Unity array, + Get list of FC initiators in Unity array, + Get list of iSCSI initiators in Unity array, + Get list of Consistency groups in Unity array, + Get list of Storage pools in Unity array, + Get list of Volumes in Unity array, + Get list of Snapshot schedules in Unity array, + Get list of NAS servers in Unity array, + Get list of File systems in Unity array, + Get list of Snapshots in Unity array, + Get list of SMB shares in Unity array, + Get list of NFS exports in Unity array, + Get list of User quotas in Unity array, + Get list of Quota tree in Unity array, + Get list of NFS Servers in Unity array, + Get list of CIFS Servers in Unity array. + Get list of Ethernet ports in Unity array. + Get list of File interfaces used in Unity array. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Rajshree Khare (@kharer5) +- Akash Shendge (@shenda1) +- Meenakshi Dembi (@dembim) + +options: + gather_subset: + description: + - List of string variables to specify the Unity storage system entities + for which information is required. + choices: [host, fc_initiator, iscsi_initiator, cg, storage_pool, vol, + snapshot_schedule, nas_server, file_system, snapshot, nfs_export, + smb_share, user_quota, tree_quota, disk_group, nfs_server, cifs_server, ethernet_port, file_interface] + type: list + elements: str + +notes: + - The I(check_mode) is supported. 
+''' + +EXAMPLES = r''' + - name: Get detailed list of Unity entities + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - host + - fc_initiator + - iscsi_initiator + - cg + - storage_pool + - vol + - snapshot_schedule + - nas_server + - file_system + - snapshot + - nfs_export + - smb_share + - user_quota + - tree_quota + - disk_group + - nfs_server + - cifs_server + - ethernet_port + - file_interface + + - name: Get information of Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + + - name: Get list of hosts on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - host + + - name: Get list of FC initiators on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - fc_initiator + + - name: Get list of ISCSI initiators on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - iscsi_initiator + + - name: Get list of consistency groups on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - cg + + - name: Get list of storage pools on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - storage_pool + + - name: Get list of volumes on Unity array + dellemc.unity.info: + unispherehost: 
"{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - vol + + - name: Get list of snapshot schedules on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - snapshot_schedule + + - name: Get list of NAS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nas_server + + - name: Get list of File Systems on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - file_system + + - name: Get list of Snapshots on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - snapshot + + - name: Get list of NFS exports on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nfs_export + + - name: Get list of SMB shares on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - smb_share + + - name: Get list of user quotas on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - user_quota + + - name: Get list of quota trees on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + 
gather_subset: + - tree_quota + + - name: Get list of disk groups on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - disk_group + + - name: Get list of NFS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - nfs_server + + - name: Get list of CIFS Servers on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - cifs_server + + - name: Get list of ethernet ports on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - ethernet_port + + - name: Get list of file interfaces on Unity array + dellemc.unity.info: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + gather_subset: + - file_interface +''' + +RETURN = r''' +Array_Details: + description: Details of the Unity Array. + returned: always + type: dict + contains: + api_version: + description: The current api version of the Unity Array. + type: str + earliest_api_version: + description: The earliest api version of the Unity Array. + type: str + model: + description: The model of the Unity Array. + type: str + name: + description: The name of the Unity Array. + type: str + software_version: + description: The software version of the Unity Array. 
+ type: str + sample: { + "api_version": "12.0", + "earliest_api_version": "4.0", + "existed": true, + "hash": 8766644083532, + "id": "0", + "model": "Unity 480", + "name": "APM00213404195", + "software_version": "5.2.1" + } + +Hosts: + description: Details of the hosts. + returned: When hosts exist. + type: list + contains: + id: + description: The ID of the host. + type: str + name: + description: The name of the host. + type: str + sample: [ + { + "auto_manage_type": "HostManageEnum.UNKNOWN", + "datastores": null, + "description": "", + "existed": true, + "fc_host_initiators": null, + "hash": 8762200072289, + "health": { + "UnityHealth": { + "hash": 8762200072352 + } + }, + "host_container": null, + "host_ip_ports": { + "UnityHostIpPortList": [ + { + "UnityHostIpPort": { + "hash": 8762200072361 + } + } + ] + }, + "host_luns": null, + "host_polled_uuid": null, + "host_pushed_uuid": null, + "host_uuid": null, + "host_v_vol_datastore": null, + "id": "Host_2191", + "iscsi_host_initiators": null, + "last_poll_time": null, + "name": "10.225.2.153", + "os_type": "Linux", + "registration_type": null, + "storage_resources": null, + "tenant": null, + "type": "HostTypeEnum.HOST_MANUAL", + "vms": null + } + ] + +FC_initiators: + description: Details of the FC initiators. + returned: When FC initiator exist. + type: list + contains: + WWN: + description: The WWN of the FC initiator. + type: str + id: + description: The id of the FC initiator. + type: str + sample: [ + { + "WWN": "20:00:00:0E:1E:E9:B8:FC:21:00:00:0E:1E:E9:B8:FC", + "id": "HostInitiator_3" + }, + { + "WWN": "20:00:00:0E:1E:E9:B8:F7:21:00:00:0E:1E:E9:B8:F7", + "id": "HostInitiator_4" + } + ] + +ISCSI_initiators: + description: Details of the ISCSI initiators. + returned: When ISCSI initiators exist. + type: list + contains: + IQN: + description: The IQN of the ISCSI initiator. + type: str + id: + description: The id of the ISCSI initiator. 
+ type: str + sample: [ + { + "IQN": "iqn.1994-05.com.redhat:634d768090f", + "id": "HostInitiator_1" + }, + { + "IQN": "iqn.1994-05.com.redhat:2835ba62cc6d", + "id": "HostInitiator_2" + } + ] + +Consistency_Groups: + description: Details of the Consistency Groups. + returned: When Consistency Groups exist. + type: list + contains: + id: + description: The ID of the Consistency Group. + type: str + name: + description: The name of the Consistency Group. + type: str + sample: [ + { + "advanced_dedup_status": "DedupStatusEnum.DISABLED", + "block_host_access": { + "UnityBlockHostAccessList": [ + { + "UnityBlockHostAccess": { + "hash": 8745385821206 + } + }, + { + "UnityBlockHostAccess": { + "hash": 8745386530115 + } + }, + { + "UnityBlockHostAccess": { + "hash": 8745386530124 + } + } + ] + }, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "data_reduction_status": "DataReductionStatusEnum.DISABLED", + "datastores": null, + "dedup_status": null, + "description": "CG has created with all parametres.", + "esx_filesystem_block_size": null, + "esx_filesystem_major_version": null, + "existed": true, + "filesystem": null, + "hash": 8745385801328, + "health": { + "UnityHealth": { + "hash": 8745386647098 + } + }, + "host_v_vol_datastore": null, + "id": "res_93", + "is_replication_destination": false, + "is_snap_schedule_paused": false, + "luns": { + "UnityLunList": [ + { + "UnityLun": { + "hash": 8745389830024, + "id": "sv_64" + } + }, + { + "UnityLun": { + "hash": 8745386526751, + "id": "sv_63" + } + } + ] + }, + "metadata_size": 8858370048, + "metadata_size_allocated": 7516192768, + "name": "CG1_Ansible_Test_SS", + "per_tier_size_used": [ + 11811160064, + 0, + 0 + ], + "pools": { + "UnityPoolList": [ + { + "UnityPool": { + "hash": 8745386552375, + "id": "pool_3" + } + } + ] + }, + "relocation_policy": "TieringPolicyEnum.AUTOTIER", + "replication_type": "ReplicationTypeEnum.NONE", + "size_allocated": 99418112, + "size_total": 
268435456000, + "size_used": null, + "snap_count": 1, + "snap_schedule": { + "UnitySnapSchedule": { + "hash": 8745386550224, + "id": "snapSch_66" + } + }, + "snaps_size_allocated": 8888320, + "snaps_size_total": 108675072, + "thin_status": "ThinStatusEnum.TRUE", + "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP", + "virtual_volumes": null, + "vmware_uuid": null + }, + ] + +Storage_Pools: + description: Details of the Storage Pools. + returned: When Storage Pools exist. + type: list + contains: + id: + description: The ID of the Storage Pool. + type: str + name: + description: The name of the Storage Pool. + type: str + sample: [ + { + "alert_threshold": 70, + "creation_time": "2021-10-18 12:45:12+00:00", + "description": "", + "existed": true, + "harvest_state": "UsageHarvestStateEnum.PAUSED_COULD_NOT_REACH_HWM", + "hash": 8741501012399, + "health": { + "UnityHealth": { + "hash": 8741501012363 + } + }, + "id": "pool_2", + "is_all_flash": false, + "is_empty": false, + "is_fast_cache_enabled": false, + "is_harvest_enabled": true, + "is_snap_harvest_enabled": false, + "metadata_size_subscribed": 312458870784, + "metadata_size_used": 244544700416, + "name": "fastVP_pool", + "object_id": 12884901891, + "pool_fast_vp": { + "UnityPoolFastVp": { + "hash": 8741501228023 + } + }, + "pool_space_harvest_high_threshold": 95.0, + "pool_space_harvest_low_threshold": 85.0, + "pool_type": "StoragePoolTypeEnum.TRADITIONAL", + "raid_type": "RaidTypeEnum.RAID5", + "rebalance_progress": null, + "size_free": 2709855928320, + "size_subscribed": 2499805044736, + "size_total": 3291018690560, + "size_used": 455513956352, + "snap_size_subscribed": 139720515584, + "snap_size_used": 66002944, + "snap_space_harvest_high_threshold": 25.0, + "snap_space_harvest_low_threshold": 20.0, + "tiers": { + "UnityPoolTierList": [ + { + "UnityPoolTier": { + "hash": 8741500996410 + } + }, + { + "UnityPoolTier": { + "hash": 8741501009430 + } + }, + { + "UnityPoolTier": { + "hash": 8741501009508 + } + } + ] 
+ } + }, + ] + +Volumes: + description: Details of the Volumes. + returned: When Volumes exist. + type: list + contains: + id: + description: The ID of the Volume. + type: str + name: + description: The name of the Volume. + type: str + sample: [ + { + "current_node": "NodeEnum.SPB", + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "default_node": "NodeEnum.SPB", + "description": null, + "effective_io_limit_max_iops": null, + "effective_io_limit_max_kbps": null, + "existed": true, + "family_base_lun": { + "UnityLun": { + "hash": 8774260820794, + "id": "sv_27" + } + }, + "family_clone_count": 0, + "hash": 8774260854260, + "health": { + "UnityHealth": { + "hash": 8774260812499 + } + }, + "host_access": { + "UnityBlockHostAccessList": [ + { + "UnityBlockHostAccess": { + "hash": 8774260826387 + } + } + ] + }, + "id": "sv_27", + "io_limit_policy": null, + "is_advanced_dedup_enabled": false, + "is_compression_enabled": null, + "is_data_reduction_enabled": false, + "is_replication_destination": false, + "is_snap_schedule_paused": false, + "is_thin_clone": false, + "is_thin_enabled": false, + "metadata_size": 4294967296, + "metadata_size_allocated": 4026531840, + "name": "VSI-UNITY-test-task", + "per_tier_size_used": [ + 111400714240, + 0, + 0 + ], + "pool": { + "UnityPool": { + "hash": 8774260811427 + } + }, + "size_allocated": 107374182400, + "size_total": 107374182400, + "size_used": null, + "snap_count": 0, + "snap_schedule": null, + "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97", + "snaps_size": 0, + "snaps_size_allocated": 0, + "storage_resource": { + "UnityStorageResource": { + "hash": 8774267822228 + } + }, + "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH", + "type": "LUNTypeEnum.VMWARE_ISCSI", + "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2" + }, + ] + +Snapshot_Schedules: + description: Details of the Snapshot Schedules. + returned: When Snapshot Schedules exist. 
+ type: list + contains: + id: + description: The ID of the Snapshot Schedule. + type: str + name: + description: The name of the Snapshot Schedule. + type: str + sample: [ + { + "existed": true, + "hash": 8775599492651, + "id": "snapSch_1", + "is_default": true, + "is_modified": null, + "is_sync_replicated": false, + "luns": null, + "modification_time": "2021-08-18 19:10:33.774000+00:00", + "name": "CEM_DEFAULT_SCHEDULE_DEFAULT_PROTECTION", + "rules": { + "UnitySnapScheduleRuleList": [ + { + "UnitySnapScheduleRule": { + "hash": 8775599498593 + } + } + ] + }, + "storage_resources": { + "UnityStorageResourceList": [ + { + "UnityStorageResource": { + "hash": 8775599711597, + "id": "res_88" + } + }, + { + "UnityStorageResource": { + "hash": 8775599711528, + "id": "res_3099" + } + } + ] + }, + "version": "ScheduleVersionEnum.LEGACY" + }, + ] + +NAS_Servers: + description: Details of the NAS Servers. + returned: When NAS Servers exist. + type: list + contains: + id: + description: The ID of the NAS Server. + type: str + name: + description: The name of the NAS Server. 
+ type: str + sample: [ + { + "allow_unmapped_user": null, + "cifs_server": null, + "current_sp": { + "UnityStorageProcessor": { + "hash": 8747629920422, + "id": "spb" + } + }, + "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE", + "default_unix_user": null, + "default_windows_user": null, + "existed": true, + "file_dns_server": null, + "file_interface": { + "UnityFileInterfaceList": [ + { + "UnityFileInterface": { + "hash": 8747626606870, + "id": "if_6" + } + } + ] + }, + "filesystems": { + "UnityFileSystemList": [ + { + "UnityFileSystem": { + "hash": 8747625901355, + "id": "fs_6892" + } + }, + ] + }, + "hash": 8747625900370, + "health": { + "UnityHealth": { + "hash": 8747625900493 + } + }, + "home_sp": { + "UnityStorageProcessor": { + "hash": 8747625877420, + "id": "spb" + } + }, + "id": "nas_1", + "is_backup_only": false, + "is_multi_protocol_enabled": false, + "is_packet_reflect_enabled": false, + "is_replication_destination": false, + "is_replication_enabled": false, + "is_windows_to_unix_username_mapping_enabled": null, + "name": "lglad072", + "pool": { + "UnityPool": { + "hash": 8747629920479, + "id": "pool_3" + } + }, + "preferred_interface_settings": { + "UnityPreferredInterfaceSettings": { + "hash": 8747626625166, + "id": "preferred_if_1" + } + }, + "replication_type": "ReplicationTypeEnum.NONE", + "size_allocated": 2952790016, + "tenant": null, + "virus_checker": { + "UnityVirusChecker": { + "hash": 8747626604144, + "id": "cava_1" + } + } + }, + ] + +File_Systems: + description: Details of the File Systems. + returned: When File Systems exist. + type: list + contains: + id: + description: The ID of the File System. + type: str + name: + description: The name of the File System. 
+ type: str + sample: [ + { + "access_policy": "AccessPolicyEnum.UNIX", + "cifs_notify_on_change_dir_depth": 512, + "cifs_share": null, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "description": "", + "existed": true, + "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN", + "format": "FSFormatEnum.UFS64", + "hash": 8786518053735, + "health": { + "UnityHealth": { + "hash": 8786518049091 + } + }, + "host_io_size": "HostIOSizeEnum.GENERAL_8K", + "id": "fs_12", + "is_advanced_dedup_enabled": false, + "is_cifs_notify_on_access_enabled": false, + "is_cifs_notify_on_write_enabled": false, + "is_cifs_op_locks_enabled": true, + "is_cifs_sync_writes_enabled": false, + "is_data_reduction_enabled": false, + "is_read_only": false, + "is_smbca": false, + "is_thin_enabled": true, + "locking_policy": "FSLockingPolicyEnum.MANDATORY", + "metadata_size": 4294967296, + "metadata_size_allocated": 3758096384, + "min_size_allocated": 0, + "name": "vro-daniel-test", + "nas_server": { + "UnityNasServer": { + "hash": 8786517296113, + "id": "nas_1" + } + }, + "nfs_share": null, + "per_tier_size_used": [ + 6442450944, + 0, + 0 + ], + "pool": { + "UnityPool": { + "hash": 8786518259493, + "id": "pool_3" + } + }, + "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES", + "size_allocated": 283148288, + "size_allocated_total": 4041244672, + "size_preallocated": 2401206272, + "size_total": 107374182400, + "size_used": 1620312064, + "snap_count": 0, + "snaps_size": 0, + "snaps_size_allocated": 0, + "storage_resource": { + "UnityStorageResource": { + "hash": 8786518044167, + "id": "res_20" + } + }, + "supported_protocols": "FSSupportedProtocolEnum.NFS", + "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH", + "type": "FilesystemTypeEnum.FILESYSTEM" + }, + ] + +Snapshots: + description: Details of the Snapshots. + returned: When Snapshots exist. + type: list + contains: + id: + description: The ID of the Snapshot. 
+ type: str + name: + description: The name of the Snapshot. + type: str + sample: [ + { + "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT", + "attached_wwn": null, + "creation_time": "2022-04-06 11:19:26.818000+00:00", + "creator_schedule": null, + "creator_type": "SnapCreatorTypeEnum.REP_V2", + "creator_user": null, + "description": "", + "existed": true, + "expiration_time": null, + "hash": 8739100256648, + "host_access": null, + "id": "38654716464", + "io_limit_policy": null, + "is_auto_delete": false, + "is_modifiable": false, + "is_modified": false, + "is_read_only": true, + "is_system_snap": true, + "last_writable_time": null, + "lun": { + "UnityLun": { + "hash": 8739100148962, + "id": "sv_301" + } + }, + "name": "42949677504_APM00213404195_0000.ckpt000_9508038064690266.2_238", + "parent_snap": null, + "size": 3221225472, + "snap_group": null, + "state": "SnapStateEnum.READY", + "storage_resource": { + "UnityStorageResource": { + "hash": 8739100173002, + "id": "sv_301" + } + } + }, + ] + +NFS_Exports: + description: Details of the NFS Exports. + returned: When NFS Exports exist. + type: list + contains: + id: + description: The ID of the NFS Export. + type: str + name: + description: The name of the NFS Export. 
+ type: str + sample: [ + { + "anonymous_gid": 4294967294, + "anonymous_uid": 4294967294, + "creation_time": "2021-12-01 06:21:48.381000+00:00", + "default_access": "NFSShareDefaultAccessEnum.NO_ACCESS", + "description": "", + "existed": true, + "export_option": 1, + "export_paths": [ + "10.230.24.20:/zack_nfs_01" + ], + "filesystem": { + "UnityFileSystem": { + "hash": 8747298565566, + "id": "fs_67" + } + }, + "hash": 8747298565548, + "host_accesses": null, + "id": "NFSShare_29", + "is_read_only": null, + "min_security": "NFSShareSecurityEnum.SYS", + "modification_time": "2022-04-01 11:44:17.553000+00:00", + "name": "zack_nfs_01", + "nfs_owner_username": null, + "no_access_hosts": null, + "no_access_hosts_string": "10.226.198.207,10.226.198.25,10.226.198.44,10.226.198.85,Host1, +Host2,Host4,Host5,Host6,10.10.0.0/255.255.240.0", + "path": "/", + "read_only_hosts": null, + "read_only_hosts_string": "", + "read_only_root_access_hosts": null, + "read_only_root_hosts_string": "", + "read_write_hosts": null, + "read_write_hosts_string": "", + "read_write_root_hosts_string": "", + "role": "NFSShareRoleEnum.PRODUCTION", + "root_access_hosts": null, + "snap": null, + "type": "NFSTypeEnum.NFS_SHARE" + }, + ] + +SMB_Shares: + description: Details of the SMB Shares. + returned: When SMB Shares exist. + type: list + contains: + id: + description: The ID of the SMB Share. + type: str + name: + description: The name of the SMB Share. 
+ type: str + sample: [ + { + "creation_time": "2022-03-17 11:56:54.867000+00:00", + "description": "", + "existed": true, + "export_paths": [ + "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui", + "\\\\10.230.24.26\\multi-prot-hui" + ], + "filesystem": { + "UnityFileSystem": { + "hash": 8741295638110, + "id": "fs_140" + } + }, + "hash": 8741295638227, + "id": "SMBShare_20", + "is_abe_enabled": false, + "is_ace_enabled": false, + "is_branch_cache_enabled": false, + "is_continuous_availability_enabled": false, + "is_dfs_enabled": false, + "is_encryption_enabled": false, + "is_read_only": null, + "modified_time": "2022-03-17 11:56:54.867000+00:00", + "name": "multi-prot-hui", + "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE", + "path": "/", + "snap": null, + "type": "CIFSTypeEnum.CIFS_SHARE", + "umask": "022" + }, + ] + +User_Quotas: + description: Details of the user quotas. + returned: When user quotas exist. + type: list + contains: + id: + description: The ID of the user quota. + type: str + uid: + description: The UID of the user quota. + type: str + sample: [ + { + "id": "userquota_171798694698_0_60000", + "uid": 60000 + }, + { + "id": "userquota_171798694939_0_5001", + "uid": 5001 + } + ] + +Tree_Quotas: + description: Details of the quota trees. + returned: When quota trees exist. + type: list + contains: + id: + description: The ID of the quota tree. + type: str + path: + description: The path of the quota tree. + type: str + sample: [ + { + "id": "treequota_171798709589_1", + "path": "/vro-ui-fs-rkKfimmN" + }, + { + "id": "treequota_171798709590_1", + "path": "/vro-ui-fs-mGYXAMqk" + } + ] + +Disk_Groups: + description: Details of the disk groups. + returned: When disk groups exist. + type: list + contains: + id: + description: The ID of the disk group. + type: str + name: + description: The name of the disk group. + type: str + tier_type: + description: The tier type of the disk group. 
+ type: str + sample: [ + { + "id": "dg_3", + "name": "400 GB SAS Flash 2", + "tier_type": "EXTREME_PERFORMANCE" + }, + { + "id": "dg_16", + "name": "600 GB SAS 10K", + "tier_type": "PERFORMANCE" + } + ] + +NFS_Servers: + description: Details of the NFS Servers. + returned: When NFS Servers exist. + type: list + contains: + id: + description: The ID of the NFS Servers. + type: str + sample: [ + { + "id": "nfs_3", + }, + { + "id": "nfs_4", + }, + { + "id": "nfs_9", + } + ] +CIFS_Servers: + description: Details of the CIFS Servers. + returned: When CIFS Servers exist. + type: list + contains: + id: + description: The ID of the CIFS Servers. + type: str + name: + description: The name of the CIFS server. + type: str + sample: [ + { + "id": "cifs_3", + "name": "test_cifs_1" + }, + { + "id": "cifs_4", + "name": "test_cifs_2" + }, + { + "id": "cifs_9", + "name": "test_cifs_3" + } + ] +Ethernet_ports: + description: Details of the ethernet ports. + returned: When ethernet ports exist. + type: list + contains: + id: + description: The ID of the ethernet port. + type: str + name: + description: The name of the ethernet port. + type: str + sample: [ + { + "id": "spa_mgmt", + "name": "SP A Management Port" + }, + { + "id": "spa_ocp_0_eth0", + "name": "SP A 4-Port Card Ethernet Port 0" + }, + { + "id": "spa_ocp_0_eth1", + "name": "SP A 4-Port Card Ethernet Port 1" + } + ] +File_interfaces: + description: Details of the file inetrfaces. + returned: When file inetrface exist. + type: list + contains: + id: + description: The ID of the file inetrface. + type: str + name: + description: The name of the file inetrface. + type: str + ip_address: + description: IP address of the file inetrface. 
class Info(object):
    """Gather read-only inventory information from a Unity storage system.

    Each ``get_*_list`` helper queries one category of resources (hosts,
    pools, volumes, shares, ...) and returns the collected properties;
    ``perform_module_operation`` drives the run based on the playbook's
    ``gather_subset`` parameter and exits the module with the results.
    On any backend error the helpers call ``fail_json`` (which terminates
    the module run), so they never return an error value to the caller.
    """

    def __init__(self):
        """ Define all parameters required by this module"""

        # Common Unisphere connection parameters plus this module's own
        # 'gather_subset' option.
        self.module_params = utils.get_unity_management_host_parameters()
        self.module_params.update(get_info_parameters())

        """ initialize the ansible module """
        # Read-only module: check mode is safe because no state is changed.
        self.module = AnsibleModule(argument_spec=self.module_params,
                                    supports_check_mode=True)
        utils.ensure_required_libs(self.module)

        self.unity = utils.get_unity_unisphere_connection(self.module.params,
                                                          application_type)
        LOG.info('Got the unity instance for provisioning on Unity')

    def get_array_details(self):
        """ Get the details of a given Unity storage array """

        try:
            LOG.info('Getting array details ')
            array_details = self.unity.info
            return array_details._get_properties()

        except utils.HttpError as e:
            # 401 from Unisphere means bad credentials; report that
            # explicitly instead of the raw HTTP error text.
            if e.http_status == 401:
                msg = 'Incorrect username or password provided.'
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            else:
                msg = str(e)
                LOG.error(msg)
                self.module.fail_json(msg=msg)
        except Exception as e:
            msg = 'Get array details from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_hosts_list(self):
        """ Get the list of hosts on a given Unity storage system """

        try:
            LOG.info('Getting hosts list ')
            hosts = self.unity.get_host()
            # result_list() returns None when nothing was found.
            return result_list(hosts)

        except Exception as e:
            msg = 'Get hosts list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_fc_initiators_list(self):
        """ Get the list of FC Initiators on a given Unity storage system """

        try:
            LOG.info('Getting FC initiators list ')
            # Initiators are filtered server-side by type (FC vs ISCSI).
            fc_initiator = utils.host.UnityHostInitiatorList \
                .get(cli=self.unity._cli, type=utils.HostInitiatorTypeEnum.FC)
            return fc_initiators_result_list(fc_initiator)

        except Exception as e:
            msg = 'Get FC initiators list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_iscsi_initiators_list(self):
        """ Get the list of ISCSI initiators on a given Unity storage
        system """

        try:
            LOG.info('Getting ISCSI initiators list ')
            iscsi_initiator = utils.host.UnityHostInitiatorList \
                .get(cli=self.unity._cli, type=utils.HostInitiatorTypeEnum.
                     ISCSI)
            return iscsi_initiators_result_list(iscsi_initiator)

        except Exception as e:
            msg = 'Get ISCSI initiators list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_consistency_groups_list(self):
        """ Get the list of consistency groups on a given Unity storage
        system """

        try:
            LOG.info('Getting consistency groups list ')
            consistency_groups = utils.cg.UnityConsistencyGroupList \
                .get(self.unity._cli)
            return result_list(consistency_groups)

        except Exception as e:
            msg = 'Get consistency groups list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_storage_pools_list(self):
        """ Get the list of storage pools on a given Unity storage
        system """

        try:
            LOG.info('Getting storage pools list ')
            storage_pools = self.unity.get_pool()
            return result_list(storage_pools)

        except Exception as e:
            msg = 'Get storage pools list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_volumes_list(self):
        """ Get the list of volumes on a given Unity storage
        system """

        try:
            LOG.info('Getting volumes list ')
            volumes = self.unity.get_lun()
            return result_list(volumes)

        except Exception as e:
            msg = 'Get volumes list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_snapshot_schedules_list(self):
        """ Get the list of snapshot schedules on a given Unity storage
        system """

        try:
            LOG.info('Getting snapshot schedules list ')
            snapshot_schedules = utils.snap_schedule.UnitySnapScheduleList \
                .get(cli=self.unity._cli)
            return result_list(snapshot_schedules)

        except Exception as e:
            msg = 'Get snapshot schedules list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_nas_servers_list(self):
        """Get the list of NAS servers on a given Unity storage system"""

        try:
            LOG.info("Getting NAS servers list")
            nas_servers = self.unity.get_nas_server()
            return result_list(nas_servers)

        except Exception as e:
            msg = 'Get NAS servers list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_file_systems_list(self):
        """Get the list of file systems on a given Unity storage system"""

        try:
            LOG.info("Getting file systems list")
            file_systems = self.unity.get_filesystem()
            return result_list(file_systems)

        except Exception as e:
            msg = 'Get file systems list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_snapshots_list(self):
        """Get the list of snapshots on a given Unity storage system"""

        try:
            LOG.info("Getting snapshots list")
            snapshots = self.unity.get_snap()
            return result_list(snapshots)

        except Exception as e:
            msg = 'Get snapshots from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_nfs_exports_list(self):
        """Get the list of NFS exports on a given Unity storage system"""

        try:
            LOG.info("Getting NFS exports list")
            nfs_exports = self.unity.get_nfs_share()
            return result_list(nfs_exports)

        except Exception as e:
            msg = 'Get NFS exports from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_smb_shares_list(self):
        """Get the list of SMB shares on a given Unity storage system"""

        try:
            LOG.info("Getting SMB shares list")
            smb_shares = self.unity.get_cifs_share()
            return result_list(smb_shares)

        except Exception as e:
            msg = 'Get SMB shares from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_user_quota_list(self):
        """Get the list of user quotas on a given Unity storage system"""

        try:
            LOG.info("Getting user quota list")
            user_quotas = self.unity.get_user_quota()
            return user_quota_result_list(user_quotas)

        except Exception as e:
            msg = 'Get user quotas from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_tree_quota_list(self):
        """Get the list of quota trees on a given Unity storage system"""

        try:
            LOG.info("Getting quota tree list")
            tree_quotas = self.unity.get_tree_quota()
            return tree_quota_result_list(tree_quotas)

        except Exception as e:
            msg = 'Get quota trees from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_disk_groups_list(self):
        """Get the list of disk group details on a given Unity storage system"""

        try:
            LOG.info("Getting disk group list")
            pool_disk_list = []
            disk_instances = utils.UnityDiskGroupList(cli=self.unity._cli)
            # NOTE: unlike the other getters this builds its dicts inline
            # and returns [] (not None) when no disk groups exist.
            if disk_instances:
                for disk in disk_instances:
                    pool_disk = {"id": disk.id, "name": disk.name,
                                 "tier_type": disk.tier_type.name}
                    pool_disk_list.append(pool_disk)
            return pool_disk_list
        except Exception as e:
            msg = 'Get disk group from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_nfs_server_list(self):
        """Get the list of NFS servers on a given Unity storage system"""

        try:
            LOG.info("Getting NFS servers list")
            nfs_servers = self.unity.get_nfs_server()
            return nfs_server_result_list(nfs_servers)

        except Exception as e:
            msg = 'Get NFS servers list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_cifs_server_list(self):
        """Get the list of CIFS servers on a given Unity storage system"""

        try:
            LOG.info("Getting CIFS servers list")
            cifs_servers = self.unity.get_cifs_server()
            return result_list(cifs_servers)

        except Exception as e:
            msg = 'Get CIFS servers list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_ethernet_port_list(self):
        """Get the list of ethernet ports on a given Unity storage system"""

        try:
            LOG.info("Getting ethernet ports list")
            ethernet_port = self.unity.get_ethernet_port()
            return result_list(ethernet_port)

        except Exception as e:
            msg = 'Get ethernet port list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def get_file_interface_list(self):
        """Get the list of file interfaces on a given Unity storage system"""

        try:
            LOG.info("Getting file interfaces list")
            file_interface = self.unity.get_file_interface()
            return file_interface_result_list(file_interface)

        except Exception as e:
            msg = 'Get file interface list from unity array failed with' \
                  ' error %s' % (str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    def perform_module_operation(self):
        """ Perform different actions on Info based on user parameter
        chosen in playbook """

        """ Get the array details a given Unity storage system """

        # Array details are always collected; every other category is
        # fetched only when named in 'gather_subset' and defaults to [].
        array_details = self.get_array_details()
        host = []
        fc_initiator = []
        iscsi_initiator = []
        cg = []
        storage_pool = []
        vol = []
        snapshot_schedule = []
        nas_server = []
        file_system = []
        snapshot = []
        nfs_export = []
        smb_share = []
        user_quota = []
        tree_quota = []
        disk_group = []
        nfs_server = []
        cifs_server = []
        ethernet_port = []
        file_interface = []

        subset = self.module.params['gather_subset']
        if subset is not None:
            if 'host' in subset:
                host = self.get_hosts_list()
            if 'fc_initiator' in subset:
                fc_initiator = self.get_fc_initiators_list()
            if 'iscsi_initiator' in subset:
                iscsi_initiator = self.get_iscsi_initiators_list()
            if 'cg' in subset:
                cg = self.get_consistency_groups_list()
            if 'storage_pool' in subset:
                storage_pool = self.get_storage_pools_list()
            if 'vol' in subset:
                vol = self.get_volumes_list()
            if 'snapshot_schedule' in subset:
                snapshot_schedule = self.get_snapshot_schedules_list()
            if 'nas_server' in subset:
                nas_server = self.get_nas_servers_list()
            if 'file_system' in subset:
                file_system = self.get_file_systems_list()
            if 'snapshot' in subset:
                snapshot = self.get_snapshots_list()
            if 'nfs_export' in subset:
                nfs_export = self.get_nfs_exports_list()
            if 'smb_share' in subset:
                smb_share = self.get_smb_shares_list()
            if 'user_quota' in subset:
                user_quota = self.get_user_quota_list()
            if 'tree_quota' in subset:
                tree_quota = self.get_tree_quota_list()
            if 'disk_group' in subset:
                disk_group = self.get_disk_groups_list()
            if 'nfs_server' in subset:
                nfs_server = self.get_nfs_server_list()
            if 'cifs_server' in subset:
                cifs_server = self.get_cifs_server_list()
            if 'ethernet_port' in subset:
                ethernet_port = self.get_ethernet_port_list()
            if 'file_interface' in subset:
                file_interface = self.get_file_interface_list()

        # Keys here mirror the RETURN documentation block of this module.
        self.module.exit_json(
            Array_Details=array_details,
            Hosts=host,
            FC_initiators=fc_initiator,
            ISCSI_initiators=iscsi_initiator,
            Consistency_Groups=cg,
            Storage_Pools=storage_pool,
            Volumes=vol,
            Snapshot_Schedules=snapshot_schedule,
            NAS_Servers=nas_server,
            File_Systems=file_system,
            Snapshots=snapshot,
            NFS_Exports=nfs_export,
            SMB_Shares=smb_share,
            User_Quotas=user_quota,
            Tree_Quotas=tree_quota,
            Disk_Groups=disk_group,
            NFS_Servers=nfs_server,
            CIFS_Servers=cifs_server,
            Ethernet_ports=ethernet_port,
            File_interfaces=file_interface
        )


def result_list(entity):
    """ Get the name and id associated with the Unity entities """
    # Returns a list of property dicts, or None when the query matched
    # nothing (the module then reports [] via the subset defaults).
    result = []

    if entity:
        LOG.info(SUCCESSFULL_LISTED_MSG)
        for item in entity:
            result.append(
                item._get_properties()
            )
        return result
    else:
        return None
def fc_initiators_result_list(entity):
    """Return WWN/id pairs for Unity FC initiators, or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [{"WWN": member.initiator_id, "id": member.id}
            for member in entity]


def iscsi_initiators_result_list(entity):
    """Return IQN/id pairs for Unity ISCSI initiators, or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [{"IQN": member.initiator_id, "id": member.id}
            for member in entity]


def user_quota_result_list(entity):
    """Return uid/id pairs for Unity user quotas, or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [{"uid": member.uid, "id": member.id} for member in entity]


def tree_quota_result_list(entity):
    """Return path/id pairs for Unity quota trees, or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [{"path": member.path, "id": member.id} for member in entity]


def nfs_server_result_list(entity):
    """Return the property dicts of NFS servers, or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [member._get_properties() for member in entity]


def file_interface_result_list(entity):
    """Return the property dicts (id, name, IP) of file interfaces,
    or None if empty."""
    if not entity:
        return None
    LOG.info(SUCCESSFULL_LISTED_MSG)
    return [member._get_properties() for member in entity]
def get_info_parameters():
    """Build the argument spec for the Unity info module.

    Only one option is defined: 'gather_subset', an optional list of
    resource-category names selecting which inventories to collect.
    """
    subset_choices = [
        'host', 'fc_initiator', 'iscsi_initiator', 'cg', 'storage_pool',
        'vol', 'snapshot_schedule', 'nas_server', 'file_system',
        'snapshot', 'nfs_export', 'smb_share', 'user_quota', 'tree_quota',
        'disk_group', 'nfs_server', 'cifs_server', 'ethernet_port',
        'file_interface',
    ]
    return {
        'gather_subset': {
            'type': 'list',
            'required': False,
            'elements': 'str',
            'choices': subset_choices,
        }
    }


def main():
    """Entry point: build the Unity Info object and run the gather
    operation selected by the playbook."""
    Info().perform_module_operation()


if __name__ == '__main__':
    main()
+ type: str + prefix_length: + description: + - Prefix length is mutually exclusive with I(netmask). + type: int + gateway: + description: + - Gateway of network interface. + type: str + vlan_id: + description: + - Vlan id of the interface. + type: int + state: + description: + - Define whether the interface should exist or not. + choices: [present, absent] + required: true + type: str +notes: +- The I(check_mode) is supported. +- Modify operation for interface is not supported. +''' + +EXAMPLES = r''' + + - name: Add Interface as Backup to NAS Server + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + ethernet_port_name: "SP A 4-Port Card Ethernet Port 0" + role: "BACKUP" + interface_ip: "xx.xx.xx.xx" + netmask: "xx.xx.xx.xx" + gateway: "xx.xx.xx.xx" + vlan_id: 324 + state: "present" + + - name: Add Interface as Production to NAS Server + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + ethernet_port_name: "SP A 4-Port Card Ethernet Port 0" + role: "PRODUCTION" + interface_ip: "xx.xx.xx.xx" + netmask: "xx.xx.xx.xx" + gateway: "xx.xx.xx.xx" + vlan_id: 324 + state: "present" + + - name: Get interface details + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + interface_ip: "xx.xx.xx.xx" + state: "present" + + - name: Delete Interface + dellemc.unity.interface: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + interface_ip: "xx.xx.xx.xx" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. 
+ returned: always + type: bool + sample: true +interface_details: + description: Details of the interface. + returned: When interface is configured for NAS Server. + type: dict + contains: + existed: + description: Indicates if interface exists. + type: bool + gateway: + description: Gateway of network interface. + type: str + id: + description: Unique identifier interface. + type: str + ip_address: + description: IP address of interface. + type: str + ip_port: + description: Port on which network interface is configured. + type: dict + contains: + id: + description: ID of ip_port. + type: str + ip_protocol_version: + description: IP protocol version. + type: str + is_disabled: + description: Indicates whether interface is disabled. + type: bool + is_preferred: + description: Indicates whether interface is preferred. + type: bool + mac_address: + description: Mac address of ip_port. + type: bool + name: + description: System configured name of interface. + type: bool + nas_server: + description: Details of NAS server where interface is configured. + type: dict + contains: + id: + description: ID of NAS Server. 
+ type: str + sample: { + "existed": true, + "gateway": "xx.xx.xx.xx", + "hash": 8785300560421, + "health": { + "UnityHealth": { + "hash": 8785300565468 + } + }, + "id": "if_69", + "ip_address": "10.10.10.10", + "ip_port": { + "UnityIpPort": { + "hash": 8785300565300, + "id": "spb_ocp_0_eth0" + } + }, + "ip_protocol_version": "IpProtocolVersionEnum.IPv4", + "is_disabled": false, + "is_preferred": true, + "mac_address": "0C:48:C6:9F:57:BF", + "name": "36_APM00213404194", + "nas_server": { + "UnityNasServer": { + "hash": 8785300565417, + "id": "nas_10" + } + }, + "netmask": "10.10.10.10", + "replication_policy": null, + "role": "FileInterfaceRoleEnum.PRODUCTION", + "source_parameters": null, + "v6_prefix_length": null, + "vlan_id": 324 + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +import ipaddress +from ipaddress import ip_network + +LOG = utils.get_logger('interface') + +application_type = "Ansible/1.5.0" + + +class Interface(object): + """Class with Interface operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_interface_parameters()) + + mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['ethernet_port_id', 'ethernet_port_name'], ['netmask', 'prefix_length']] + required_one_of = [['nas_server_name', 'nas_server_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + LOG.info('Check Mode Flag %s', self.module.check_mode) + + def get_interface_details(self, nas_server_obj): + """Get interface details. 
+ :param: nas_server_obj: NAS server object. + :return: Returns interface details configured on NAS server. + """ + + try: + nas_server_obj_properties = nas_server_obj._get_properties() + if nas_server_obj_properties['file_interface']: + for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']: + interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id']) + if interface_id.ip_address == self.module.params['interface_ip']: + return interface_id + return None + except Exception as e: + error_msg = "Getting Interface details failed" \ + " with error %s" % (str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_nas_server_obj(self, nas_server_name, nas_server_id): + """Get NAS server ID. + :param: nas_server_name: The name of NAS server + :param: nas_server_id: ID of NAS server + :return: Return NAS server object if exists + """ + + LOG.info("Getting NAS server object") + try: + if nas_server_name: + obj_nas = self.unity_conn.get_nas_server(name=nas_server_name) + return obj_nas + elif nas_server_id: + obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id) + if obj_nas._get_properties()['existed']: + return obj_nas + else: + msg = "NAS server with id %s does not exist" % (nas_server_id) + LOG.error(msg) + self.module.fail_json(msg=msg) + except Exception as e: + msg = "Failed to get details of NAS server with error: %s" % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def add_interface(self, nas_server_obj, ethernet_port_id=None, ethernet_port_name=None, role=None, interface_ip=None, + netmask=None, prefix_length=None, gateway=None, vlan_id=None): + """Adding interface to NAS server. + :param: nas_server_obj: The NAS server object. + :param: ethernet_port_id: ID of ethernet port. + :param: ethernet_port_name: Name of ethernet port. + :param: role: Role of the interface. + :param: interface_ip: IP of interface. + :param: netmask: Netmask for interface. 
+ :param: prefix_length: Prefix length. + :param: gateway: Gateway for interface. + :param: vlan_id: vlan_id for interface. + :return: Return True if interface is configured successfully. + """ + + LOG.info("Adding interface to NAS Server") + try: + nas_server_obj_properties = nas_server_obj._get_properties() + if nas_server_obj_properties['file_interface']: + for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']: + interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id']) + if interface_id._get_properties()['ip_address'] == self.module.params['interface_ip']: + return False + if role: + role_value = get_role_enum(role) + if ethernet_port_name: + ethernet_port_info = self.unity_conn.get_ethernet_port(name=ethernet_port_name) + ethernet_port_id = ethernet_port_info.id + if not self.module.check_mode: + utils.UnityFileInterface.create(cli=self.unity_conn._cli, nas_server=nas_server_obj.get_id(), ip_port=ethernet_port_id, + role=role_value, ip=interface_ip, netmask=netmask, v6_prefix_length=prefix_length, + gateway=gateway, vlan_id=vlan_id) + return True + except Exception as e: + msg = "Failed to add interface to NAS Server with error: %s" % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def is_modification_required(self, interface_details): + """Check if modification is required in existing interface/s configured for NAS Server + :param: interface_details: Existing interface details + :return: True if modification is required + """ + key_list = ['vlan_id', 'gateway', 'netmask'] + for item in key_list: + if self.module.params[item] and self.module.params[item] != interface_details[item]: + return True + return False + + def delete_interface(self, interface_obj): + """Delete NFS server. + :param: interface_obj: Interface object. + :return: Return True if interface is deleted. 
+ """ + + LOG.info("Deleting interface") + try: + if not self.module.check_mode: + interface_obj.delete() + return True + except Exception as e: + msg = "Failed to delete interface with error: %s" % (str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_input_params(self): + """Validates input parameters""" + param_list = ["nas_server_id", "nas_server_name", "ethernet_port_name", "ethernet_port_id", "role", "interface_ip", + "netmask", "gateway"] + + for param in param_list: + msg = "Please provide valid value for: %s" % param + if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0: + errmsg = msg.format(param) + self.module.fail_json(msg=errmsg) + + if self.module.params['vlan_id'] is not None: + if self.module.params['vlan_id'] <= 3 or self.module.params['vlan_id'] >= 4094: + self.module.fail_json(msg='vlan_id should be in the range of 3 to 4094') + + if self.module.params['interface_ip'] and \ + not is_valid_ip(self.module.params['interface_ip']): + self.module.fail_json(msg='The value for interface ip is invalid') + + if self.module.params['gateway'] and \ + not is_valid_ip(self.module.params['gateway']): + self.module.fail_json(msg='The value for gateway is invalid') + + if self.module.params['netmask'] and not \ + utils.is_valid_netmask(self.module.params['netmask']): + self.module.fail_json(msg='Invalid IPV4 address specified for netmask') + + if self.module.params['interface_ip'] and (get_ip_version(self.module.params['interface_ip']) == 6): + self.module.fail_json(msg='IPv6 format is not supported') + + def validate_create_params(self): + """Validates input parameters for adding interface""" + if self.module.params['role'] is None: + self.module.fail_json(msg='Role is a mandatory parameter for adding interface to NAS Server.') + if self.module.params['ethernet_port_name'] is None and self.module.params['ethernet_port_id'] is None: + self.module.fail_json(msg='ethernet_port_name/ethernet_port_id is 
mandatory parameter for adding interface to NAS Server.') + + def perform_module_operation(self): + """ + Perform different actions on Interface module based on parameters + passed in the playbook + """ + nas_server_id = self.module.params['nas_server_id'] + nas_server_name = self.module.params['nas_server_name'] + ethernet_port_name = self.module.params['ethernet_port_name'] + ethernet_port_id = self.module.params['ethernet_port_id'] + role = self.module.params['role'] + interface_ip = self.module.params['interface_ip'] + netmask = self.module.params['netmask'] + prefix_length = self.module.params['prefix_length'] + gateway = self.module.params['gateway'] + vlan_id = self.module.params['vlan_id'] + state = self.module.params['state'] + + # result is a dictionary that contains changed status and Interface details + result = dict( + changed=False, + interface_details={} + ) + modify_flag = False + + self.validate_input_params() + + interface_details = None + + nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id) + + interface_obj = self.get_interface_details(nas_server_obj) + + if interface_obj and state == 'present': + interface_details = interface_obj._get_properties() + modify_flag = self.is_modification_required(interface_details) + if modify_flag: + self.module.fail_json(msg="Modification of Interfaces for NAS server is not supported through Ansible module") + + if not interface_obj and state == 'present': + self.validate_create_params() + + result['changed'] = self.add_interface(nas_server_obj, ethernet_port_id, ethernet_port_name, role, + interface_ip, netmask, prefix_length, gateway, vlan_id) + + if interface_obj and state == 'absent': + result['changed'] = self.delete_interface(interface_obj) + + if result['changed']: + nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id) + interface_obj = self.get_interface_details(nas_server_obj) + if interface_obj: + interface_details = interface_obj._get_properties() + + 
result['interface_details'] = interface_details + + self.module.exit_json(**result) + + +def get_interface_parameters(): + """This method provide parameters required for the ansible + Interface module on Unity""" + return dict( + nas_server_id=dict(type='str'), + nas_server_name=dict(type='str'), + ethernet_port_name=dict(type='str'), + ethernet_port_id=dict(type='str'), + role=dict(type='str', choices=['PRODUCTION', 'BACKUP']), + interface_ip=dict(required=True, type='str'), + netmask=dict(type='str'), + prefix_length=dict(type='int'), + gateway=dict(type='str'), + vlan_id=dict(type='int'), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def get_role_enum(role): + """Getting correct enum values for role + :param: role: Indicates role of interface. + :return: enum value for role. + """ + if utils.FileInterfaceRoleEnum[role]: + role = utils.FileInterfaceRoleEnum[role] + return role + + +def is_valid_ip(address): + """Validating IP address format + :param: address: IP address to be validated for format. + """ + try: + ipaddress.ip_address(address) + return True + except ValueError: + return False + + +def get_ip_version(val): + """Returns IP address version + :param: val: IP address to be validated for version. 
+ """ + try: + val = u'{0}'.format(val) + ip = ip_network(val, strict=False) + return ip.version + except ValueError: + return 0 + + +def main(): + """Create Unity Interface object and perform action on it + based on user input from playbook""" + obj = Interface() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/nasserver.py b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py new file mode 100644 index 00000000..4bd7c619 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py @@ -0,0 +1,1151 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: nasserver +version_added: '1.1.0' +short_description: Manage NAS servers on Unity storage system +extends_documentation_fragment: +- dellemc.unity.unity +author: +- P Srinivas Rao (@srinivas-rao5) +description: +- Managing NAS servers on Unity storage system includes get, + modification to the NAS servers. +options: + nas_server_id: + description: + - The ID of the NAS server. + - Either I(nas_server_name) or I(nas_server_id) is required to perform the task. + - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive. + type: str + nas_server_name: + description: + - The Name of the NAS server. + - Either I(nas_server_name) or I(nas_server_id) is required to perform the task. + - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive. + type: str + nas_server_new_name: + description: + - The new name of the NAS server. + - It can be mentioned during modification of the NAS server. + type: str + is_replication_destination: + description: + - It specifies whether the NAS server is a replication destination. 
+ - It can be mentioned during modification of the NAS server. + type: bool + is_backup_only: + description: + - It specifies whether the NAS server is used as backup only. + - It can be mentioned during modification of the NAS server. + type: bool + is_multiprotocol_enabled: + description: + - This parameter indicates whether multiprotocol sharing mode is enabled. + - It can be mentioned during modification of the NAS server. + type: bool + allow_unmapped_user: + description: + - This flag is used to mandatorily disable access in case of any user + mapping failure. + - If C(true), then enable access in case of any user mapping failure. + - If C(false), then disable access in case of any user mapping failure. + - It can be mentioned during modification of the NAS server. + type: bool + default_windows_user: + description: + - Default windows user name used for granting access in the case of Unix + to Windows user mapping failure. + - It can be mentioned during modification of the NAS server. + type: str + default_unix_user: + description: + - Default Unix user name used for granting access in the case of Windows + to Unix user mapping failure. + - It can be mentioned during modification of the NAS server. + type: str + enable_windows_to_unix_username_mapping: + description: + - This parameter indicates whether a Unix to/from Windows user name + mapping is enabled. + - It can be mentioned during modification of the NAS server. + type: bool + is_packet_reflect_enabled: + description: + - If the packet has to be reflected, then this parameter + has to be set to C(true). + - It can be mentioned during modification of the NAS server. + type: bool + current_unix_directory_service: + description: + - This is the directory service used for querying identity information + for UNIX (such as UIDs, GIDs, net groups). + - It can be mentioned during modification of the NAS server. 
+ type: str + choices: ["NONE", "NIS", "LOCAL", "LDAP", "LOCAL_THEN_NIS", "LOCAL_THEN_LDAP"] + replication_params: + description: + - Settings required for enabling replication. + type: dict + suboptions: + destination_nas_server_name: + description: + - Name of the destination nas server. + - Default value will be source nas server name prefixed by 'DR_'. + type: str + replication_mode: + description: + - The replication mode. + - This is mandatory to enable replication. + type: str + choices: ['asynchronous', 'manual'] + rpo: + description: + - Maximum time to wait before the system syncs the source and destination LUNs. + - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous). + - The value should be in range of C(5) to C(1440). + type: int + replication_type: + description: + - Type of replication. + choices: ['local', 'remote'] + type: str + remote_system: + description: + - Details of remote system to which the replication is being configured. + - The I(remote_system) option should be specified if the + I(replication_type) is C(remote). + type: dict + suboptions: + remote_system_host: + required: true + description: + - IP or FQDN for remote Unity unisphere Host. + type: str + remote_system_username: + type: str + required: true + description: + - User name of remote Unity unisphere Host. + remote_system_password: + type: str + required: true + description: + - Password of remote Unity unisphere Host. + remote_system_verifycert: + type: bool + default: true + description: + - Boolean variable to specify whether or not to validate SSL + certificate of remote Unity unisphere Host. + - C(true) - Indicates that the SSL certificate should be verified. + - C(false) - Indicates that the SSL certificate should not be + verified. + remote_system_port: + description: + - Port at which remote Unity unisphere is hosted. + type: int + default: 443 + destination_pool_name: + description: + - Name of pool to allocate destination Luns. 
+ - Mutually exclusive with I(destination_pool_id). + type: str + destination_pool_id: + description: + - Id of pool to allocate destination Luns. + - Mutually exclusive with I(destination_pool_name). + type: str + destination_sp: + description: + - Storage process of destination nas server + choices: ['SPA', 'SPB'] + type: str + is_backup: + description: + - Indicates if the destination nas server is backup. + type: bool + replication_name: + description: + - User defined name for replication session. + type: str + new_replication_name: + description: + - Replication name to rename the session to. + type: str + replication_state: + description: + - State of the replication. + choices: ['enable', 'disable'] + type: str + replication_reuse_resource: + description: + - This parameter indicates if existing NAS Server is to be used for replication. + type: bool + state: + description: + - Define the state of NAS server on the array. + - The value present indicates that NAS server should exist on the system after + the task is executed. + - In this release deletion of NAS server is not supported. Hence, if state is + set to C(absent) for any existing NAS server then error will be thrown. + - For any non-existing NAS server, if state is set to C(absent) then it will return None. + type: str + required: true + choices: ['present', 'absent'] + +notes: +- The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' + + - name: Get Details of NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "{{nas_server_name}}" + state: "present" + + - name: Modify Details of NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "{{nas_server_name}}" + nas_server_new_name: "updated_sample_nas_server" + is_replication_destination: False + is_backup_only: False + is_multiprotocol_enabled: True + allow_unmapped_user: True + default_unix_user: "default_unix_sample_user" + default_windows_user: "default_windows_sample_user" + enable_windows_to_unix_username_mapping: True + current_unix_directory_service: "LDAP" + is_packet_reflect_enabled: True + state: "present" + + - name: Enable replication for NAS Server on Local System + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_id: "nas_10" + replication_reuse_resource: False + replication_params: + replication_name: "test_replication" + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "local" + destination_pool_name: "Pool_Ansible_Neo_DND" + destination_sp: "SPA" + is_backup: True + replication_state: "enable" + state: "present" + + - name: Enable replication for NAS Server on Remote System + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_reuse_resource: False + replication_params: + replication_name: "test_replication" + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: 
"remote" + remote_system: + remote_system_host: '10.10.10.10' + remote_system_verifycert: False + remote_system_username: 'test1' + remote_system_password: 'test1!' + destination_pool_name: "fastVP_pool" + destination_sp: "SPA" + is_backup: True + replication_state: "enable" + state: "present" + + - name: Enable replication for NAS Server on Remote System in existing NAS Server + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_reuse_resource: True + replication_params: + destination_nas_server_name: "destination_nas" + replication_mode: "asynchronous" + rpo: 60 + replication_type: "remote" + replication_name: "test_replication" + remote_system: + remote_system_host: '10.10.10.10' + remote_system_verifycert: False + remote_system_username: 'test1' + remote_system_password: 'test1!' + destination_pool_name: "fastVP_pool" + replication_state: "enable" + state: "present" + + - name: Modify replication on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_params: + replication_name: "test_repl" + new_replication_name: "test_repl_updated" + replication_mode: "asynchronous" + rpo: 50 + replication_state: "enable" + state: "present" + + - name: Disable replication on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + replication_state: "disable" + state: "present" + + - name: Disable replication by specifying replication_name on the nasserver + dellemc.unity.nasserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + 
nas_server_name: "dummy_nas" + replication_params: + replication_name: "test_replication" + replication_state: "disable" + state: "present" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True +nas_server_details: + description: The NAS server details. + type: dict + returned: When NAS server exists. + contains: + name: + description: Name of the NAS server. + type: str + id: + description: ID of the NAS server. + type: str + allow_unmapped_user: + description: Enable/disable access status in case of any user + mapping failure. + type: bool + current_unix_directory_service: + description: Directory service used for querying identity + information for UNIX (such as UIDs, GIDs, net groups). + type: str + default_unix_user: + description: Default Unix user name used for granting access + in the case of Windows to Unix user mapping failure. + type: str + default_windows_user: + description: Default windows user name used for granting + access in the case of Unix to Windows user mapping + failure. + type: str + is_backup_only: + description: Whether the NAS server is used as backup only. + type: bool + is_multi_protocol_enabled: + description: Indicates whether multiprotocol sharing mode is + enabled. + type: bool + is_packet_reflect_enabled: + description: If the packet reflect has to be enabled. + type: bool + is_replication_destination: + description: If the NAS server is a replication destination + then True. + type: bool + is_windows_to_unix_username_mapping_enabled: + description: Indicates whether a Unix to/from Windows user name + mapping is enabled. 
+ type: bool + sample: { + "allow_unmapped_user": null, + "cifs_server": { + "UnityCifsServerList": [ + { + "UnityCifsServer": { + "hash": 8761756885270, + "id": "cifs_34" + } + } + ] + }, + "current_sp": { + "UnityStorageProcessor": { + "hash": 8761756885273, + "id": "spb" + } + }, + "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NIS", + "default_unix_user": null, + "default_windows_user": null, + "existed": true, + "file_dns_server": { + "UnityFileDnsServer": { + "hash": 8761756885441, + "id": "dns_12" + } + }, + "file_interface": { + "UnityFileInterfaceList": [ + { + "UnityFileInterface": { + "hash": 8761756889908, + "id": "if_37" + } + } + ] + }, + "filesystems": null, + "hash": 8761757005084, + "health": { + "UnityHealth": { + "hash": 8761756867588 + } + }, + "home_sp": { + "UnityStorageProcessor": { + "hash": 8761756867618, + "id": "spb" + } + }, + "id": "nas_10", + "is_backup_only": false, + "is_multi_protocol_enabled": false, + "is_packet_reflect_enabled": false, + "is_replication_destination": false, + "is_replication_enabled": true, + "is_windows_to_unix_username_mapping_enabled": null, + "name": "dummy_nas", + "pool": { + "UnityPool": { + "hash": 8761756885360, + "id": "pool_7" + } + }, + "preferred_interface_settings": { + "UnityPreferredInterfaceSettings": { + "hash": 8761756885438, + "id": "preferred_if_10" + } + }, + "replication_type": "ReplicationTypeEnum.REMOTE", + "size_allocated": 3489660928, + "tenant": null, + "virus_checker": { + "UnityVirusChecker": { + "hash": 8761756885426, + "id": "cava_10" + } + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +LOG = utils.get_logger('nasserver') + +application_type = "Ansible/1.5.0" + + +class NASServer(object): + """Class with NAS Server operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = 
utils.get_unity_management_host_parameters() + self.module_params.update(get_nasserver_parameters()) + + # initialize the ansible module + mut_ex_args = [['nas_server_name', 'nas_server_id']] + required_one_of = [['nas_server_name', 'nas_server_id']] + + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + # result is a dictionary that contains changed status and + # nas server details + self.result = {"changed": False, + 'nas_server_details': {}} + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + self.nas_server_conn_obj = utils.nas_server.UnityNasServer( + self.unity_conn) + LOG.info('Connection established with the Unity Array') + + def get_current_uds_enum(self, current_uds): + """ + Get the enum of the Offline Availability parameter. + :param current_uds: Current Unix Directory Service string + :return: current_uds enum + """ + if current_uds in \ + utils.NasServerUnixDirectoryServiceEnum.__members__: + return utils.NasServerUnixDirectoryServiceEnum[current_uds] + else: + error_msg = "Invalid value {0} for Current Unix Directory" \ + " Service provided".format(current_uds) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_nas_server(self, nas_server_name, nas_server_id): + """ + Get the NAS Server Object using NAME/ID of the NAS Server. + :param nas_server_name: Name of the NAS Server + :param nas_server_id: ID of the NAS Server + :return: NAS Server object. + """ + nas_server = nas_server_name if nas_server_name else nas_server_id + try: + obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id, + name=nas_server_name) + if nas_server_id and obj_nas and not obj_nas.existed: + # if obj_nas is not None and existed is observed as False, + # then None will be returned. 
+ LOG.error("NAS Server object does not exist" + " with ID: %s ", nas_server_id) + return None + return obj_nas + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + self.module.fail_json(msg=cred_err) + else: + err_msg = "Failed to get details of NAS Server" \ + " {0} with error {1}".format(nas_server, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + except utils.UnityResourceNotFoundError as e: + err_msg = "Failed to get details of NAS Server" \ + " {0} with error {1}".format(nas_server, str(e)) + LOG.error(err_msg) + return None + + except Exception as e: + nas_server = nas_server_name if nas_server_name \ + else nas_server_id + err_msg = "Failed to get nas server details {0} with" \ + " error {1}".format(nas_server, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def to_update(self, nas_server_obj, current_uds): + LOG.info("Checking Whether the parameters are modified or not.") + + # Checking all parameters individually because the nas obj return + # names are different compared to ansible parameter names. 
+ + # Current Unix Directory Service + if current_uds is not None and \ + current_uds != nas_server_obj.current_unix_directory_service: + return True + + # Rename NAS Server + if self.module.params['nas_server_new_name'] is not None and \ + self.module.params['nas_server_new_name'] != \ + nas_server_obj.name: + return True + + # Is Replication Destination + if self.module.params["is_replication_destination"] is not None: + if nas_server_obj.is_replication_destination is None: + return True + elif self.module.params["is_replication_destination"] != \ + nas_server_obj.is_replication_destination: + return True + + # Is Multiprotocol Enabled + if self.module.params["is_multiprotocol_enabled"] is not None: + if nas_server_obj.is_multi_protocol_enabled is None: + return True + elif self.module.params["is_multiprotocol_enabled"] != \ + nas_server_obj.is_multi_protocol_enabled: + return True + + # Is Back Up Enabled + if self.module.params["is_backup_only"] is not None: + if nas_server_obj.is_backup_only is None: + return True + elif self.module.params["is_backup_only"] != \ + nas_server_obj.is_backup_only: + return True + + # Is Packet Reflect Enabled + if self.module.params["is_packet_reflect_enabled"] is not None: + if nas_server_obj.is_packet_reflect_enabled is None: + return True + elif self.module.params["is_packet_reflect_enabled"] != \ + nas_server_obj.is_packet_reflect_enabled: + return True + + # Allow Unmapped User + if self.module.params["allow_unmapped_user"] is not None: + if nas_server_obj.allow_unmapped_user is None: + return True + elif self.module.params["allow_unmapped_user"] != \ + nas_server_obj.allow_unmapped_user: + return True + + # Enable Windows To Unix User Mapping Flag + nas_win_flag = \ + nas_server_obj.is_windows_to_unix_username_mapping_enabled + input_win_flag = \ + self.module.params["enable_windows_to_unix_username_mapping"] + if input_win_flag is not None: + if nas_win_flag is None: + return True + elif nas_win_flag != input_win_flag: + 
return True + + # Default Windows User + if self.module.params["default_windows_user"] is not None: + if nas_server_obj.default_windows_user is None: + return True + elif self.module.params["default_windows_user"] != \ + nas_server_obj.default_windows_user: + return True + + # Default Unix User + if self.module.params["default_unix_user"] is not None: + if nas_server_obj.default_unix_user is None: + return True + elif self.module.params["default_unix_user"] != \ + nas_server_obj.default_unix_user: + return True + + return False + + def update_nas_server(self, nas_server_obj, new_name=None, + default_unix_user=None, default_windows_user=None, + is_rep_dest=None, is_multiprotocol_enabled=None, + allow_unmapped_user=None, is_backup_only=None, + is_packet_reflect_enabled=None, current_uds=None, + enable_win_to_unix_name_map=None): + """ + The Details of the NAS Server will be updated in the function. + """ + try: + nas_server_obj.modify( + name=new_name, + is_replication_destination=is_rep_dest, + is_backup_only=is_backup_only, + is_multi_protocol_enabled=is_multiprotocol_enabled, + default_unix_user=default_unix_user, + default_windows_user=default_windows_user, + allow_unmapped_user=allow_unmapped_user, + is_packet_reflect_enabled=is_packet_reflect_enabled, + enable_windows_to_unix_username=enable_win_to_unix_name_map, + current_unix_directory_service=current_uds) + + except Exception as e: + error_msg = "Failed to Update parameters of NAS Server" \ + " %s with error %s" % (nas_server_obj.name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def modify_replication_session(self, nas_server_obj, repl_session, replication_params): + """ Modify the replication session + :param: nas_server_obj: NAS server object + :param: repl_session: Replication session to be modified + :param: replication_params: Module input params + :return: True if modification is successful + """ + try: + LOG.info("Modifying replication session of nas server %s", 
nas_server_obj.name) + modify_payload = {} + if replication_params['replication_mode'] and \ + replication_params['replication_mode'] == 'manual': + rpo = -1 + elif replication_params['rpo']: + rpo = replication_params['rpo'] + name = repl_session.name + if replication_params['new_replication_name'] and \ + name != replication_params['new_replication_name']: + name = replication_params['new_replication_name'] + + if repl_session.name != name: + modify_payload['name'] = name + if ((replication_params['replication_mode'] or replication_params['rpo']) and + repl_session.max_time_out_of_sync != rpo): + modify_payload['max_time_out_of_sync'] = rpo + + if modify_payload: + repl_session.modify(**modify_payload) + return True + + return False + except Exception as e: + errormsg = "Modifying replication session failed with error %s", e + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def enable_replication(self, nas_server_obj, replication, replication_reuse_resource): + """ Enable replication on NAS Server + :param: nas_server_obj: NAS Server object. + :param: replication: Dict which has all the replication parameter values. + :return: True if replication is enabled else False. 
+ """ + try: + # Validate replication params + self.validate_nas_server_replication_params(replication) + self.update_replication_params(replication, replication_reuse_resource) + + repl_session = \ + self.get_replication_session_on_filter(nas_server_obj, replication, "modify") + if repl_session: + return self.modify_replication_session(nas_server_obj, repl_session, replication) + + self.validate_create_replication_params(replication) + replication_args_list = get_replication_args_list(replication) + + # Get remote system + if 'replication_type' in replication and replication['replication_type'] == 'remote': + self.get_remote_system(replication, replication_args_list) + + # Form parameters when replication_reuse_resource is False + if not replication_reuse_resource: + update_replication_arg_list(replication, replication_args_list, nas_server_obj) + nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list) + else: + replication_args_list['dst_nas_server_id'] = replication['destination_nas_server_id'] + nas_server_obj.replicate(**replication_args_list) + return True + + if 'replication_type' in replication and replication['replication_type'] == 'local': + update_replication_arg_list(replication, replication_args_list, nas_server_obj) + nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list) + return True + + except Exception as e: + errormsg = "Enabling replication to the nas server %s failed " \ + "with error %s" % (nas_server_obj.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def disable_replication(self, obj_nas, replication_params): + """ Remove replication from the nas server + :param: replication_params: Module input params + :param: obj_nas: NAS Server object + :return: True if disabling replication is successful + """ + try: + LOG.info(("Disabling replication on the nas server %s", obj_nas.name)) + if replication_params: + self.update_replication_params(replication_params, False) + 
repl_session = \ + self.get_replication_session_on_filter(obj_nas, replication_params, "delete") + if repl_session: + repl_session.delete() + return True + return False + except Exception as e: + errormsg = "Disabling replication on the nas server %s failed " \ + "with error %s" % (obj_nas.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_replication_session_on_filter(self, obj_nas, replication_params, action): + """ Retrieves replication session on nas server + :param: obj_nas: NAS server object + :param: replication_params: Module input params + :param: action: Specifies action as modify or delete + :return: Replication session based on filter + """ + if replication_params and replication_params['remote_system']: + repl_session = \ + self.get_replication_session(obj_nas, filter_key="remote_system_name", + replication_params=replication_params) + elif replication_params and replication_params['replication_name']: + repl_session = \ + self.get_replication_session(obj_nas, filter_key="name", + name=replication_params['replication_name']) + else: + repl_session = self.get_replication_session(obj_nas, action=action) + if repl_session and action and replication_params and \ + replication_params['replication_type'] == 'local' and \ + repl_session.remote_system.name != self.unity_conn.name: + return None + return repl_session + + def get_replication_session(self, obj_nas, filter_key=None, replication_params=None, name=None, action=None): + """ Retrieves the replication sessions configured for the nas server + :param: obj_nas: NAS server object + :param: filter_key: Key to filter replication sessions + :param: replication_params: Module input params + :param: name: Replication session name + :param: action: Specifies modify or delete action on replication session + :return: Replication session details + """ + try: + repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_nas.id) + if not filter_key and repl_session: + 
if len(repl_session) > 1: + if action: + error_msg = 'There are multiple replication sessions for the nas server.'\ + ' Please specify replication_name in replication_params to %s.' % action + self.module.fail_json(msg=error_msg) + return repl_session + return repl_session[0] + for session in repl_session: + if filter_key == 'remote_system_name' and \ + session.remote_system.name == replication_params['remote_system_name']: + return session + if filter_key == 'name' and session.name == name: + return session + return None + except Exception as e: + errormsg = "Retrieving replication session on the nas server failed " \ + "with error %s", str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_remote_system(self, replication, replication_args_list): + remote_system_name = replication['remote_system_name'] + remote_system_list = self.unity_conn.get_remote_system() + for remote_system in remote_system_list: + if remote_system.name == remote_system_name: + replication_args_list['remote_system'] = remote_system + break + if 'remote_system' not in replication_args_list.keys(): + errormsg = "Remote system %s is not found" % (remote_system_name) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def update_replication_params(self, replication, replication_reuse_resource): + """ Update replication dict with remote system information + :param: replication: Dict which has all the replication parameter values + :return: Updated replication Dict + """ + try: + if 'replication_type' in replication and replication['replication_type'] == 'remote': + connection_params = { + 'unispherehost': replication['remote_system']['remote_system_host'], + 'username': replication['remote_system']['remote_system_username'], + 'password': replication['remote_system']['remote_system_password'], + 'validate_certs': replication['remote_system']['remote_system_verifycert'], + 'port': replication['remote_system']['remote_system_port'] + } + remote_system_conn = 
utils.get_unity_unisphere_connection( + connection_params, application_type) + replication['remote_system_name'] = remote_system_conn.name + if replication['destination_pool_name'] is not None: + pool_object = remote_system_conn.get_pool(name=replication['destination_pool_name']) + replication['destination_pool_id'] = pool_object.id + if replication['destination_nas_server_name'] is not None and replication_reuse_resource: + nas_object = remote_system_conn.get_nas_server(name=replication['destination_nas_server_name']) + replication['destination_nas_server_id'] = nas_object.id + else: + replication['remote_system_name'] = self.unity_conn.name + if replication['destination_pool_name'] is not None: + pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name']) + replication['destination_pool_id'] = pool_object.id + except Exception as e: + errormsg = "Updating replication params failed with error %s" % str(e) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_rpo(self, replication): + if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous' \ + and replication['rpo'] is None: + errormsg = "rpo is required together with 'asynchronous' replication_mode." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if (replication['rpo'] and (replication['rpo'] < 5 or replication['rpo'] > 1440)) \ + and (replication['replication_mode'] and replication['replication_mode'] != 'manual' or + not replication['replication_mode'] and replication['rpo'] != -1): + errormsg = "rpo value should be in range of 5 to 1440" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_nas_server_replication_params(self, replication): + """ Validate NAS server replication params + :param: replication: Dict which has all the replication parameter values + """ + + # Valdiate replication + if replication is None: + errormsg = "Please specify replication_params to enable replication." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # validate destination pool info + if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None: + errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Validate replication mode + self.validate_rpo(replication) + # Validate replication type + if replication['replication_type'] == 'remote' and replication['remote_system'] is None: + errormsg = "Remote_system is required together with 'remote' replication_type" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + # Validate destination NAS server name + if 'destination_nas_name' in replication and replication['destination_nas_name'] is not None: + dst_nas_server_name_length = len(replication['destination_nas_name']) + if dst_nas_server_name_length == 0 or dst_nas_server_name_length > 95: + errormsg = "destination_nas_name value should be in range of 1 to 95" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_create_replication_params(self, replication): + ''' Validate replication params ''' + if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None: + errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required." + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + keys = ['replication_mode', 'replication_type'] + for key in keys: + if replication[key] is None: + errormsg = "Please specify %s to enable replication." 
% key + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def perform_module_operation(self): + """ + Perform different actions on NAS Server based on user parameters + chosen in playbook + """ + state = self.module.params['state'] + nas_server_name = self.module.params['nas_server_name'] + nas_server_id = self.module.params['nas_server_id'] + nas_server_new_name = self.module.params['nas_server_new_name'] + default_unix_user = self.module.params['default_unix_user'] + default_windows_user = self.module.params['default_windows_user'] + + is_replication_destination = \ + self.module.params['is_replication_destination'] + is_multiprotocol_enabled = \ + self.module.params['is_multiprotocol_enabled'] + allow_unmapped_user = self.module.params['allow_unmapped_user'] + enable_windows_to_unix_username_mapping = \ + self.module.params['enable_windows_to_unix_username_mapping'] + + is_backup_only = self.module.params['is_backup_only'] + is_packet_reflect_enabled = \ + self.module.params['is_packet_reflect_enabled'] + + current_uds = self.module.params['current_unix_directory_service'] + replication = self.module.params['replication_params'] + replication_state = self.module.params['replication_state'] + replication_reuse_resource = self.module.params['replication_reuse_resource'] + # Get the enum for the corresponding offline_availability + if current_uds: + current_uds = \ + self.get_current_uds_enum(current_uds) + + changed = False + + if replication and replication_state is None: + self.module.fail_json(msg="Please specify replication_state along with replication_params") + + ''' + Get details of NAS Server. + ''' + nas_server_obj = None + if nas_server_name or nas_server_id: + nas_server_obj = self.get_nas_server(nas_server_name, + nas_server_id) + + # As creation is not supported and if NAS Server does not exist + # along with state as present, then error will be thrown. + if not nas_server_obj and state == "present": + msg = "NAS Server Resource not found. 
Please enter a valid " \ + "Name/ID to get or modify the parameters of nas server." + LOG.error(msg) + self.module.fail_json(msg=msg) + + ''' + Update the parameters of NAS Server + ''' + if nas_server_obj and state == "present": + update_flag = self.to_update(nas_server_obj, current_uds) + if update_flag: + self.update_nas_server( + nas_server_obj, nas_server_new_name, default_unix_user, + default_windows_user, is_replication_destination, + is_multiprotocol_enabled, allow_unmapped_user, + is_backup_only, is_packet_reflect_enabled, + current_uds, enable_windows_to_unix_username_mapping) + changed = True + + # As deletion is not supported and if NAS Server exists along with + # state as absent, then error will be thrown. + if nas_server_obj and state == 'absent': + self.module.fail_json(msg="Deletion of NAS Server is " + "currently not supported.") + + if state == 'present' and nas_server_obj and replication_state is not None: + if replication_state == 'enable': + changed = self.enable_replication(nas_server_obj, replication, replication_reuse_resource) + else: + changed = self.disable_replication(nas_server_obj, replication) + + ''' + Update the changed state and NAS Server details + ''' + nas_server_details = None + if nas_server_obj: + nas_server_details = self.get_nas_server( + None, nas_server_obj.id)._get_properties() + + self.result["changed"] = changed + self.result["nas_server_details"] = nas_server_details + self.module.exit_json(**self.result) + + +def get_nasserver_parameters(): + """ + This method provides parameters required for the ansible NAS Server + modules on Unity + """ + + return dict( + nas_server_name=dict(), nas_server_id=dict(), + nas_server_new_name=dict(), + default_unix_user=dict(), + default_windows_user=dict(), + current_unix_directory_service=dict( + choices=["NIS", "LDAP", "LOCAL_THEN_NIS", + "LOCAL_THEN_LDAP", "NONE", "LOCAL"]), + is_replication_destination=dict(type='bool'), + is_backup_only=dict(type='bool'), + 
is_multiprotocol_enabled=dict(type='bool'), + allow_unmapped_user=dict(type='bool'), + enable_windows_to_unix_username_mapping=dict(type='bool'), + is_packet_reflect_enabled=dict(type='bool'), + replication_params=dict(type='dict', options=dict( + destination_nas_server_name=dict(type='str'), + replication_mode=dict(type='str', choices=['asynchronous', 'manual']), + rpo=dict(type='int'), + replication_type=dict(type='str', choices=['local', 'remote']), + remote_system=dict(type='dict', + options=dict( + remote_system_host=dict(type='str', required=True, no_log=True), + remote_system_verifycert=dict(type='bool', required=False, + default=True), + remote_system_username=dict(type='str', required=True), + remote_system_password=dict(type='str', required=True, no_log=True), + remote_system_port=dict(type='int', required=False, default=443, no_log=True) + )), + destination_pool_name=dict(type='str'), + destination_pool_id=dict(type='str'), + destination_sp=dict(type='str', choices=['SPA', 'SPB']), + is_backup=dict(type='bool'), + replication_name=dict(type='str'), + new_replication_name=dict(type='str') + )), + replication_reuse_resource=dict(type='bool'), + replication_state=dict(type='str', choices=['enable', 'disable']), + state=dict(required=True, choices=['present', 'absent'], type='str') + ) + + +def get_sp_enum(destination_sp): + """Getting correct enum values for Storage Processor + :param: destination_sp: Storage Processor to be used in Destination NAS Server. + :return: enum value for Storage Processor. 
+ """ + if utils.NodeEnum[destination_sp]: + destination_sp_enum = utils.NodeEnum[destination_sp] + return destination_sp_enum + + +def get_replication_args_list(replication_params): + """Returns the replication args for payload""" + replication_args_list = {} + + if replication_params['replication_name']: + replication_args_list['replication_name'] = replication_params['replication_name'] + if 'replication_mode' in replication_params and \ + replication_params['replication_mode'] == 'asynchronous': + replication_args_list['max_time_out_of_sync'] = replication_params['rpo'] + else: + replication_args_list['max_time_out_of_sync'] = -1 + + return replication_args_list + + +def update_replication_arg_list(replication, replication_args_list, nas_server_obj): + """ Update replication arg list + :param: replication: Dict which has all the replication parameter values + :param: replication_args_list: the existing list which should be updated + :param: nas_server_obj: NAS Server object on which replication is to be enabled + :return: Updated replication_args_list + """ + if 'destination_sp' in replication and replication['destination_sp']: + dst_sp_enum = get_sp_enum(replication['destination_sp']) + replication_args_list['dst_sp'] = dst_sp_enum + + replication_args_list['dst_pool_id'] = replication['destination_pool_id'] + + if 'is_backup' in replication and replication['is_backup']: + replication_args_list['is_backup_only'] = replication['is_backup'] + + if replication['replication_type'] == 'local': + replication_args_list['dst_nas_server_name'] = "DR_" + nas_server_obj.name + if 'destination_nas_server_name' in replication and replication['destination_nas_server_name'] is not None: + replication_args_list['dst_nas_server_name'] = replication['destination_nas_server_name'] + else: + if replication['destination_nas_server_name'] is None: + replication_args_list['dst_nas_server_name'] = nas_server_obj.name + + +def main(): + """ Create Unity NAS Server object and perform 
action on it + based on user input from playbook""" + obj = NASServer() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfs.py b/ansible_collections/dellemc/unity/plugins/modules/nfs.py new file mode 100644 index 00000000..8b8abe77 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/nfs.py @@ -0,0 +1,1667 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing nfs export on Unity""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: nfs +version_added: '1.1.0' +short_description: Manage NFS export on Unity storage system +description: +- Managing NFS export on Unity storage system includes- + Create new NFS export, + Modify NFS export attributes, + Display NFS export details, + Delete NFS export. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Vivek Soni (@v-soni11) + +options: + nfs_export_name: + description: + - Name of the nfs export. + - Mandatory for create operation. + - Specify either I(nfs_export_name) or I(nfs_export_id) (but not both) for any + operation. + type: str + nfs_export_id: + description: + - ID of the nfs export. + - This is a unique ID generated by Unity storage system. + type: str + filesystem_name: + description: + - Name of the filesystem for which NFS export will be created. + - Either filesystem or snapshot is required for creation of the NFS. + - If I(filesystem_name) is specified, then I(nas_server) is required to uniquely + identify the filesystem. + - If filesystem parameter is provided, then snapshot cannot be specified. + type: str + filesystem_id: + description: + - ID of the filesystem. + - This is a unique ID generated by Unity storage system. 
+ type: str + snapshot_name: + description: + - Name of the snapshot for which NFS export will be created. + - Either filesystem or snapshot is required for creation of the NFS + export. + - If snapshot parameter is provided, then filesystem cannot be specified. + type: str + snapshot_id: + description: + - ID of the snapshot. + - This is a unique ID generated by Unity storage system. + type: str + nas_server_name: + description: + - Name of the NAS server on which filesystem will be hosted. + type: str + nas_server_id: + description: + - ID of the NAS server on which filesystem will be hosted. + type: str + path: + description: + - Local path to export relative to the NAS server root. + - With NFS, each export of a file_system or file_snap must have a unique + local path. + - Mandatory while creating NFS export. + type: str + description: + description: + - Description of the NFS export. + - Optional parameter when creating a NFS export. + - To modify description, pass the new value in I(description) field. + - To remove description, pass the empty value in I(description) field. + type: str + host_state: + description: + - Define whether the hosts can access the NFS export. + - Required when adding or removing access of hosts from the export. + type: str + choices: ['present-in-export', 'absent-in-export'] + anonymous_uid: + description: + - Specifies the user ID of the anonymous account. + - If not specified at the time of creation, it will be set to 4294967294. + type: int + anonymous_gid: + description: + - Specifies the group ID of the anonymous account. + - If not specified at the time of creation, it will be set to 4294967294. + type: int + state: + description: + - State variable to determine whether NFS export will exist or not. + required: true + type: str + choices: ['absent', 'present'] + default_access: + description: + - Default access level for all hosts that can access the NFS export. 
+ - For hosts that need different access than the default, + they can be configured by adding to the list. + - If I(default_access) is not mentioned during creation, then NFS export will + be created with C(NO_ACCESS). + type: str + choices: ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT', + 'READ_ONLY_ROOT'] + min_security: + description: + - NFS enforced security type for users accessing a NFS export. + - If not specified at the time of creation, it will be set to C(SYS). + type: str + choices: ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY', + 'KERBEROS_WITH_ENCRYPTION'] + adv_host_mgmt_enabled: + description: + - If C(false), allows you to specify hosts without first having to register them. + - Mandatory while adding access hosts. + type: bool + no_access_hosts: + description: + - Hosts with no access to the NFS export. + - List of dictionaries. Each dictionary will have any of the keys from + I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address). + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + ip_address: + description: + - IP address of the host. + type: str + subnet: + description: + - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'. + type: str + netgroup: + description: + - Netgroup that is defined in NIS or the local netgroup file. + type: str + domain: + description: + - DNS domain, where all NFS clients in the domain are included in the host list. + type: str + read_only_hosts: + description: + - Hosts with read-only access to the NFS export. + - List of dictionaries. 
Each dictionary will have any of the keys from + I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address). + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + ip_address: + description: + - IP address of the host. + type: str + subnet: + description: + - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'. + type: str + netgroup: + description: + - Netgroup that is defined in NIS or the local netgroup file. + type: str + domain: + description: + - DNS domain, where all NFS clients in the domain are included in the host list. + type: str + read_only_root_hosts: + description: + - Hosts with read-only for root user access to the NFS export. + - List of dictionaries. Each dictionary will have any of the keys from + I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address). + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + ip_address: + description: + - IP address of the host. + type: str + subnet: + description: + - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'. + type: str + netgroup: + description: + - Netgroup that is defined in NIS or the local netgroup file. 
+ type: str + domain: + description: + - DNS domain, where all NFS clients in the domain are included in the host list. + type: str + read_write_hosts: + description: + - Hosts with read and write access to the NFS export. + - List of dictionaries. Each dictionary will have any of the keys from + I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address). + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + ip_address: + description: + - IP address of the host. + type: str + subnet: + description: + - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'. + type: str + netgroup: + description: + - Netgroup that is defined in NIS or the local netgroup file. + type: str + domain: + description: + - DNS domain, where all NFS clients in the domain are included in the host list. + type: str + read_write_root_hosts: + description: + - Hosts with read and write for root user access to the NFS export. + - List of dictionaries. Each dictionary will have any of the keys from + I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address). + - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address). + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + ip_address: + description: + - IP address of the host. 
+ type: str + subnet: + description: + - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'. + type: str + netgroup: + description: + - Netgroup that is defined in NIS or the local netgroup file. + type: str + domain: + description: + - DNS domain, where all NFS clients in the domain are included in the host list. + type: str +notes: +- The I(check_mode) is not supported. +""" + +EXAMPLES = r""" +- name: Create nfs export from filesystem + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + path: '/' + filesystem_id: "fs_377" + state: "present" + +- name: Create nfs export from snapshot + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_snap" + path: '/' + snapshot_name: "ansible_fs_snap" + state: "present" + +- name: Modify nfs export + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + nas_server_id: "nas_3" + description: "" + default_access: "READ_ONLY_ROOT" + anonymous_gid: 4294967290 + anonymous_uid: 4294967290 + state: "present" + +- name: Add host in nfs export with adv_host_mgmt_enabled as true + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: true + no_access_hosts: + - host_id: "Host_1" + read_only_hosts: + - host_id: "Host_2" + read_only_root_hosts: + - host_name: "host_name1" + read_write_hosts: + - host_name: "host_name2" + read_write_root_hosts: + - ip_address: "1.1.1.1" + host_state: "present-in-export" + state: "present" + +- name: 
Remove host in nfs export with adv_host_mgmt_enabled as true + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: true + no_access_hosts: + - host_id: "Host_1" + read_only_hosts: + - host_id: "Host_2" + read_only_root_hosts: + - host_name: "host_name1" + read_write_hosts: + - host_name: "host_name2" + read_write_root_hosts: + - ip_address: "1.1.1.1" + host_state: "absent-in-export" + state: "present" + +- name: Add host in nfs export with adv_host_mgmt_enabled as false + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: false + no_access_hosts: + - domain: "google.com" + read_only_hosts: + - netgroup: "netgroup_admin" + read_only_root_hosts: + - host_name: "host5" + read_write_hosts: + - subnet: "168.159.57.4/255.255.255.0" + read_write_root_hosts: + - ip_address: "10.255.2.4" + host_state: "present-in-export" + state: "present" + +- name: Remove host in nfs export with adv_host_mgmt_enabled as false + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_from_fs" + filesystem_id: "fs_377" + adv_host_mgmt_enabled: false + no_access_hosts: + - domain: "google.com" + read_only_hosts: + - netgroup: "netgroup_admin" + read_only_root_hosts: + - host_name: "host5" + read_write_hosts: + - subnet: "168.159.57.4/255.255.255.0" + read_write_root_hosts: + - ip_address: "10.255.2.4" + host_state: "absent-in-export" + state: "present" + +- name: Get nfs details + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + 
validate_certs: "{{validate_certs}}" + nfs_export_id: "NFSShare_291" + state: "present" + +- name: Delete nfs export by nfs name + dellemc.unity.nfs: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nfs_export_name: "ansible_nfs_name" + nas_server_name: "ansible_nas_name" + state: "absent" +""" + +RETURN = r""" +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: "false" + +nfs_share_details: + description: Details of the nfs export. + returned: When nfs export exists. + type: dict + contains: + anonymous_uid: + description: User ID of the anonymous account + type: int + anonymous_gid: + description: Group ID of the anonymous account + type: int + default_access: + description: Default access level for all hosts that can access export + type: str + description: + description: Description about the nfs export + type: str + id: + description: ID of the nfs export + type: str + min_security: + description: NFS enforced security type for users accessing an export + type: str + name: + description: Name of the nfs export + type: str + no_access_hosts_string: + description: Hosts with no access to the nfs export + type: str + read_only_hosts_string: + description: Hosts with read-only access to the nfs export + type: str + read_only_root_hosts_string: + description: Hosts with read-only for root user access to the nfs export + type: str + read_write_hosts_string: + description: Hosts with read and write access to the nfs export + type: str + read_write_root_hosts_string: + description: Hosts with read and write for root user access to export + type: str + type: + description: NFS export type. i.e. 
filesystem or snapshot + type: str + export_paths: + description: Export paths that can be used to mount and access export + type: list + filesystem: + description: Details of the filesystem on which nfs export is present + type: dict + contains: + UnityFileSystem: + description: filesystem details + type: dict + contains: + id: + description: ID of the filesystem + type: str + name: + description: Name of the filesystem + type: str + nas_server: + description: Details of the nas server + type: dict + contains: + UnityNasServer: + description: NAS server details + type: dict + contains: + id: + description: ID of the nas server + type: str + name: + description: Name of the nas server + type: str + sample: { + 'anonymous_gid': 4294967294, + 'anonymous_uid': 4294967294, + 'creation_time': '2022-03-09 15:05:34.720000+00:00', + 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS', + 'description': '', + 'export_option': 1, + 'export_paths': [ + '**.***.**.**:/dummy-share-123' + ], + 'filesystem': { + 'UnityFileSystem': { + 'id': 'fs_id_1', + 'name': 'fs_name_1' + } + }, + 'host_accesses': None, + 'id': 'NFSShare_14393', + 'is_read_only': None, + 'min_security': 'NFSShareSecurityEnum.SYS', + 'modification_time': '2022-04-25 08:12:28.179000+00:00', + 'name': 'dummy-share-123', + 'nfs_owner_username': None, + 'no_access_hosts': None, + 'no_access_hosts_string': 'host1,**.***.*.*', + 'path': '/', + 'read_only_hosts': None, + 'read_only_hosts_string': '', + 'read_only_root_access_hosts': None, + 'read_only_root_hosts_string': '', + 'read_write_hosts': None, + 'read_write_hosts_string': '', + 'read_write_root_hosts_string': '', + 'role': 'NFSShareRoleEnum.PRODUCTION', + 'root_access_hosts': None, + 'snap': None, + 'type': 'NFSTypeEnum.NFS_SHARE', + 'existed': True, + 'nas_server': { + 'UnityNasServer': { + 'id': 'nas_id_1', + 'name': 'dummy_nas_server' + } + } + } +""" + +import re +import traceback + +try: + from ipaddress import ip_network, IPv4Network, IPv6Network + 
# Choices accepted by the module's 'default_access' option (module-level
# spelling; 'READ_ONLY_ROOT' is later mapped to the SDK's 'RO_ROOT').
DEFAULT_ACCESS_LIST = ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT',
                       'READ_ONLY_ROOT']
# Choices accepted by the module's 'min_security' option; names match
# utils.NFSShareSecurityEnum members.
MIN_SECURITY_LIST = ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY',
                     'KERBEROS_WITH_ENCRYPTION']
# Shared argument-spec fragment reused by every host-access list option
# (no_access_hosts, read_only_hosts, ...); each list element may carry any
# one of these identifying keys.
HOST_DICT = dict(type='list', required=False, elements='dict',
                 options=dict(host_name=dict(),
                              host_id=dict(),
                              ip_address=dict(),
                              subnet=dict(),
                              netgroup=dict(),
                              domain=dict()))
# Choices for 'host_state' (add vs. remove hosts from the export).
HOST_STATE_LIST = ['present-in-export', 'absent-in-export']
# Choices for the module 'state' option.
STATE_LIST = ['present', 'absent']

# Application string reported to Unisphere when opening the connection.
application_type = "Ansible/1.5.0"
def validate_host_access_data(self, host_dict):
    """Sanity-check a single host-access entry from the playbook.

    Fails the module when ``host_name`` actually holds an IP address,
    when ``ip_address`` holds a string that is neither an IP nor a
    valid FQDN, or when ``subnet`` is not in
    '<address>/<netmask-or-prefix>' form.

    :param host_dict: one host access entry (empty values already removed)
    :return: None (calls fail_json on invalid data)
    """
    # Pre-compiled FQDN matcher: 4-253 chars overall, dot-separated
    # labels of up to 63 chars that do not start with '-'.
    fqdn_regex = re.compile(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{0,62}'
                            r'[a-zA-Z0-9]\.)+[a-zA-Z]{2,63}$)')

    host_name = host_dict.get('host_name')
    if host_name and get_ip_version(host_name) in (4, 6):
        msg = "IP4/IP6: %s given in host_name instead " \
              "of name" % host_name
        LOG.error(msg)
        self.module.fail_json(msg=msg)

    candidate = host_dict.get('ip_address')
    if candidate and get_ip_version(candidate) == 0:
        # Not an IP at all, so it must at least look like an FQDN.
        if not fqdn_regex.match(candidate):
            msg = "%s is not a valid FQDN" % candidate
            LOG.error(msg)
            self.module.fail_json(msg=msg)

    subnet = host_dict.get('subnet')
    if subnet and len(subnet.split("/")) != 2:
        msg = "Subnet should be in format 'IP address/netmask' or 'IP address/prefix length'"
        LOG.error(msg)
        self.module.fail_json(msg=msg)
def validate_host_access_input_params(self):
    """Validate the host-access related playbook options.

    Supplying any host list option requires both 'host_state' and
    'adv_host_mgmt_enabled' to be set as well; each given host entry is
    then validated individually.

    :return: None (calls fail_json on invalid input)
    """
    for param in self.host_param_mapping:
        given = self.module.params[param]
        if not given:
            continue
        context_missing = (not self.module.params['host_state'] or
                           self.module.params['adv_host_mgmt_enabled'] is None)
        if context_missing:
            msg = "'host_state' and 'adv_host_mgmt_enabled' is required along with: %s" % param
            LOG.error(msg)
            self.module.fail_json(msg=msg)
        else:
            for entry in given:
                # Drop empty values so only the keys the user actually
                # supplied are checked.
                entry = {k: v for k, v in entry.items() if v}
                self.validate_adv_host_mgmt_enabled_check(entry)
                self.validate_host_access_data(entry)
def get_nfs_id_or_name(self):
    """Return the identifier the user supplied for the nfs export.

    Prefers 'nfs_export_id' and falls back to 'nfs_export_name'
    (the module's required_one_of guarantees one of them is set).

    :return: value provided in nfs_export_id/nfs_export_name
    :rtype: str
    """
    params = self.module.params
    return params['nfs_export_id'] or params['nfs_export_name']
def get_nfs_share(self, id=None, name=None):
    """ Get the nfs export

    Looks the export up by ID or by name, scoping the query to
    self.fs_obj / self.snap_obj when the playbook identified the parent
    filesystem / snapshot, and disambiguating via self.nas_obj when the
    same share name exists on several NAS servers.

    :param id: nfs export ID
    :param name: nfs export name (used only when no ID is given)
    :return: nfs_export object if nfs exists else None
    :rtype: UnityNfsShare or None
    """
    try:
        if not id and not name:
            msg = "Please give nfs id/name"
            LOG.error(msg)
            self.module.fail_json(msg=msg)

        id_or_name = id if id else name
        LOG.info("Getting nfs export: %s", id_or_name)
        if id:
            # Get nfs details from nfs ID
            if self.is_given_nfs_for_fs:
                nfs = self.unity.get_nfs_share(
                    _id=id, filesystem=self.fs_obj)
            elif self.is_given_nfs_for_fs is False:
                # nfs from snap
                nfs = self.unity.get_nfs_share(_id=id, snap=self.snap_obj)
            else:
                # neither filesystem nor snapshot was specified
                nfs = self.unity.get_nfs_share(_id=id)
        else:
            # Get nfs details from nfs name
            if self.is_given_nfs_for_fs:
                nfs = self.unity.get_nfs_share(
                    name=name, filesystem=self.fs_obj)
            elif self.is_given_nfs_for_fs is False:
                # nfs from snap
                nfs = self.unity.get_nfs_share(
                    name=name, snap=self.snap_obj)
            else:
                nfs = self.unity.get_nfs_share(name=name)

        if isinstance(nfs, utils.UnityNfsShareList):
            # This block will be executed, when we are trying to get nfs
            # details using nfs name & nas server.
            nfs_list = nfs
            LOG.info("Multiple nfs export with same name: %s "
                     "found", id_or_name)
            if self.nas_obj:
                # pick the share living on the requested NAS server
                for n in nfs_list:
                    if n.filesystem.nas_server == self.nas_obj:
                        return n
                # NOTE(review): this message (and the one below) keeps a
                # literal '%s' — it is never %-formatted with id_or_name
                # before fail_json; confirm and format it.
                msg = "Multiple nfs share with same name: %s found. " \
                      "Given nas server is not correct. Please check"
            else:
                msg = "Multiple nfs share with same name: %s found. " \
                      "Please give nas server"
        else:
            # nfs is instance of UnityNfsShare class
            if nfs and nfs.existed:
                # when a NAS server was given, verify the share is on it
                if self.nas_obj and nfs.filesystem.nas_server != \
                        self.nas_obj:
                    msg = "nfs found but nas details given is incorrect"
                    LOG.error(msg)
                    self.module.fail_json(msg=msg)
                LOG.info("Successfully got nfs share for: %s", id_or_name)
                return nfs
            elif nfs and not nfs.existed:
                # in case of incorrect id, sdk returns nfs object whose
                # attribute existed=False
                msg = "Please check incorrect nfs id is given"
            else:
                msg = "Failed to get nfs share: %s" % id_or_name
    except utils.UnityResourceNotFoundError as e:
        # Not-found is a normal outcome (e.g. before a create): report
        # None instead of failing the module.
        msg = "NFS share: %(id_or_name)s not found " \
              "error: %(err)s" % {'id_or_name': id_or_name, 'err': str(e)}
        LOG.info(str(msg))
        return None
    except utils.HTTPClientError as e:
        if e.http_status == 401:
            msg = "Failed to get nfs share: %s due to incorrect " \
                  "username/password error: %s" % (id_or_name, str(e))
        else:
            msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
                                                             str(e))
    except utils.StoropsConnectTimeoutError as e:
        # NOTE(review): the value printed after 'unispherehost IP:' is
        # params['nfs_export_id'] — presumably params['unispherehost']
        # was intended here; confirm before relying on this message.
        msg = "Failed to get nfs share: %s check unispherehost IP: %s " \
              "error: %s" % (id_or_name,
                             self.module.params['nfs_export_id'], str(e))
    except Exception as e:
        msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
                                                         str(e))
    # Every path that did not return above funnels here with a populated
    # msg and aborts the module run.
    LOG.error(msg)
    self.module.fail_json(msg=msg)
def get_filesystem(self):
    """ Get filesystem obj

    Resolves the filesystem either by name (which additionally requires
    self.nas_obj to scope the lookup) or by ID.

    :return: filesystem obj
    :rtype: UnityFileSystem
    """
    if self.module.params['filesystem_id']:
        id_or_name = self.module.params['filesystem_id']
    elif self.module.params['filesystem_name']:
        id_or_name = self.module.params['filesystem_name']
    else:
        msg = "Please provide filesystem ID/name, to get filesystem"
        LOG.error(msg)
        self.module.fail_json(msg=msg)

    try:
        if self.module.params['filesystem_name']:
            # A name is only unique per NAS server, so the NAS server
            # object is mandatory for name-based lookups.
            if not self.nas_obj:
                err_msg = "NAS Server is required to get the filesystem"
                LOG.error(err_msg)
                self.module.fail_json(msg=err_msg)
            LOG.info("Getting filesystem by name: %s", id_or_name)
            fs_obj = self.unity.get_filesystem(
                name=self.module.params['filesystem_name'],
                nas_server=self.nas_obj)
        elif self.module.params['filesystem_id']:
            LOG.info("Getting filesystem by ID: %s", id_or_name)
            fs_obj = self.unity.get_filesystem(
                _id=self.module.params['filesystem_id'])
    except utils.UnityResourceNotFoundError as e:
        msg = "Filesystem: %s not found error: %s" % (
            id_or_name, str(e))
        LOG.error(msg)
        self.module.fail_json(msg=msg)
    except utils.HTTPClientError as e:
        if e.http_status == 401:
            msg = "Failed to get filesystem due to incorrect " \
                  "username/password error: %s" % str(e)
        else:
            msg = "Failed to get filesystem error: %s" % str(e)
        LOG.error(msg)
        # FIX: this branch previously only logged and fell through to the
        # 'if fs_obj' check below with fs_obj unbound, raising an
        # UnboundLocalError instead of a clean module failure.
        self.module.fail_json(msg=msg)
    except Exception as e:
        msg = "Failed to get filesystem: %s error: %s" % (
            id_or_name, str(e))
        LOG.error(msg)
        self.module.fail_json(msg=msg)

    if fs_obj and fs_obj.existed:
        LOG.info("Got the filesystem: %s", id_or_name)
        return fs_obj
    else:
        # the SDK signals an unknown ID via existed=False
        msg = "Filesystem: %s does not exists" % id_or_name
        LOG.error(msg)
        self.module.fail_json(msg=msg)
self.module.params['snapshot_id']: + id_or_name = self.module.params['snapshot_id'] + elif self.module.params['snapshot_name']: + id_or_name = self.module.params['snapshot_name'] + else: + msg = "Please provide snapshot ID/name, to get snapshot" + LOG.error(msg) + self.module.fail_json(msg=msg) + + LOG.info("Getting snapshot: %s", id_or_name) + try: + if id_or_name: + snap_obj = self.unity.get_snap( + _id=self.module.params['snapshot_id'], + name=self.module.params['snapshot_name']) + else: + msg = "Failed to get the snapshot. Please provide snapshot " \ + "details" + LOG.error(msg) + self.module.fail_json(msg=msg) + except utils.UnityResourceNotFoundError as e: + msg = "Failed to get snapshot: %s error: %s" % (id_or_name, + str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + except utils.HTTPClientError as e: + if e.http_status == 401: + msg = "Failed to get snapshot due to incorrect " \ + "username/password error: %s" % str(e) + else: + msg = "Failed to get snapshot error: %s" % str(e) + LOG.error(msg) + except Exception as e: + msg = "Failed to get snapshot: %s error: %s" % (id_or_name, + str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + if snap_obj and snap_obj.existed: + LOG.info("Successfully got the snapshot: %s", id_or_name) + return snap_obj + else: + msg = "Snapshot: %s does not exists" % id_or_name + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_host_name_by_id(self, host_id): + """ Get host name by host ID + + :param host_id: str + :return: unity host name + :rtype: str + """ + LOG.info("Getting host name from ID: %s", host_id) + try: + host_obj = self.unity.get_host(_id=host_id) + if host_obj and host_obj.existed: + LOG.info("Successfully got host name: %s", host_obj.name) + return host_obj.name + else: + msg = "Host ID: %s does not exists" % host_id + LOG.error(msg) + self.module.fail_json(msg=msg) + except Exception as e: + msg = "Failed to get host name by ID: %s error: %s" % ( + host_id, str(e)) + LOG.error(msg) + 
def get_host_access_string_value(self, host_dict):
    """ Build the comma-terminated host-string token for one host entry

    Host IDs are first resolved to host names; domains are prefixed
    with '*.' and netgroups with '@' as the Unity host-string syntax
    requires.

    :param host_dict: one host access entry
    :return: token ending with ',', or None when no known key is set
    """
    if host_dict.get("host_id"):
        # An ID must be translated to its host name before use.
        return self.get_host_name_by_id(host_dict.get("host_id")) + ','

    # (key, prefix) pairs checked in fixed precedence order.
    for key, prefix in (("host_name", ""), ("ip_address", ""),
                        ("subnet", ""), ("domain", "*."),
                        ("netgroup", "@")):
        value = host_dict.get(key)
        if value:
            return prefix + value + ','
    return None
def correct_payload_as_per_sdk(self, payload, nfs_details=None):
    """ Correct payload keys as required by SDK

    :param payload: Payload used for create/modify operation
    :type payload: dict
    :param nfs_details: existing NFS details, used to skip resetting
        export_option when it is already 1
    :type nfs_details: dict
    :return: the same payload dict, adjusted in place
    :rtype: dict
    """
    host_string_params = self.host_param_mapping.values()
    if set(payload.keys()) & set(host_string_params):
        # Any host-string change requires export_option=1 unless the
        # share already carries that value.
        if not nfs_details or nfs_details['export_option'] != 1:
            payload['export_option'] = 1
        # SDK names this field 'root_access_hosts_string' instead of
        # 'read_write_root_hosts_string'.
        if 'read_write_root_hosts_string' in payload:
            payload['root_access_hosts_string'] = payload.pop(
                'read_write_root_hosts_string')

    return payload
def create_nfs_share_from_snapshot(self):
    """ Create nfs share from given snapshot

    Requires both 'nfs_export_name' and 'path'; advanced parameters are
    collected from the playbook and normalized to SDK key names before
    the create call.

    :return: nfs_share object
    :rtype: UnityNfsShare
    """
    export_name = self.module.params['nfs_export_name']
    export_path = self.module.params['path']

    if not export_name or not export_path:
        msg = "Please provide name and path both for create"
        LOG.error(msg)
        self.module.fail_json(msg=msg)

    # Gather optional attributes and rename keys to what the SDK expects.
    create_kwargs = self.correct_payload_as_per_sdk(
        self.get_adv_param_from_pb())

    LOG.info("Creating nfs share from snap with param: %s", create_kwargs)
    try:
        nfs_obj = utils.UnityNfsShare.create_from_snap(
            cli=self.cli, name=export_name, snap=self.snap_obj,
            path=export_path, **create_kwargs)
    except utils.UnityNfsShareNameExistedError as e:
        # Duplicate share name: surface the SDK message as-is.
        LOG.error(str(e))
        self.module.fail_json(msg=str(e))
    except Exception as e:
        msg = "Failed to create nfs share: %s error: %s" % (export_name,
                                                            str(e))
        LOG.error(msg)
        self.module.fail_json(msg=msg)
    else:
        LOG.info("Successfully created nfs share: %s", nfs_obj)
        return nfs_obj
def remove_host(self, existing_host_dict, new_host_dict):
    """ Compares & remove new hosts from the existing ones and provide
    the remaining hosts

    :param existing_host_dict: host params currently set on the nfs
        export that is being modified
    :type existing_host_dict: dict
    :param new_host_dict: host params requested for removal
    :type new_host_dict: dict
    :return: host params with the requested entries removed; only keys
        whose value actually changes are present
    :rtype: dict
    """
    modify_host_dict = {}

    for key in existing_host_dict:
        LOG.info("Checking remove host for param: %s", key)
        current_hosts = self.convert_host_str_to_list(
            existing_host_dict[key])
        requested = self.convert_host_str_to_list(new_host_dict[key])

        if not requested:
            LOG.info("Nothing to remove as no host given")
            continue

        # Reject duplicate removal requests outright.
        if len(requested) > len(set(requested)):
            msg = "Duplicate host given: %s in host param: %s" % (
                requested, key)
            LOG.error(msg)
            self.module.fail_json(msg=msg)

        if requested and not current_hosts:
            LOG.info("Existing list is already empty, so nothing to "
                     "remove")
            continue

        overlap = set(requested) & set(current_hosts)
        if not overlap:
            # none of the requested hosts are present
            continue

        remaining = set(current_hosts) - overlap
        # SDK expects a ',' separated string; str() also renders any
        # IPv4Network/IPv6Network objects back to text.
        modify_host_dict[key] = ",".join(str(v) for v in remaining)

    return modify_host_dict
self.get_host_dict_from_pb() + if new_host_dict: + try: + if is_nfs_have_host_with_host_obj(nfs_details): + msg = "Modification of nfs host is restricted as nfs " \ + "already have host added using host obj" + LOG.error(msg) + self.module.fail_json(msg=msg) + LOG.info("Extracting same given param from nfs") + existing_host_dict = {k: nfs_details[k] for k in new_host_dict} + except KeyError as e: + msg = "Failed to extract key-value from current nfs: %s" % \ + str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + if self.module.params['host_state'] == HOST_STATE_LIST[0]: + # present-in-export + LOG.info("Getting host to be added") + modify_host_dict = self.add_host(existing_host_dict, + new_host_dict) + else: + # absent-in-export + LOG.info("Getting host to be removed") + modify_host_dict = self.remove_host(existing_host_dict, + new_host_dict) + + if modify_host_dict: + modify_param.update(modify_host_dict) + + if not modify_param: + LOG.info("Existing nfs attribute value is same as given input, " + "so returning same nfs object - idempotency case") + return False, nfs_obj + + modify_param = self.correct_payload_as_per_sdk( + modify_param, nfs_details) + + try: + resp = nfs_obj.modify(**modify_param) + resp.raise_if_err() + except Exception as e: + msg = "Failed to modify nfs error: %s" % str(e) + LOG.error(msg) + self.module.fail_json(msg=msg) + + return True, self.get_nfs_share(id=nfs_obj.id) + + def perform_module_operation(self): + """ Perform different actions on nfs based on user parameter + chosen in playbook """ + + changed = False + nfs_share_details = {} + + self.validate_input() + + self.nas_obj = None + if self.module.params['nas_server_id'] or self.module.params[ + 'nas_server_name']: + self.nas_obj = self.get_nas_from_given_input() + + self.fs_obj = None + self.snap_obj = None + if self.is_given_nfs_for_fs: + self.fs_obj = self.get_filesystem() + elif self.is_given_nfs_for_fs is False: + self.snap_obj = self.get_snapshot() + + # Get nfs Share + 
nfs_obj = self.get_nfs_share( + id=self.module.params['nfs_export_id'], + name=self.module.params['nfs_export_name'] + ) + + # Delete nfs Share + if self.module.params['state'] == STATE_LIST[1]: + if nfs_obj: + # delete_nfs_share() does not return any value + # In case of successful delete, lets nfs_obj set None + # to avoid fetching and displaying attribute + nfs_obj = self.delete_nfs_share(nfs_obj) + changed = True + elif not nfs_obj: + # create + nfs_obj = self.create_nfs_share() + changed = True + else: + # modify + changed, nfs_obj = self.modify_nfs_share(nfs_obj) + + # Get display attributes + if self.module.params['state'] and nfs_obj: + nfs_share_details = get_nfs_share_display_attrs(nfs_obj) + + result = {"changed": changed, + "nfs_share_details": nfs_share_details} + self.module.exit_json(**result) + + +def get_nfs_share_display_attrs(nfs_obj): + """ Provide nfs share attributes for display + + :param nfs: NFS share obj + :type nfs: UnityNfsShare + :return: nfs_share_details + :rtype: dict + """ + LOG.info("Getting nfs share details from nfs share object") + nfs_share_details = nfs_obj._get_properties() + + # Adding filesystem_name to nfs_share_details + LOG.info("Updating filesystem details") + nfs_share_details['filesystem']['UnityFileSystem']['name'] = \ + nfs_obj.filesystem.name + if 'id' not in nfs_share_details['filesystem']['UnityFileSystem']: + nfs_share_details['filesystem']['UnityFileSystem']['id'] = \ + nfs_obj.filesystem.id + + # Adding nas server details + LOG.info("Updating nas server details") + nas_details = nfs_obj.filesystem._get_properties()['nas_server'] + nas_details['UnityNasServer']['name'] = \ + nfs_obj.filesystem.nas_server.name + nfs_share_details['nas_server'] = nas_details + + # Adding snap.id & snap.name if nfs_obj is for snap + if is_nfs_obj_for_snap(nfs_obj): + LOG.info("Updating snap details") + nfs_share_details['snap']['UnitySnap']['id'] = nfs_obj.snap.id + nfs_share_details['snap']['UnitySnap']['name'] = 
nfs_obj.snap.name + + LOG.info("Successfully updated nfs share details") + return nfs_share_details + + +def is_nfs_have_host_with_host_obj(nfs_details): + """ Check whether nfs host is already added using host obj + + :param nfs_details: nfs details + :return: True if nfs have host already added with host obj else False + :rtype: bool + """ + host_obj_params = ('no_access_hosts', 'read_only_hosts', + 'read_only_root_access_hosts', 'read_write_hosts', + 'root_access_hosts') + for host_obj_param in host_obj_params: + if nfs_details.get(host_obj_param): + return True + return False + + +def get_ip_version(val): + try: + val = u'{0}'.format(val) + ip = ip_network(val, strict=False) + return ip.version + except ValueError: + return 0 + + +def is_nfs_obj_for_fs(nfs_obj): + """ Check whether the nfs_obj if for filesystem + + :param nfs_obj: NFS share object + :return: True if nfs_obj is of filesystem type + :rtype: bool + """ + if nfs_obj.type == utils.NFSTypeEnum.NFS_SHARE: + return True + return False + + +def is_nfs_obj_for_snap(nfs_obj): + """ Check whether the nfs_obj if for snapshot + + :param nfs_obj: NFS share object + :return: True if nfs_obj is of snapshot type + :rtype: bool + """ + if nfs_obj.type == utils.NFSTypeEnum.NFS_SNAPSHOT: + return True + return False + + +def get_nfs_parameters(): + """ Provides parameters required for the NFS share module on Unity """ + + return dict( + nfs_export_name=dict(required=False, type='str'), + nfs_export_id=dict(required=False, type='str'), + filesystem_id=dict(required=False, type='str'), + filesystem_name=dict(required=False, type='str'), + snapshot_id=dict(required=False, type='str'), + snapshot_name=dict(required=False, type='str'), + nas_server_id=dict(required=False, type='str'), + nas_server_name=dict(required=False, type='str'), + path=dict(required=False, type='str', no_log=True), + description=dict(required=False, type='str'), + default_access=dict(required=False, type='str', + choices=DEFAULT_ACCESS_LIST), + 
min_security=dict(required=False, type='str', + choices=MIN_SECURITY_LIST), + adv_host_mgmt_enabled=dict(required=False, type='bool', default=None), + no_access_hosts=HOST_DICT, + read_only_hosts=HOST_DICT, + read_only_root_hosts=HOST_DICT, + read_write_hosts=HOST_DICT, + read_write_root_hosts=HOST_DICT, + host_state=dict(required=False, type='str', choices=HOST_STATE_LIST), + anonymous_uid=dict(required=False, type='int'), + anonymous_gid=dict(required=False, type='int'), + state=dict(required=True, type='str', choices=STATE_LIST) + ) + + +def main(): + """ Create UnityNFS object and perform action on it + based on user input from playbook""" + obj = NFS() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py new file mode 100644 index 00000000..cca15433 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py @@ -0,0 +1,494 @@ +#!/usr/bin/python +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing NFS server on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +module: nfsserver +version_added: '1.4.0' +short_description: Manage NFS server on Unity storage system +description: +- Managing the NFS server on the Unity storage system includes creating NFS server, getting NFS server details + and deleting NFS server attributes. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Meenakshi Dembi (@dembim) + +options: + nas_server_name: + description: + - Name of the NAS server on which NFS server will be hosted. + type: str + nas_server_id: + description: + - ID of the NAS server on which NFS server will be hosted. 
+ type: str + nfs_server_id: + description: + - ID of the NFS server. + type: str + host_name: + description: + - Host name of the NFS server. + type: str + nfs_v4_enabled: + description: + - Indicates whether the NFSv4 is enabled on the NAS server. + type: bool + is_secure_enabled: + description: + - Indicates whether the secure NFS is enabled. + type: bool + kerberos_domain_controller_type: + description: + - Type of Kerberos Domain Controller used for secure NFS service. + choices: [CUSTOM, UNIX, WINDOWS] + type: str + kerberos_domain_controller_username: + description: + - Kerberos Domain Controller administrator username. + type: str + kerberos_domain_controller_password: + description: + - Kerberos Domain Controller administrator password. + type: str + is_extended_credentials_enabled: + description: + - Indicates whether support for more than 16 unix groups in a Unix credential. + type: bool + remove_spn_from_kerberos: + description: + - Indicates whether to remove the SPN from Kerberos Domain Controller. + default: true + type: bool + state: + description: + - Define whether the NFS server should exist or not. + choices: [absent, present] + required: true + type: str +notes: +- The I(check_mode) is supported. +- Modify operation for NFS Server is not supported. +- When I(kerberos_domain_controller_type) is C(UNIX), I(kdc_type) in I(nfs_server_details) output is displayed as C(null). +''' + +EXAMPLES = r''' + + - name: Create NFS server with kdctype as Windows + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + host_name: "dummy_nas23" + is_secure_enabled: True + kerberos_domain_controller_type: "WINDOWS" + kerberos_domain_controller_username: "administrator" + kerberos_domain_controller_password: "Password123!" 
+ is_extended_credentials_enabled: True + nfs_v4_enabled: True + state: "present" + + - name: Create NFS server with kdctype as Unix + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + host_name: "dummy_nas23" + is_secure_enabled: True + kerberos_domain_controller_type: "UNIX" + is_extended_credentials_enabled: True + nfs_v4_enabled: True + state: "present" + + - name: Get NFS server details + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + state: "present" + + - name: Delete NFS server + dellemc.unity.nfsserver: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + nas_server_name: "dummy_nas" + kerberos_domain_controller_username: "administrator" + kerberos_domain_controller_password: "Password123!" + unjoin_server_account: False + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: true +nfs_server_details: + description: Details of the NFS server. + returned: When NFS server exists + type: dict + contains: + credentials_cache_ttl: + description: Credential cache refresh timeout. Resolution is in minutes. Default value is 15 minutes. + type: str + existed: + description: Indicates if NFS Server exists. + type: bool + host_name: + description: Host name of the NFS server. + type: str + id: + description: Unique identifier of the NFS Server instance. + type: str + is_extended_credentials_enabled: + description: Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential. + type: bool + is_secure_enabled: + description: Indicates whether secure NFS is enabled on the NFS server. 
+ type: bool + kdc_type: + description: Type of Kerberos Domain Controller used for secure NFS service. + type: str + nfs_v4_enabled: + description: Indicates whether NFSv4 is enabled on the NAS server. + type: bool + servicee_principal_name: + description: The Service Principal Name (SPN) for the NFS Server. + type: str + sample: { + "credentials_cache_ttl": "0:15:00", + "existed": true, + "file_interfaces": { + "UnityFileInterfaceList": [ + { + "UnityFileInterface": { + "hash": 8778980109421, + "id": "if_37" + } + } + ] + }, + "hash": 8778980109388, + "host_name": "dummy_nas23.pie.lab.emc.com", + "id": "nfs_51", + "is_extended_credentials_enabled": true, + "is_secure_enabled": true, + "kdc_type": "KdcTypeEnum.WINDOWS", + "nas_server": { + "UnityNasServer": { + "hash": 8778980109412 + } + }, + "nfs_v4_enabled": true, + "servicee_principal_name": null + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('nfsserver') + +application_type = "Ansible/1.5.0" + + +class NFSServer(object): + """Class with NFS server operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_nfs_server_parameters()) + + mutually_exclusive = [['nas_server_name', 'nas_server_id']] + required_one_of = [['nfs_server_id', 'nas_server_name', 'nas_server_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + LOG.info('Check Mode Flag %s', self.module.check_mode) + + def get_nfs_server_details(self, nfs_server_id=None, nas_server_id=None): + 
"""Get NFS server details. + :param: nfs_server_id: The ID of the NFS server + :param: nas_server_id: The name of the NAS server + :return: Dict containing NFS server details if exists + """ + LOG.info("Getting NFS server details") + try: + if nfs_server_id: + nfs_server_details = self.unity_conn.get_nfs_server(_id=nfs_server_id) + return nfs_server_details._get_properties() + elif nas_server_id: + nfs_server_details = self.unity_conn.get_nfs_server(nas_server=nas_server_id) + if len(nfs_server_details) > 0: + return process_dict(nfs_server_details._get_properties()) + return None + except utils.HttpError as e: + if e.http_status == 401: + msg = 'Incorrect username or password provided.' + LOG.error(msg) + self.module.fail_json(msg=msg) + else: + err_msg = "Failed to get details of NFS Server" \ + " with error {0}".format(str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + except utils.UnityResourceNotFoundError as e: + err_msg = "Failed to get details of NFS Server" \ + " with error {0}".format(str(e)) + LOG.error(err_msg) + return None + + def get_nfs_server_instance(self, nfs_server_id): + """Get NFS server instance. + :param: nfs_server_id: The ID of the NFS server + :return: Return NFS server instance if exists + """ + + try: + nfs_server_obj = self.unity_conn.get_nfs_server(_id=nfs_server_id) + return nfs_server_obj + except Exception as e: + error_msg = "Failed to get the NFS server %s instance" \ + " with error %s" % (nfs_server_id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def delete_nfs_server(self, nfs_server_id, skip_unjoin=None, domain_username=None, domain_password=None): + """Delete NFS server. 
+ :param: nfs_server_id: The ID of the NFS server + :param: skip_unjoin: Flag indicating whether to unjoin SMB server account from AD before deletion + :param: domain_username: The domain username + :param: domain_password: The domain password + :return: Return True if NFS server is deleted + """ + + LOG.info("Deleting NFS server") + try: + if not self.module.check_mode: + nfs_obj = self.get_nfs_server_instance(nfs_server_id=nfs_server_id) + nfs_obj.delete(skip_kdc_unjoin=skip_unjoin, username=domain_username, password=domain_password) + return True + except Exception as e: + msg = "Failed to delete NFS server: %s with error: %s" % (nfs_server_id, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_nas_server_id(self, nas_server_name): + """Get NAS server ID. + :param: nas_server_name: The name of NAS server + :return: Return NAS server ID if exists + """ + + LOG.info("Getting NAS server ID") + try: + obj_nas = self.unity_conn.get_nas_server(name=nas_server_name) + return obj_nas.get_id() + except Exception as e: + msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def is_modification_required(self, is_extended_credentials_enabled, nfs_server_details): + """Check if modification is required in existing NFS server + :param: is_extended_credentials_enabled: Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential. 
+ :param: nfs_server_details: NFS server details + :return: True if modification is required + """ + + LOG.info("Checking if any modification is required") + # Check for Extend Credential + if is_extended_credentials_enabled is not None and \ + is_extended_credentials_enabled != nfs_server_details['is_extended_credentials_enabled']: + return True + + def create_nfs_server(self, nas_server_id, host_name=None, nfs_v4_enabled=None, is_secure_enabled=None, + kerberos_domain_controller_type=None, kerberos_domain_controller_username=None, + kerberos_domain_controller_password=None, is_extended_credentials_enabled=None): + """Create NFS server. + :param: nas_server_id: The ID of NAS server. + :param: host_name: Name of NFS Server. + :param: nfs_v4_enabled: Indicates whether the NFSv4 is enabled on the NAS server. + :param: is_secure_enabled: Indicates whether the secure NFS is enabled. + :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller used for secure NFS service. + :param: kerberos_domain_controller_username: Kerberos Domain Controller administrator username. + :param: kerberos_domain_controller_password: Kerberos Domain Controller administrator password. + :param: is_extended_credentials_enabled: Indicates whether support for more than 16 unix groups in a Unix credential. 
+ """ + + LOG.info("Creating NFS server") + try: + if not self.module.check_mode: + kdc_enum_type = get_enum_kdctype(kerberos_domain_controller_type) + if kerberos_domain_controller_type == "UNIX": + is_extended_credentials_enabled = None + is_secure_enabled = None + utils.UnityNfsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, host_name=host_name, + nfs_v4_enabled=nfs_v4_enabled, + is_secure_enabled=is_secure_enabled, kdc_type=kdc_enum_type, + kdc_username=kerberos_domain_controller_username, + kdc_password=kerberos_domain_controller_password, + is_extended_credentials_enabled=is_extended_credentials_enabled) + return True + except Exception as e: + msg = "Failed to create NFS server with on NAS Server %s with error: %s" % (nas_server_id, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def validate_input_params(self): + param_list = ["nfs_server_id", "nas_server_id", "nas_server_name", "host_name", "kerberos_domain_controller_username", + "kerberos_domain_controller_password"] + + for param in param_list: + msg = "Please provide valid value for: %s" % param + if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0: + errmsg = msg.format(param) + self.module.fail_json(msg=errmsg) + + def perform_module_operation(self): + """ + Perform different actions on NFS server module based on parameters + passed in the playbook + """ + nfs_server_id = self.module.params['nfs_server_id'] + nas_server_id = self.module.params['nas_server_id'] + nas_server_name = self.module.params['nas_server_name'] + host_name = self.module.params['host_name'] + nfs_v4_enabled = self.module.params['nfs_v4_enabled'] + is_secure_enabled = self.module.params['is_secure_enabled'] + kerberos_domain_controller_type = self.module.params['kerberos_domain_controller_type'] + kerberos_domain_controller_username = self.module.params['kerberos_domain_controller_username'] + kerberos_domain_controller_password = 
self.module.params['kerberos_domain_controller_password'] + is_extended_credentials_enabled = self.module.params['is_extended_credentials_enabled'] + remove_spn_from_kerberos = self.module.params['remove_spn_from_kerberos'] + state = self.module.params['state'] + + # result is a dictionary that contains changed status and NFS server details + result = dict( + changed=False, + nfs_server_details={} + ) + + modify_flag = False + + self.validate_input_params() + + if nas_server_name: + nas_server_id = self.get_nas_server_id(nas_server_name) + + nfs_server_details = self.get_nfs_server_details(nfs_server_id=nfs_server_id, + nas_server_id=nas_server_id) + + # Check if modification is required + if nfs_server_details and state == 'present': + modify_flag = self.is_modification_required(is_extended_credentials_enabled, nfs_server_details) + if modify_flag: + self.module.fail_json(msg="Modification of NFS Server parameters is not supported through Ansible module") + + if not nfs_server_details and state == 'present': + if not nas_server_id: + self.module.fail_json(msg="Please provide nas server id/name to create NFS server.") + + result['changed'] = self.create_nfs_server(nas_server_id, host_name, nfs_v4_enabled, + is_secure_enabled, kerberos_domain_controller_type, + kerberos_domain_controller_username, + kerberos_domain_controller_password, + is_extended_credentials_enabled) + + if state == 'absent' and nfs_server_details: + skip_unjoin = not remove_spn_from_kerberos + result['changed'] = self.delete_nfs_server(nfs_server_details["id"], skip_unjoin, + kerberos_domain_controller_username, + kerberos_domain_controller_password) + + if state == 'present': + result['nfs_server_details'] = self.get_nfs_server_details(nfs_server_id=nfs_server_id, + nas_server_id=nas_server_id) + self.module.exit_json(**result) + + +def get_nfs_server_parameters(): + """This method provide parameters required for the ansible + NFS server module on Unity""" + return dict( + 
nfs_server_id=dict(type='str'), + host_name=dict(type='str'), + nfs_v4_enabled=dict(type='bool'), + is_secure_enabled=dict(type='bool'), + kerberos_domain_controller_type=dict(type='str', choices=['UNIX', 'WINDOWS', 'CUSTOM']), + kerberos_domain_controller_username=dict(type='str'), + kerberos_domain_controller_password=dict(type='str', no_log=True), + nas_server_name=dict(type='str'), + nas_server_id=dict(type='str'), + is_extended_credentials_enabled=dict(type='bool'), + remove_spn_from_kerberos=dict(default=True, type='bool'), + state=dict(required=True, type='str', choices=['present', 'absent']), + ) + + +def get_enum_kdctype(kerberos_domain_controller_type): + """Getting correct enum values for kerberos_domain_controller_type + :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller used for secure NFS service. + :return: enum value for kerberos_domain_controller_type. + """ + + if utils.KdcTypeEnum[kerberos_domain_controller_type]: + kerberos_domain_controller_type = utils.KdcTypeEnum[kerberos_domain_controller_type] + return kerberos_domain_controller_type + + +def process_dict(nfs_server_details): + """Process NFS server details. 
+ :param: nfs_server_details: Dict containing NFS server details + :return: Processed dict containing NFS server details + """ + param_list = ['credentials_cache_ttl', 'file_interfaces', 'host_name', 'id', 'kdc_type', 'nas_server', 'is_secure_enabled', + 'is_extended_credentials_enabled', 'nfs_v4_enabled', 'servicee_principal_name'] + + for param in param_list: + if param in nfs_server_details and param == 'credentials_cache_ttl': + nfs_server_details[param] = str(nfs_server_details[param][0]) + else: + nfs_server_details[param] = nfs_server_details[param][0] + return nfs_server_details + + +def main(): + """Create Unity NFS server object and perform action on it + based on user input from playbook""" + obj = NFSServer() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/smbshare.py b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py new file mode 100644 index 00000000..ae5fe182 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py @@ -0,0 +1,877 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: smbshare +version_added: '1.1.0' +short_description: Manage SMB shares on Unity storage system +extends_documentation_fragment: +- dellemc.unity.unity +author: +- P Srinivas Rao (@srinivas-rao5) +description: +- Managing SMB Shares on Unity storage system includes create, get, + modify, and delete the smb shares. +options: + share_name: + description: + - Name of the SMB share. + - Required during creation of the SMB share. + - For all other operations either I(share_name) or I(share_id) is required. + type: str + share_id: + description: + - ID of the SMB share. 
+ - Should not be specified during creation. Id is auto generated. + - For all other operations either I(share_name) or I(share_id) is required. + - If I(share_id) is used then no need to pass nas_server/filesystem/snapshot/path. + type: str + path: + description: + - Local path to the file system/Snapshot or any existing sub-folder of + the file system/Snapshot that is shared over the network. + - Path is relative to the root of the filesystem. + - Required for creation of the SMB share. + type: str + filesystem_id: + description: + - The ID of the File System. + - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem. + - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to + uniquely identify the filesystem. + - Options I(filesystem_name) and I(filesystem_id) are mutually exclusive parameters. + type: str + snapshot_id: + description: + - The ID of the Filesystem Snapshot. + - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot. + - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to + uniquely identify the snapshot. + - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters. + type: str + nas_server_id: + description: + - The ID of the NAS Server. + - It is not required if I(share_id) is used. + type: str + filesystem_name: + description: + - The Name of the File System. + - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem. + - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to + uniquely identify the filesystem. + - Options I(filesystem_name) and I(filesytem_id) are mutually exclusive parameters. + type: str + snapshot_name: + description: + - The Name of the Filesystem Snapshot. 
+ - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot. + - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to + uniquely identify the snapshot. + - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters. + type: str + nas_server_name: + description: + - The Name of the NAS Server. + - It is not required if I(share_id) is used. + - Options I(nas_server_name) and I(nas_server_id) are mutually exclusive parameters. + type: str + description: + description: + - Description for the SMB share. + - Optional parameter when creating a share. + - To modify, pass the new value in description field. + type: str + is_abe_enabled: + description: + - Indicates whether Access-based Enumeration (ABE) for SMB share is enabled. + - During creation, if not mentioned then default is C(false). + type: bool + is_branch_cache_enabled: + description: + - Indicates whether Branch Cache optimization for SMB share is enabled. + - During creation, if not mentioned then default is C(false). + type: bool + is_continuous_availability_enabled: + description: + - Indicates whether continuous availability for SMB 3.0 is enabled. + - During creation, if not mentioned then default is C(false). + type: bool + is_encryption_enabled: + description: + - Indicates whether encryption for SMB 3.0 is enabled at the shared folder level. + - During creation, if not mentioned then default is C(false). + type: bool + offline_availability: + description: + - Defines valid states of Offline Availability. + - C(MANUAL)- Only specified files will be available offline. + - C(DOCUMENTS)- All files that users open will be available offline. + - C(PROGRAMS)- Program will preferably run from the offline cache even when + connected to the network. All files that users open will be available offline. + - C(NONE)- Prevents clients from storing documents and programs in offline cache. 
+ type: str + choices: ["MANUAL","DOCUMENTS","PROGRAMS","NONE"] + umask: + description: + - The default UNIX umask for new files created on the SMB Share. + type: str + state: + description: + - Define whether the SMB share should exist or not. + - Value C(present) indicates that the share should exist on the system. + - Value C(absent) indicates that the share should not exist on the system. + type: str + required: true + choices: ['absent', 'present'] +notes: +- When ID/Name of the filesystem/snapshot is passed then I(nas_server) is not required. + If passed, then filesystem/snapshot should exist for the mentioned I(nas_server), + else the task will fail. +- The I(check_mode) is not supported. +''' + +EXAMPLES = r''' +- name: Create SMB share for a filesystem + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_smb_share" + filesystem_name: "sample_fs" + nas_server_id: "NAS_11" + path: "/sample_fs" + description: "Sample SMB share created" + is_abe_enabled: True + is_branch_cache_enabled: True + offline_availability: "DOCUMENTS" + is_continuous_availability_enabled: True + is_encryption_enabled: True + umask: "777" + state: "present" +- name: Modify Attributes of SMB share for a filesystem + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_smb_share" + nas_server_name: "sample_nas_server" + description: "Sample SMB share attributes updated" + is_abe_enabled: False + is_branch_cache_enabled: False + offline_availability: "MANUAL" + is_continuous_availability_enabled: "False" + is_encryption_enabled: "False" + umask: "022" + state: "present" +- name: Create SMB share for a snapshot + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: 
"{{validate_certs}}" + share_name: "sample_snap_smb_share" + snapshot_name: "sample_snapshot" + nas_server_id: "NAS_11" + path: "/sample_snapshot" + description: "Sample SMB share created for snapshot" + is_abe_enabled: True + is_branch_cache_enabled: True + is_continuous_availability_enabled: True + is_encryption_enabled: True + umask: "777" + state: "present" +- name: Modify Attributes of SMB share for a snapshot + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_name: "sample_snap_smb_share" + snapshot_name: "sample_snapshot" + description: "Sample SMB share attributes updated for snapshot" + is_abe_enabled: False + is_branch_cache_enabled: False + offline_availability: "MANUAL" + is_continuous_availability_enabled: "False" + is_encryption_enabled: "False" + umask: "022" + state: "present" +- name: Get details of SMB share + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_id: "{{smb_share_id}}" + state: "present" +- name: Delete SMB share + dellemc.unity.smbshare: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + share_id: "{{smb_share_id}}" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True +smb_share_details: + description: The SMB share details. + type: dict + returned: When share exists. + contains: + id: + description: The ID of the SMB share. + type: str + name: + description: Name of the SMB share. + type: str + sample: "sample_smb_share" + filesystem_id: + description: The ID of the Filesystem. + type: str + filesystem_name: + description: The Name of the filesystem + type: str + snapshot_id: + description: The ID of the Snapshot. 
+ type: str + snapshot_name: + description: The Name of the Snapshot. + type: str + nas_server_id: + description: The ID of the nas_server. + type: str + nas_server_name: + description: The Name of the nas_server. + type: str + description: + description: Additional information about the share. + type: str + sample: This share is created for demo purpose only. + is_abe_enabled: + description: Whether Access Based enumeration is enforced or not. + type: bool + sample: false + is_branch_cache_enabled: + description: Whether branch cache is enabled or not. + type: bool + sample: false + is_continuous_availability_enabled: + description: Whether the share will be available continuously or not. + type: bool + sample: false + is_encryption_enabled: + description: Whether encryption is enabled or not. + type: bool + sample: false + umask: + description: Unix mask for the SMB share. + type: str + sample: { + "creation_time": "2022-03-17 11:56:54.867000+00:00", + "description": "", + "existed": true, + "export_paths": [ + "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui", + "\\\\10.230.24.26\\multi-prot-hui" + ], + "filesystem": { + "UnityFileSystem": { + "hash": 8748426746492 + } + }, + "filesystem_id": "fs_140", + "filesystem_name": "multi-prot-hui", + "hash": 8748426746588, + "id": "SMBShare_20", + "is_abe_enabled": false, + "is_ace_enabled": false, + "is_branch_cache_enabled": false, + "is_continuous_availability_enabled": false, + "is_dfs_enabled": false, + "is_encryption_enabled": false, + "is_read_only": null, + "modified_time": "2022-03-17 11:56:54.867000+00:00", + "name": "multi-prot-hui", + "nas_server_id": "nas_5", + "nas_server_name": "multi-prot", + "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE", + "path": "/", + "snap": null, + "type": "CIFSTypeEnum.CIFS_SHARE", + "umask": "022" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = 
utils.get_logger('smbshare') + +application_type = "Ansible/1.5.0" + + +class SMBShare(object): + """Class with SMB Share operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_smb_share_parameters()) + + # initialize the ansible module + mut_ex_args = [['share_name', 'share_id'], + ['nas_server_name', 'nas_server_id'], + ['filesystem_name', 'snapshot_name', + 'filesystem_id', 'snapshot_id'], + ['share_id', 'nas_server_name'], + ['share_id', 'nas_server_id'], + ['share_id', 'filesystem_name'], + ['share_id', 'filesystem_id'], + ['share_id', 'path'], + ['share_id', 'snapshot_name'], + ['share_id', 'snapshot_id']] + required_one_of = [['share_id', 'share_name']] + + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mut_ex_args, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + # result is a dictionary that contains changed status and + # snapshot details + self.result = {"changed": False, + 'smb_share_details': {}} + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + self.smb_share_conn_obj = utils.cifs_share.UnityCifsShare( + self.unity_conn) + LOG.info('Connection established with the Unity Array') + + def get_offline_availability_enum(self, offline_availability): + """ + Get the enum of the Offline Availability parameter. 
+ :param offline_availability: The offline_availability string + :return: offline_availability enum + """ + if offline_availability in \ + utils.CifsShareOfflineAvailabilityEnum.__members__: + return utils.CifsShareOfflineAvailabilityEnum[ + offline_availability] + else: + error_msg = "Invalid value {0} for offline availability" \ + " provided".format(offline_availability) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_smb_share_obj(self, share_id=None, share_name=None, + filesystem_obj=None, snap_obj=None, nas_obj=None): + """Get SMB share details""" + msg = "Failed to get details of SMB Share {0} with error {1} " + smb_share = share_name if share_name else share_id + try: + if share_id: + obj_smb = self.unity_conn.get_cifs_share(_id=share_id) + if obj_smb and obj_smb.existed: + LOG.info("Successfully got the SMB share " + "object %s ", obj_smb) + return obj_smb + + elif share_name is not None and filesystem_obj: + # There might be a case where SMB share with same name exists + # for different nas server. Hence, filesystem_obj is passed + # along with share name to get a unique resource. + return self.unity_conn.get_cifs_share( + name=share_name, filesystem=filesystem_obj) + + elif share_name is not None and snap_obj: + # There might be a case where SMB share with same name exists + # for different nas server. Hence, snap_obj is passed + # along with share name to get a unique resource. + return self.unity_conn.get_cifs_share( + name=share_name, snap=snap_obj) + + # This elif is addressing scenario where nas server details is + # passed and neither filesystem nor snapshot details are passed. + elif share_name is not None and nas_obj: + # Multiple smb shares can be received, as only name is passed + smb_share_obj = self.unity_conn.get_cifs_share( + name=share_name) + + # Checking if instance or list of instance is returned. 
+ if isinstance(smb_share_obj, + utils.cifs_share.UnityCifsShareList): + LOG.info("Multiple SMB share with same name found.") + smb_share_obj_list = smb_share_obj + + for smb_share in smb_share_obj_list: + if smb_share.filesystem.nas_server == nas_obj: + return smb_share + + msg = "No SMB share found with the given NAS Server." \ + " Please provide correct share name and" \ + " nas server details." + return None + + # Below statements will execute when there is only single + # smb share returned. + if smb_share_obj.filesystem.nas_server == nas_obj: + return smb_share_obj + msg = "No SMB share found with the given NAS Server." \ + " Please provide correct share name and" \ + " nas server details." + return None + + else: + self.module.fail_json( + msg="Share Name is Passed. Please enter Filesystem/" + "Snapshot/NAS Server Resource along with share_name" + " to get the details of the SMB share") + + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + self.module.fail_json(msg=cred_err) + else: + err_msg = msg.format(smb_share, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + except utils.UnityResourceNotFoundError as e: + err_msg = msg.format(smb_share, str(e)) + LOG.error(err_msg) + return None + + except Exception as e: + err_msg = msg.format(smb_share, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def create_smb_share(self, share_name, path, filesystem_obj=None, + snapshot_obj=None, description=None, + is_abe_enabled=None, is_branch_cache_enabled=None, + is_continuous_availability_enabled=None, + is_encryption_enabled=None, + offline_availability=None, umask=None): + """ + Create SMB Share + :return: SMB Share Object if successful, else error. + """ + if path is None or path == "": + self.module.fail_json(msg="Please enter a valid path." 
+ " Empty string or None provided.") + if not filesystem_obj and not snapshot_obj: + self.module.fail_json(msg="Either Filesystem or Snapshot " + "Resource's Name/ID is required to" + " Create a SMB share") + try: + if filesystem_obj: + return self.smb_share_conn_obj.create( + cli=self.unity_conn._cli, name=share_name, + fs=filesystem_obj, path=path, + is_encryption_enabled=is_encryption_enabled, + is_con_avail_enabled=is_continuous_availability_enabled, + is_abe_enabled=is_abe_enabled, + is_branch_cache_enabled=is_branch_cache_enabled, + umask=umask, description=description, + offline_availability=offline_availability) + else: + return self.smb_share_conn_obj.create_from_snap( + cli=self.unity_conn._cli, name=share_name, + snap=snapshot_obj, path=path, + is_encryption_enabled=is_encryption_enabled, + is_con_avail_enabled=is_continuous_availability_enabled, + is_abe_enabled=is_abe_enabled, + is_branch_cache_enabled=is_branch_cache_enabled, + umask=umask, description=description, + offline_availability=offline_availability) + + except Exception as e: + error_msg = "Failed to create SMB share" \ + " %s with error %s" % (share_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_filesystem(self, filesystem_id=None, filesystem_name=None, + nas_server_obj=None): + """ + Get the Filesystem Object. + :param filesystem_id: ID of the Filesystem. + :param filesystem_name: Name of the filesystem. + :param nas_server_obj: NAS Server object. + :return: Object of the filesystem. 
+ """ + try: + if filesystem_id: + obj_fs = self.unity_conn.get_filesystem(_id=filesystem_id) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem " + "object %s ", obj_fs) + return obj_fs + else: + return self.unity_conn.get_filesystem( + name=filesystem_name, nas_server=nas_server_obj) + return None + except Exception as e: + filesystem = filesystem_name if filesystem_name \ + else filesystem_id + err_msg = "Failed to get filesystem details {0} with" \ + " error {1}".format(filesystem, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def get_snapshot(self, snapshot_name, snapshot_id): + """ + Get the Snapshot Object. + :param snapshot_id: ID of the Snapshot. + :param snapshot_name: Name of the Snapshot + :return: Object of the filesystem. + """ + try: + obj_snap = self.unity_conn.get_snap(_id=snapshot_id, + name=snapshot_name) + if snapshot_id and obj_snap and not obj_snap.existed: + LOG.info("Snapshot object does not exist %s ", obj_snap) + return None + return obj_snap + except Exception as e: + snapshot = snapshot_name if snapshot_name else snapshot_id + err_msg = "Failed to get filesystem snapshots details {0} with" \ + " error {1}".format(snapshot, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def get_nas_server(self, nas_server_name, nas_server_id): + """ + Get the NAS Server Object using NAME/ID of the NAS Server. + :param nas_server_name: Name of the NAS Server + :param nas_server_id: ID of the NAS Server + :return: NAS Server object. 
+ """ + nas_server = nas_server_name if nas_server_name else nas_server_id + try: + obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id, + name=nas_server_name) + if nas_server_id and obj_nas and not obj_nas.existed: + LOG.info("NAS Server object does not exist %s ", obj_nas) + return None + return obj_nas + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + self.module.fail_json(msg=cred_err) + else: + err_msg = "Failed to get details of NAS Server" \ + " {0} with error {1}".format(nas_server, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + except Exception as e: + nas_server = nas_server_name if nas_server_name \ + else nas_server_id + err_msg = "Failed to get nas server details {0} with" \ + " error {1}".format(nas_server, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def delete_smb_share(self, smb_share_obj): + """ + Delete SMB share if exists, else thrown error. 
+ """ + try: + smb_share_obj.delete() + except Exception as e: + error_msg = "Failed to Delete SMB share" \ + " %s with error %s" % (smb_share_obj.name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def to_update(self, smb_share_obj): + LOG.info("Checking Whether the parameters are modified or not.") + + offline_availability = self.module.params['offline_availability'] + # Get the enum for the corresponding offline_availability + if offline_availability: + offline_availability = \ + self.get_offline_availability_enum(offline_availability) + if offline_availability is not None and \ + offline_availability != smb_share_obj.offline_availability: + return True + + smb_share_dict = smb_share_obj._get_properties() + params_list = ['is_abe_enabled', 'is_branch_cache_enabled', + 'is_continuous_availability_enabled', + 'is_encryption_enabled', 'description', 'umask'] + for param in params_list: + if self.module.params[param] is not None and \ + self.module.params[param] != smb_share_dict[param]: + return True + return False + + def update_smb_share(self, smb_share_obj, is_encryption_enabled=None, + is_continuous_availability_enabled=None, + is_abe_enabled=None, + is_branch_cache_enabled=None, + umask=None, description=None, + offline_availability=None): + """ + The Details of the SMB share will be updated in the function. 
+ """ + try: + smb_share_obj.modify( + is_encryption_enabled=is_encryption_enabled, + is_con_avail_enabled=is_continuous_availability_enabled, + is_abe_enabled=is_abe_enabled, + is_branch_cache_enabled=is_branch_cache_enabled, + umask=umask, description=description, + offline_availability=offline_availability) + + except Exception as e: + error_msg = "Failed to Update parameters of SMB share" \ + " %s with error %s" % (smb_share_obj.name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def perform_module_operation(self): + """ + Perform different actions on SMB share based on user parameters + chosen in playbook + """ + state = self.module.params['state'] + share_name = self.module.params['share_name'] + filesystem_name = self.module.params['filesystem_name'] + snapshot_name = self.module.params['snapshot_name'] + nas_server_name = self.module.params['nas_server_name'] + share_id = self.module.params['share_id'] + filesystem_id = self.module.params['filesystem_id'] + snapshot_id = self.module.params['snapshot_id'] + nas_server_id = self.module.params['nas_server_id'] + path = self.module.params['path'] + + description = self.module.params['description'] + is_branch_cache_enabled = \ + self.module.params['is_branch_cache_enabled'] + is_continuous_availability_enabled = \ + self.module.params['is_continuous_availability_enabled'] + is_encryption_enabled = self.module.params['is_encryption_enabled'] + is_abe_enabled = self.module.params['is_abe_enabled'] + umask = self.module.params['umask'] + + offline_availability = self.module.params['offline_availability'] + # Get the enum for the corresponding offline_availability + if offline_availability: + offline_availability = \ + self.get_offline_availability_enum(offline_availability) + + changed = False + ''' + Validate parameters. + ''' + if share_id is not None and \ + (share_id == "" or len(share_id.split()) == 0): + self.module.fail_json(msg="Invalid share id provided." 
+ " Please enter a valid share ID.") + + ''' + Get details of NAS Server, if entered. + ''' + nas_server_obj = None + if nas_server_name or nas_server_id: + nas_server_obj = self.get_nas_server(nas_server_name, + nas_server_id) + if nas_server_obj: + msg = "NAS Server Object:" \ + " {0}".format(nas_server_obj._get_properties()) + LOG.info(msg) + else: + msg = "NAS Server Resource not fetched." + LOG.info(msg) + + ''' + Get details of Filesystem, if entered. + ''' + filesystem_obj = None + if filesystem_id: + filesystem_obj = self.get_filesystem(filesystem_id) + if filesystem_name: + # nas_server_obj is required to uniquely identify filesystem + # resource. If neither nas_server_name nor nas_server_id + # is passed along with filesystem_name then error is thrown. + if not nas_server_obj: + self.module.fail_json(msg="nas_server_id/nas_server_name is " + "required when filesystem_name is " + "passed") + filesystem_obj = self.get_filesystem( + None, filesystem_name, nas_server_obj) + if filesystem_obj: + msg = "Filesystem Object:" \ + " {0}".format(filesystem_obj._get_properties()) + LOG.info(msg) + # Checking if filesystem supports SMB protocol or not. + if filesystem_obj and \ + filesystem_obj.supported_protocols.name == "NFS": + self.module.fail_json(msg="Cannot perform SMB share operations " + "as file system supports only NFS " + "protocol. Please enter a valid " + "Filesystem having supported protocol" + " as SMB or Multiprotocol.") + ''' + Get details of Snapshot, if entered. + ''' + snapshot_obj = None + if snapshot_id or snapshot_name: + # Snapshot Name and Snapshot ID both are unique across array. + # Hence no need to mention nas server details + snapshot_obj = self.get_snapshot(snapshot_name, snapshot_id) + if snapshot_obj: + msg = "Snapshot Object:" \ + " {0}".format(snapshot_obj._get_properties()) + LOG.info(msg) + else: + msg = "Snapshot Resource not fetched." 
+ LOG.info(msg) + + ''' + Get the Details of the SMB Share + ''' + smb_share_obj = self.get_smb_share_obj( + share_id, share_name, filesystem_obj, snapshot_obj, + nas_server_obj) + if smb_share_obj: + msg = "SMB Share Object:" \ + " {0}".format(smb_share_obj._get_properties()) + LOG.info(msg) + elif state == 'present' and share_id: + msg = "Unable to fetch SMB Share Resource. " \ + "Incorrect SMB share id provided. " \ + "Please enter a correct share id." + LOG.error(msg) + self.module.fail_json(msg=msg) + + ''' + Creation of SMB Share + ''' + if state == "present" and not smb_share_obj: + smb_share_obj = self.create_smb_share( + share_name, path, filesystem_obj, snapshot_obj, description, + is_abe_enabled, is_branch_cache_enabled, + is_continuous_availability_enabled, is_encryption_enabled, + offline_availability, umask) + changed = True + + ''' + Update the SMB share details + ''' + if state == "present" and smb_share_obj: + LOG.info("Modify the details of the SMB share.") + update_flag = self.to_update(smb_share_obj) + msg = "Update Flag: {0}".format(str(update_flag)) + LOG.info(msg) + if update_flag: + self.update_smb_share(smb_share_obj, is_encryption_enabled, + is_continuous_availability_enabled, + is_abe_enabled, + is_branch_cache_enabled, + umask, description, + offline_availability) + changed = True + + ''' + Delete the SMB share details + ''' + if state == "absent" and smb_share_obj: + self.delete_smb_share(smb_share_obj) + changed = True + + ''' + Update the changed state and SMB share details + ''' + + self.result["changed"] = changed + smb_details = self.prepare_output_dict(state, share_id, share_name, + filesystem_obj, snapshot_obj, + nas_server_obj) + self.result["smb_share_details"] = smb_details + self.module.exit_json(**self.result) + + def prepare_output_dict(self, state, share_id, share_name, + filesystem_obj, snapshot_obj, nas_server_obj): + smb_share_details = None + smb_share_obj = None + if state == 'present': + smb_share_obj = 
self.get_smb_share_obj( + share_id, share_name, filesystem_obj, + snapshot_obj, nas_server_obj) + smb_share_details = smb_share_obj._get_properties() + if smb_share_details: + # Get Snapshot NAME and ID if SMB share exists for Snapshot + if smb_share_obj.type.name == "CIFS_SNAPSHOT": + smb_share_details['snapshot_name'] = smb_share_obj.snap.name + smb_share_details['snapshot_id'] = smb_share_obj.snap.id + + # Get Filesystem NAME and ID + smb_share_details['filesystem_name'] = \ + smb_share_obj.filesystem.name + smb_share_details['filesystem_id'] = smb_share_obj.filesystem.id + + # Get NAS server NAME and ID + smb_share_details['nas_server_name'] = \ + smb_share_obj.filesystem.nas_server.name + smb_share_details['nas_server_id'] = \ + smb_share_obj.filesystem.nas_server.id + return smb_share_details + + +def get_smb_share_parameters(): + """ + This method provides parameters required for the ansible smb share + modules on Unity + """ + + return dict( + share_name=dict(), share_id=dict(), + filesystem_name=dict(), filesystem_id=dict(), + snapshot_name=dict(), snapshot_id=dict(), + nas_server_name=dict(), nas_server_id=dict(), + path=dict(no_log=True), umask=dict(), description=dict(), + offline_availability=dict( + choices=["MANUAL", "DOCUMENTS", "PROGRAMS", "NONE"]), + is_abe_enabled=dict(type='bool'), + is_branch_cache_enabled=dict(type='bool'), + is_continuous_availability_enabled=dict(type='bool'), + is_encryption_enabled=dict(type='bool'), + state=dict(required=True, choices=['present', 'absent'], type='str') + ) + + +def main(): + """ Create Unity SMB share object and perform action on it + based on user input from playbook""" + obj = SMBShare() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py new file mode 100644 index 00000000..b001abe8 --- /dev/null +++ 
b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py @@ -0,0 +1,751 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" Ansible module for managing Snapshots on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: snapshot +short_description: Manage snapshots on the Unity storage system +description: +- Managing snapshots on the Unity storage system includes create snapshot, + delete snapshot, update snapshot, get snapshot, map host and unmap host. +version_added: '1.1.0' + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- P Srinivas Rao (@srinivas-rao5) +options: + snapshot_name: + description: + - The name of the snapshot. + - Mandatory parameter for creating a snapshot. + - For all other operations either I(snapshot_name) or I(snapshot_id) is + required. + type: str + vol_name: + description: + - The name of the volume for which snapshot is created. + - For creation of a snapshot either I(vol_name) or I(cg_name) is required. + - Not required for other operations. + type: str + cg_name: + description: + - The name of the Consistency Group for which snapshot is created. + - For creation of a snapshot either I(vol_name) or I(cg_name) is required. + - Not required for other operations. + type: str + snapshot_id: + description: + - The id of the snapshot. + - For all operations other than creation either I(snapshot_name) or + I(snapshot_id) is required. + type: str + auto_delete: + description: + - This option specifies whether the snapshot is auto deleted or not. + - If set to C(true), snapshot will expire based on the pool auto deletion + policy. + - If set to (false), snapshot will not be auto deleted + based on the pool auto deletion policy. + - Option I(auto_delete) can not be set to C(true), if I(expiry_time) is specified. 
+ - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned + then snapshot will be created keeping I(auto_delete) as C(true). + - Once the I(expiry_time) is set then snapshot cannot be assigned + to the auto delete policy. + type: bool + expiry_time: + description: + - This option is for specifying the date and time after which the + snapshot will expire. + - The time is to be mentioned in UTC timezone. + - The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits. + type: str + description: + description: + - The additional information about the snapshot can be provided using + this option. + type: str + new_snapshot_name: + description: + - New name for the snapshot. + type: str + state: + description: + - The I(state) option is used to mention the existence of + the snapshot. + type: str + required: true + choices: [ 'absent', 'present' ] + host_name: + description: + - The name of the host. + - Either I(host_name) or I(host_id) is required to map or unmap a snapshot from + a host. + - Snapshot can be attached to multiple hosts. + type: str + host_id: + description: + - The id of the host. + - Either I(host_name) or I(host_id) is required to map or unmap a snapshot from + a host. + - Snapshot can be attached to multiple hosts. + type: str + host_state: + description: + - The I(host_state) option is used to mention the existence of the host + for snapshot. + - It is required when a snapshot is mapped or unmapped from host. + type: str + choices: ['mapped', 'unmapped'] + +notes: + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' + - name: Create a Snapshot for a CG + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + cg_name: "{{cg_name}}" + snapshot_name: "{{cg_snapshot_name}}" + description: "{{description}}" + auto_delete: False + state: "present" + + - name: Create a Snapshot for a volume with Host attached + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + vol_name: "{{vol_name}}" + snapshot_name: "{{vol_snapshot_name}}" + description: "{{description}}" + expiry_time: "04/15/2025 16:30" + host_name: "{{host_name}}" + host_state: "mapped" + state: "present" + + - name: Unmap a host for a Snapshot + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + snapshot_name: "{{vol_snapshot_name}}" + host_name: "{{host_name}}" + host_state: "unmapped" + state: "present" + + - name: Map snapshot to a host + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + port: "{{port}}" + snapshot_name: "{{vol_snapshot_name}}" + host_name: "{{host_name}}" + host_state: "mapped" + state: "present" + + - name: Update attributes of a Snapshot for a volume + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "{{vol_snapshot_name}}" + new_snapshot_name: "{{new_snapshot_name}}" + description: "{{new_description}}" + host_name: "{{host_name}}" + host_state: "unmapped" + state: "present" + + - name: Delete Snapshot of CG + dellemc.unity.snapshot: + unispherehost: "{{unispherehost}}" + username: 
"{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + snapshot_name: "{{cg_snapshot_name}}" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True + +snapshot_details: + description: Details of the snapshot. + returned: When snapshot exists + type: dict + contains: + is_auto_delete: + description: Additional information mentioned for snapshot. + type: str + expiration_time: + description: Date and time after which the snapshot + will expire. + type: str + hosts_list: + description: Contains the name and id of the associated + hosts. + type: dict + id: + description: Unique identifier of the snapshot instance. + type: str + name: + description: The name of the snapshot. + type: str + storage_resource_name: + description: Name of the storage resource for which the + snapshot exists. + type: str + storage_resource_id: + description: Id of the storage resource for which the snapshot + exists. 
+ type: str + sample: { + "access_type": null, + "attached_wwn": null, + "creation_time": "2022-10-21 08:20:25.803000+00:00", + "creator_schedule": null, + "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM", + "creator_user": { + "id": "user_admin" + }, + "description": "Test snap creation", + "existed": true, + "expiration_time": null, + "hash": 8756689457056, + "hosts_list": [], + "id": "85899355291", + "io_limit_policy": null, + "is_auto_delete": true, + "is_modifiable": false, + "is_modified": false, + "is_read_only": true, + "is_system_snap": false, + "last_writable_time": null, + "lun": null, + "name": "ansible_snap_cg_1_1", + "parent_snap": null, + "size": null, + "snap_group": null, + "state": "SnapStateEnum.READY", + "storage_resource_id": "res_95", + "storage_resource_name": "CG_ansible_test_2_new" + } +''' + +import logging +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +from datetime import datetime + +LOG = utils.get_logger('snapshot') + +application_type = "Ansible/1.5.0" + + +class Snapshot(object): + """Class with Snapshot operations""" + + def __init__(self): + """ Define all parameters required by this module""" + + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_snapshot_parameters()) + + mutually_exclusive = [['snapshot_name', 'snapshot_id'], + ['vol_name', 'cg_name'], + ['host_name', 'host_id']] + + required_one_of = [['snapshot_name', 'snapshot_id']] + # initialize the ansible module + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + utils.ensure_required_libs(self.module) + + # result is a dictionary that contains changed status and + # snapshot details + self.result = {"changed": False, + 'snapshot_details': {}} + + self.unity_conn = utils.get_unity_unisphere_connection( + 
self.module.params, application_type) + self.snap_obj = utils.snap.UnitySnap(self.unity_conn) + LOG.info('Connection established with the Unity Array') + + def validate_expiry_time(self, expiry_time): + """Validates the specified expiry_time""" + try: + datetime.strptime(expiry_time, '%m/%d/%Y %H:%M') + except ValueError: + error_msg = "expiry_time not in MM/DD/YYYY HH:MM format" + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def to_update(self, snapshot, new_name=None, description=None, + auto_del=None, expiry_time=None, host=None, + host_state=None): + """Determines whether to update the snapshot or not""" + if expiry_time: + # If the snapshot has is_auto_delete True, + # Check if auto_delete in the input is either None or True + if snapshot.is_auto_delete and (auto_del is None or auto_del): + self.module.fail_json(msg="expiry_time can be assigned when" + " auto delete is False") + if auto_del and snapshot.expiration_time: + error_msg = "expiry_time for snapshot is set." 
\ + " Once it is set then snapshot cannot" \ + " be assigned to auto_delete policy" + self.module.fail_json(msg=error_msg) + if new_name and new_name != snapshot.name: + return True + if description and description != snapshot.description: + return True + if auto_del and auto_del != snapshot.is_auto_delete: + return True + if to_update_expiry_time(snapshot, expiry_time): + return True + if host and to_update_host_list(snapshot, host, host_state): + return True + return False + + def update_snapshot(self, snapshot, new_name=None, + description=None, auto_del=None, expiry_time=None, + host_access_list=None): + try: + duration = None + if expiry_time: + duration = convert_timestamp_to_sec( + expiry_time, self.unity_conn.system_time) + if duration and duration <= 0: + self.module.fail_json(msg="expiry_time should be after" + " the current system time") + snapshot.modify(name=new_name, retentionDuration=duration, + isAutoDelete=auto_del, description=description, + hostAccess=host_access_list) + snapshot.update() + except Exception as e: + error_msg = "Failed to modify snapshot" \ + " [name: %s , id: %s] with error %s"\ + % (snapshot.name, snapshot.id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def create_snapshot(self, snap_name, storage_id, description=None, + auto_del=None, expiry_time=None): + try: + duration = None + if expiry_time: + duration = convert_timestamp_to_sec( + expiry_time, self.unity_conn.system_time) + if duration <= 0: + self.module.fail_json(msg="expiry_time should be after" + " the current system time") + snapshot = self.snap_obj.create( + cli=self.unity_conn._cli, storage_resource=storage_id, + name=snap_name, description=description, + is_auto_delete=auto_del, retention_duration=duration) + return snapshot + except Exception as e: + error_msg = "Failed to create snapshot" \ + " %s with error %s" % (snap_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def delete_snapshot(self, 
snapshot): + try: + if not bool(get_hosts_dict(snapshot)): + snapshot.detach_from(None) + snapshot.delete() + else: + snapshot.delete() + return None + + except Exception as e: + error_msg = "Failed to delete snapshot" \ + " [name: %s, id: %s] with error %s" \ + % (snapshot.name, snapshot.id, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_snapshot_obj(self, name=None, id=None): + snapshot = id if id else name + msg = "Failed to get details of snapshot %s with error %s " + try: + return self.unity_conn.get_snap(name=name, _id=id) + + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + self.module.fail_json(msg=cred_err) + else: + err_msg = msg % (snapshot, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + except utils.UnityResourceNotFoundError as e: + err_msg = msg % (snapshot, str(e)) + LOG.error(err_msg) + return None + + except Exception as e: + err_msg = msg % (snapshot, str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def get_volume_obj(self, name): + try: + return self.unity_conn.get_lun(name=name) + except Exception as e: + error_msg = "Failed to get volume %s with error %s"\ + % (name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_cg_obj(self, name): + try: + return self.unity_conn.get_cg(name=name) + except Exception as e: + error_msg = "Failed to get cg %s with error %s" % (name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_host_obj(self, name=None, id=None): + """ Get the Host object""" + try: + return self.unity_conn.get_host(name=name, _id=id) + except Exception as e: + host = id if id else name + error_msg = "Failed to get host %s with error %s"\ + % (host, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def attach_to_snap(self, snapshot, host): + """ Attach snapshot to a host """ + try: + if not 
    def perform_module_operation(self):
        """
        Perform different actions on snapshot module based on parameters
        chosen in playbook
        """
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        vol_name = self.module.params['vol_name']
        cg_name = self.module.params['cg_name']
        auto_delete = self.module.params['auto_delete']
        expiry_time = self.module.params['expiry_time']
        description = self.module.params['description']
        new_snapshot_name = self.module.params['new_snapshot_name']
        host_name = self.module.params['host_name']
        host_id = self.module.params['host_id']
        host_state = self.module.params['host_state']
        state = self.module.params['state']
        host = None
        storage_resource = None
        changed = False

        LOG.info("Getting Snapshot details")
        snapshot = self.get_snapshot_obj(name=snapshot_name, id=snapshot_id)

        # A returned object with existed == False is a placeholder for a
        # non-existent snapshot; treat it as absent.
        if snapshot and not snapshot.existed:
            snapshot = None
        msg = "snapshot details: %s" % str(snapshot)
        LOG.info(msg)

        # Get Volume Object
        if vol_name is not None:
            if vol_name == "" or vol_name.isspace():
                self.module.fail_json(msg="Invalid vol_name given, Please"
                                          " provide a valid vol_name")
            storage_resource = self.get_volume_obj(name=vol_name)

        # Get Consistency Group Object
        if cg_name is not None:
            if cg_name == "" or cg_name.isspace():
                self.module.fail_json(msg="Invalid cg_name given, Please"
                                          " provide a valid cg_name")
            storage_resource = self.get_cg_obj(name=cg_name)

        # Get host object for volume snapshots
        if host_id or host_name:
            if cg_name:
                self.module.fail_json(msg="Mapping CG snapshot to host"
                                          " is not supported.")
            host = self.get_host_obj(name=host_name, id=host_id)

        # Check whether host_name or host_id is given in input
        # along with host_state
        if (host and not host_state) or (not host and host_state):
            self.module.fail_json(
                msg="Either host_name or host_id along with host_state "
                    "is required to map or unmap a snapshot from a host")

        # Check for error, if user tries to create a snapshot with the
        # same name for other storage resource.
        if snapshot and storage_resource and\
                (snapshot.storage_resource.id != storage_resource.id):
            self.module.fail_json(
                msg="Snapshot %s is of %s storage resource. Cannot create new"
                    " snapshot with same name for %s storage resource"
                    % (snapshot.name, snapshot.storage_resource.name,
                       storage_resource.name))

        # check for valid expiry_time
        if expiry_time is not None and \
                (expiry_time == "" or expiry_time.isspace()):
            self.module.fail_json(msg="Please provide valid expiry_time,"
                                      " empty expiry_time given")
        # Check if in input auto_delete is True and expiry_time is not None
        if expiry_time and auto_delete:
            error_msg = "Cannot set expiry_time if auto_delete given as True"
            LOG.info(error_msg)
            self.module.fail_json(msg=error_msg)

        # Check whether to modify the snapshot or not
        update_flag = False
        if snapshot:
            update_flag = self.to_update(snapshot,
                                         new_name=new_snapshot_name,
                                         description=description,
                                         auto_del=auto_delete,
                                         expiry_time=expiry_time,
                                         host=host, host_state=host_state)
        msg = "update_flag for snapshot %s" % str(update_flag)
        LOG.info(msg)

        # Create a Snapshot
        if not snapshot and state == "present":
            LOG.info("Creating a snapshot")
            if snapshot_id:
                self.module.fail_json(msg="Creation of Snapshot is allowed"
                                          " using snapshot_name only, "
                                          "snapshot_id given")
            if snapshot_name == "" or snapshot_name.isspace():
                self.module.fail_json(msg="snapshot_name is required for"
                                          " creation of a snapshot,"
                                          " empty snapshot_name given")
            if not storage_resource:
                self.module.fail_json(msg="vol_name or cg_name required to"
                                          " create a snapshot")

            if new_snapshot_name:
                self.module.fail_json(
                    msg="new_snapshot_name can not be assigned"
                        " during creation of a snapshot")

            snapshot = self.create_snapshot(snapshot_name,
                                            storage_resource.id,
                                            description, auto_delete,
                                            expiry_time)
            # Mapping, if requested, happens after the snapshot exists.
            if host and host_state == "mapped":
                self.attach_to_snap(snapshot, host)
            changed = True

        # Update the Snapshot
        if snapshot and state == "present" and update_flag:

            LOG.info("Updating the Snapshot details")

            if host_state == 'mapped':
                # Map first, then apply the remaining attribute changes.
                self.attach_to_snap(snapshot, host)
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time)

            elif host_state == 'unmapped':
                # Unmapping is done by rewriting the host access list
                # without the given host.
                host_access_list = create_host_access_list(snapshot,
                                                           host,
                                                           host_state)
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time,
                    host_access_list=host_access_list)

            else:
                # No host change requested; plain attribute update.
                self.update_snapshot(
                    snapshot, new_name=new_snapshot_name,
                    description=description, auto_del=auto_delete,
                    expiry_time=expiry_time)
            changed = True

        # Delete the Snapshot
        if state == "absent" and snapshot:
            # delete_snapshot returns None, clearing the local reference.
            snapshot = self.delete_snapshot(snapshot)
            changed = True

        # Add snapshot details to the result.
        if snapshot:
            snapshot.update()
            self.result["snapshot_details"] = \
                create_snapshot_details_dict(snapshot)
        else:
            self.result["snapshot_details"] = {}

        self.result["changed"] = changed
        self.module.exit_json(**self.result)


def create_snapshot_details_dict(snapshot):
    """ Add name and id of storage resource and hosts to snapshot details """
    snapshot_dict = snapshot._get_properties()
    # Replace the nested storage_resource/host_access objects with flat
    # name/id fields that serialize cleanly into the module result.
    del snapshot_dict['storage_resource']
    del snapshot_dict['host_access']
    snapshot_dict['hosts_list'] = get_hosts_list(
        get_hosts_dict(snapshot))
    snapshot_dict['storage_resource_name'] = \
        snapshot.storage_resource.name
    snapshot_dict['storage_resource_id'] = \
        snapshot.storage_resource.id
    return snapshot_dict


def get_hosts_list(hosts_dict):
    """ Get the host name and host id of all the associated hosts """
    hosts_list = []
    if not hosts_dict:
        return hosts_list

    for host in list(hosts_dict.keys()):
        hosts_list.append(
            {
                "host_name": host.name,
                "host_id": host.id
            }
        )
    return hosts_list
+ if not hosts_dict: + return None + if to_update_host_list(snapshot, host, host_state): + if host_state == "mapped": + return None + for snap_host in list(hosts_dict.keys()): + if snap_host != host: + access_dict = {'host': snap_host, + 'allowedAccess': hosts_dict[snap_host]} + host_access_list.append(access_dict) + return host_access_list + + +def get_hosts_dict(snapshot): + """ This method creates a dictionary, with host as key and + allowed access as value """ + hosts_dict = {} + LOG.info("Inside get_hosts_dict") + if not snapshot.host_access: + return hosts_dict + for host_access_obj in snapshot.host_access: + hosts_dict[host_access_obj.host] = \ + host_access_obj.allowed_access + return hosts_dict + + +def to_update_host_list(snapshot, host, host_state): + """ Determines whether to update hosts list or not""" + hosts_dict = get_hosts_dict(snapshot) + if (not hosts_dict or host not in list(hosts_dict.keys()))\ + and host_state == "mapped": + return True + if (hosts_dict and host in list(hosts_dict.keys())) \ + and host_state == "unmapped": + return True + return False + + +def to_update_expiry_time(snapshot, expiry_time=None): + """ Check whether to update expiry_time or not""" + if not expiry_time: + return False + if snapshot.expiration_time is None: + return True + if convert_timestamp_to_sec(expiry_time, snapshot.expiration_time) != 0: + return True + return False + + +def convert_timestamp_to_sec(expiry_time, snap_time): + """Converts the time difference to seconds""" + snap_time_str = snap_time.strftime('%m/%d/%Y %H:%M') + snap_timestamp = datetime.strptime(snap_time_str, '%m/%d/%Y %H:%M') + expiry_timestamp = datetime.strptime(expiry_time, "%m/%d/%Y %H:%M") + return int((expiry_timestamp - snap_timestamp).total_seconds()) + + +def get_snapshot_parameters(): + """This method provide parameter required for the ansible snapshot + module on Unity""" + return dict( + snapshot_name=dict(required=False, type='str'), + snapshot_id=dict(required=False, 
type='str'), + vol_name=dict(required=False, type='str'), + cg_name=dict(required=False, type='str'), + auto_delete=dict(required=False, type='bool'), + expiry_time=dict(required=False, type='str'), + description=dict(required=False, type='str'), + new_snapshot_name=dict(required=False, type='str'), + host_name=dict(required=False, type='str'), + host_id=dict(required=False, type='str'), + host_state=dict(required=False, type='str', + choices=['mapped', 'unmapped']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity Snapshot object and perform actions on it + based on user input from playbook""" + obj = Snapshot() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py new file mode 100644 index 00000000..6db9b8ae --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py @@ -0,0 +1,1002 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing snapshot schedules on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +module: snapshotschedule +version_added: '1.1.0' +short_description: Manage snapshot schedules on Unity storage system +description: +- Managing snapshot schedules on Unity storage system includes + creating new snapshot schedule, getting details of snapshot schedule, + modifying attributes of snapshot schedule, and deleting snapshot schedule. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Akash Shendge (@shenda1) + +options: + name: + description: + - The name of the snapshot schedule. + - Name is mandatory for a create operation. 
+ - Specify either I(name) or I(id) (but not both) for any operation. + type: str + id: + description: + - The ID of the snapshot schedule. + type: str + type: + description: + - Type of the rule to be included in snapshot schedule. + - Type is mandatory for any create or modify operation. + - Once the snapshot schedule is created with one type it can be modified. + type: str + choices: ['every_n_hours', 'every_day', 'every_n_days', 'every_week', + 'every_month'] + interval: + description: + - Number of hours between snapshots. + - Applicable only when rule type is C(every_n_hours). + type: int + hours_of_day: + description: + - Hours of the day when the snapshot will be taken. + - Applicable only when rule type is C(every_day). + type: list + elements: int + day_interval: + description: + - Number of days between snapshots. + - Applicable only when rule type is C(every_n_days). + type: int + days_of_week: + description: + - Days of the week for which the snapshot schedule rule applies. + - Applicable only when rule type is C(every_week). + type: list + elements: str + choices: ['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', + 'FRIDAY', 'SATURDAY'] + day_of_month: + description: + - Day of the month for which the snapshot schedule rule applies. + - Applicable only when rule type is C(every_month). + - Value should be [1, 31]. + type: int + hour: + description: + - The hour when the snapshot will be taken. + - Applicable for C(every_n_days), C(every_week), C(every_month) rule types. + - For create operation, if I(hour) parameter is not specified, value will + be taken as C(0). + - Value should be [0, 23]. + type: int + minute: + description: + - Minute offset from the hour when the snapshot will be taken. + - Applicable for all rule types. + - For a create operation, if I(minute) parameter is not specified, value will + be taken as C(0). + - Value should be [0, 59]. 
+ type: int + desired_retention: + description: + - The number of days/hours for which snapshot will be retained. + - When I(auto_delete) is C(true), I(desired_retention) cannot be specified. + - Maximum desired retention supported is 31 days or 744 hours. + type: int + retention_unit: + description: + - The retention unit for the snapshot. + default: 'hours' + type: str + choices: ['hours' , 'days'] + auto_delete: + description: + - Indicates whether the system can automatically delete the snapshot. + type: bool + state: + description: + - Define whether the snapshot schedule should exist or not. + type: str + required: true + choices: [absent, present] +notes: +- Snapshot schedule created through Ansible will have only one rule. +- Modification of rule type is not allowed. Within the same type, other + parameters can be modified. +- If an existing snapshot schedule has more than 1 rule in it, only get and + delete operation is allowed. +- The I(check_mode) is not supported. +""" + +EXAMPLES = r""" +- name: Create snapshot schedule (Rule Type - every_n_hours) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Hours_Testing" + type: "every_n_hours" + interval: 6 + desired_retention: 24 + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_day) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Day_Testing" + type: "every_day" + hours_of_day: + - 8 + - 14 + auto_delete: True + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_n_days) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Day_Testing" + 
type: "every_n_days" + day_interval: 2 + desired_retention: 16 + retention_unit: "days" + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_week) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Week_Testing" + type: "every_week" + days_of_week: + - MONDAY + - FRIDAY + hour: 12 + minute: 30 + desired_retention: 200 + state: "{{state_present}}" + +- name: Create snapshot schedule (Rule Type - every_month) + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Month_Testing" + type: "every_month" + day_of_month: 17 + auto_delete: True + state: "{{state_present}}" + +- name: Get snapshot schedule details using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_N_Hours_Testing" + state: "{{state_present}}" + +- name: Get snapshot schedule details using id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + state: "{{state_present}}" + +- name: Modify snapshot schedule details id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + type: "every_n_hours" + interval: 8 + state: "{{state_present}}" + +- name: Modify snapshot schedule using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Day_Testing" + type: "every_day" + desired_retention: 200 + 
auto_delete: False + state: "{{state_present}}" + +- name: Delete snapshot schedule using id + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + id: "{{id}}" + state: "{{state_absent}}" + +- name: Delete snapshot schedule using name + dellemc.unity.snapshotschedule: + unispherehost: "{{unispherehost}}" + validate_certs: "{{validate_certs}}" + username: "{{username}}" + password: "{{password}}" + name: "Ansible_Every_Day_Testing" + state: "{{state_absent}}" +""" + +RETURN = r""" +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True + +snapshot_schedule_details: + description: Details of the snapshot schedule. + returned: When snapshot schedule exists + type: dict + contains: + id: + description: The system ID given to the snapshot schedule. + type: str + name: + description: The name of the snapshot schedule. + type: str + luns: + description: Details of volumes for which snapshot schedule + applied. + type: dict + contains: + UnityLunList: + description: List of volumes for which snapshot schedule + applied. + type: list + contains: + UnityLun: + description: Detail of volume. + type: dict + contains: + id: + description: The system ID given to volume. + type: str + rules: + description: Details of rules that apply to snapshot schedule. + type: list + contains: + id: + description: The system ID of the rule. + type: str + interval: + description: Number of days or hours between snaps, + depending on the rule type. + type: int + hours: + description: Hourly frequency for the snapshot + schedule rule. + type: list + minute: + description: Minute frequency for the snapshot + schedule rule. + type: int + days_of_week: + description: Days of the week for which the snapshot + schedule rule applies. + type: dict + contains: + DayOfWeekEnumList: + description: Enumeration of days of the week. 
+ type: list + days_of_month: + description: Days of the month for which the snapshot + schedule rule applies. + type: list + retention_time: + description: Period of time in seconds for which to keep + the snapshot. + type: int + retention_time_in_hours: + description: Period of time in hours for which to keep the + snapshot. + type: int + rule_type: + description: Type of the rule applied to snapshot schedule. + type: str + is_auto_delete: + description: Indicates whether the system can automatically + delete the snapshot based on pool automatic-deletion + thresholds. + type: bool + storage_resources: + description: Details of storage resources for which snapshot. + schedule applied. + type: dict + contains: + UnityStorageResourceList: + description: List of storage resources for which snapshot + schedule applied. + type: list + contains: + UnityStorageResource: + description: Detail of storage resource. + type: dict + contains: + id: + description: The system ID given to storage + resource. 
+ type: str + sample: { + "existed": true, + "hash": 8742032390151, + "id": "snapSch_63", + "is_default": false, + "is_modified": null, + "is_sync_replicated": false, + "luns": null, + "modification_time": "2021-12-14 21:37:47.905000+00:00", + "name": "SS7_empty_hour_SS", + "rules": [ + { + "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT", + "days_of_month": null, + "days_of_week": { + "DayOfWeekEnumList": [] + }, + "existed": true, + "hash": 8742032280772, + "hours": [ + 0 + ], + "id": "SchedRule_109", + "interval": 2, + "is_auto_delete": false, + "minute": 0, + "retention_time": 86400, + "retention_time_in_hours": 24, + "rule_type": "every_n_days", + "type": "ScheduleTypeEnum.N_DAYS_AT_HHMM" + } + ], + "storage_resources": null, + "version": "ScheduleVersionEnum.LEGACY" + } +""" + +import logging +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('snapshotschedule') + +application_type = "Ansible/1.5.0" + + +class SnapshotSchedule(object): + """Class with snapshot schedule operations""" + + def __init__(self): + """Define all parameters required by this module""" + + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_snapshotschedule_parameters()) + + mutually_exclusive = [['name', 'id'], ['interval', 'hour'], + ['hours_of_day', 'hour'], + ['interval', 'hours_of_day', 'day_interval', + 'days_of_week', 'day_of_month']] + required_one_of = [['name', 'id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of + ) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def schedule_modify_required(self, schedule_details): + """Check if the desired snapshot 
    def schedule_modify_required(self, schedule_details):
        """Check if the desired snapshot schedule state is different from
        existing snapshot schedule state
        :param schedule_details: The dict containing snapshot schedule
                                 details
        :return: Boolean value to indicate if modification is needed
        """

        # Check if existing snapshot schedule has auto_delete = True and
        # playbook sets desired retention without mentioning auto_delete
        if schedule_details['rules'][0]['is_auto_delete'] and\
                self.module.params['desired_retention']\
                and self.module.params['auto_delete'] is None:
            self.module.fail_json(msg="Desired retention cannot be "
                                      "specified when auto_delete is true"
                                  )
        if schedule_details['rules'][0]['retention_time'] and \
                self.module.params['auto_delete']:
            self.module.fail_json(msg="auto_delete cannot be specified when"
                                      " existing desired retention is set")

        # NOTE(review): the integer codes compared below (0..4) appear to
        # follow the order of the `type` choices — every_n_hours,
        # every_day, every_n_days, every_week, every_month — confirm
        # against get_schedule_value (defined elsewhere in this module).
        desired_rule_type = get_schedule_value(self.module.params['type'])
        existing_rule_string = schedule_details['rules'][0][
            'type'].split(".")[1]
        existing_rule_type = utils.ScheduleTypeEnum[
            existing_rule_string]._get_properties()['value']
        modified = False

        # Check if rule type is modified
        if desired_rule_type != existing_rule_type:
            self.module.fail_json(msg="Modification of rule type is not"
                                      " allowed.")

        # Convert desired retention to seconds
        duration_in_sec = convert_retention_to_seconds(
            self.module.params['desired_retention'],
            self.module.params['retention_unit'])

        # Fall back to the existing retention when none was requested.
        if not duration_in_sec:
            duration_in_sec = schedule_details['rules'][0]['retention_time']

        # Check if common parameters for the rules getting modified
        if (duration_in_sec and duration_in_sec != schedule_details[
                'rules'][0]['retention_time']):
            modified = True
        elif (self.module.params['auto_delete'] is not None and
              self.module.params['auto_delete'] != schedule_details['rules']
              [0]['is_auto_delete']):
            modified = True

        if (self.module.params['minute'] is not None and self.module.params[
                'minute'] != schedule_details['rules'][0]['minute']):
            modified = True

        # Per-rule-type comparisons; each only checks the parameters that
        # are meaningful for that rule type.
        if not modified and desired_rule_type == 0:
            if (self.module.params['interval'] and self.module.params[
                    'interval'] != schedule_details['rules'][0]['interval']):
                modified = True
        elif not modified and desired_rule_type == 1:
            if (self.module.params['hours_of_day'] and
                    set(self.module.params['hours_of_day']) !=
                    set(schedule_details['rules'][0]['hours'])):
                modified = True
        elif not modified and desired_rule_type == 2:
            if (self.module.params['day_interval'] and self.module.params[
                    'day_interval'] != schedule_details['rules'][0]['interval'])\
                    or (self.module.params['hour'] is not None and
                        self.module.params['hour'] != schedule_details[
                            'rules'][0]['hours'][0]):
                modified = True
        elif not modified and desired_rule_type == 3:
            # Existing days arrive as "DayOfWeekEnum.MONDAY" strings;
            # strip the enum prefix before comparing.
            days = schedule_details['rules'][0]['days_of_week'][
                'DayOfWeekEnumList']
            existing_days = list()

            for day in days:
                temp = day.split(".")
                existing_days.append(temp[1])

            if (self.module.params['days_of_week'] and
                    set(self.module.params['days_of_week']) !=
                    set(existing_days)) or\
                    (self.module.params['hour'] is not None and
                     self.module.params['hour'] != schedule_details['rules'][
                         0]['hours'][0]):
                modified = True
        elif not modified and desired_rule_type == 4:
            if (self.module.params['day_of_month'] and self.module.params[
                    'day_of_month'] != schedule_details['rules'][0][
                    'days_of_month'][0]) or\
                    (self.module.params['hour'] is not None and
                     self.module.params['hour'] != schedule_details['rules'][
                         0]['hours'][0]):
                modified = True
        LOG.info("Modify Flag: %s", modified)
        return modified
+ :param days_of_week: The list of days of week + :return: The list of days_of_week enum + """ + + days_of_week_enum = [] + for day in days_of_week: + if day in utils.DayOfWeekEnum.__members__: + days_of_week_enum.append(utils.DayOfWeekEnum[day]) + else: + errormsg = "Invalid choice {0} for days of week".format(day) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + return days_of_week_enum + + def create_rule(self, type, interval, hours_of_day, day_interval, + days_of_week, day_of_month, hour, minute, + desired_retention, retention_unit, auto_delete, + schedule_details=None): + """Create the rule.""" + + duration_in_sec = None + if desired_retention: + duration_in_sec = convert_retention_to_seconds(desired_retention, + retention_unit) + + if not duration_in_sec and schedule_details: + duration_in_sec = schedule_details['rules'][0]['retention_time'] + + if hour is None and schedule_details is None: + hour = 0 + + if hour is None and schedule_details: + if schedule_details['rules'][0]['hours'] is not None: + hour = schedule_details['rules'][0]['hours'][0] + + if minute is None and schedule_details is None: + minute = 0 + + if minute is None and schedule_details: + minute = schedule_details['rules'][0]['minute'] + + try: + if type == "every_n_hours": + if not interval: + interval = schedule_details['rules'][0]['interval'] + rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\ + every_n_hours(hour_interval=interval, minute=minute, + retention_time=duration_in_sec, + is_auto_delete=auto_delete) + elif type == "every_day": + if not hours_of_day: + hours_of_day = schedule_details['rules'][0]['hours'] + + rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\ + every_day(hours=hours_of_day, minute=minute, + retention_time=duration_in_sec, + is_auto_delete=auto_delete) + elif type == "every_n_days": + if not day_interval: + day_interval = schedule_details['rules'][0]['interval'] + + rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\ + 
every_n_days(day_interval=day_interval, hour=hour, + minute=minute, + retention_time=duration_in_sec, + is_auto_delete=auto_delete) + elif type == "every_week": + if days_of_week: + days_of_week_enum = self.get_daysOfWeek_enum(days_of_week) + else: + days = schedule_details['rules'][0]['days_of_week'][ + 'DayOfWeekEnumList'] + existing_days = list() + + for day in days: + temp = day.split(".") + existing_days.append(temp[1]) + days_of_week_enum = self.get_daysOfWeek_enum(days_of_week) + + rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\ + every_week(days_of_week=days_of_week_enum, hour=hour, + minute=minute, retention_time=duration_in_sec, + is_auto_delete=auto_delete) + else: + if day_of_month: + day_of_month_list = [day_of_month] + else: + day_of_month_list = schedule_details['rules'][0][ + 'days_of_month'] + + rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\ + every_month(days_of_month=day_of_month_list, hour=hour, + minute=minute, retention_time=duration_in_sec, + is_auto_delete=auto_delete) + + return rule_dict + + except Exception as e: + errormsg = "Create operation of snapshot schedule rule " \ + " failed with error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_snapshot_schedule(self, name, rule_dict): + """Create snapshot schedule. + :param name: The name of the snapshot schedule + :param rule_dict: The dict of the rule + :return: Boolean value to indicate if snapshot schedule created + """ + + try: + utils.snap_schedule.UnitySnapSchedule.create( + cli=self.unity_conn._cli, name=name, rules=[rule_dict]) + return True + + except Exception as e: + errormsg = "Create operation of snapshot schedule {0} failed" \ + " with error {1}".format(name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_desired_retention(self, desired_retention, retention_unit): + """Validates the specified desired retention. 
    def validate_desired_retention(self, desired_retention, retention_unit):
        """Validates the specified desired retention.
        :param desired_retention: Desired retention of the snapshot
                                  schedule
        :param retention_unit: Retention unit for the snapshot schedule
        """

        # 744 hours == 31 days: both branches enforce the same maximum
        # retention, expressed in each unit.
        if retention_unit == 'hours' and (desired_retention < 1 or
                                          desired_retention > 744):
            self.module.fail_json(msg="Please provide a valid integer as the"
                                      " desired retention between 1 and 744.")
        elif retention_unit == 'days' and (desired_retention < 1 or
                                           desired_retention > 31):
            self.module.fail_json(msg="Please provide a valid integer as the"
                                      " desired retention between 1 and 31.")

    def return_schedule_instance(self, id):
        """Return the snapshot schedule instance
        :param id: The id of the snapshot schedule
        :return: Instance of the snapshot schedule
        """

        try:
            obj_schedule = utils.snap_schedule.UnitySnapSchedule.get(
                self.unity_conn._cli, id)
            return obj_schedule

        except Exception as e:
            error_msg = "Failed to get the snapshot schedule {0} instance" \
                        " with error {1}".format(id, str(e))
            LOG.error(error_msg)
            self.module.fail_json(msg=error_msg)

    def delete_snapshot_schedule(self, id):
        """Delete snapshot schedule.
        :param id: The ID of the snapshot schedule
        :return: The boolean value to indicate if snapshot schedule
                 deleted
        """

        try:
            obj_schedule = self.return_schedule_instance(id=id)
            obj_schedule.delete()
            return True

        except Exception as e:
            errormsg = "Delete operation of snapshot schedule id:{0} failed" \
                       " with error {1}".format(id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def modify_snapshot_schedule(self, id, schedule_details):
        """Modify snapshot schedule details.
        :param id: The id of the snapshot schedule
        :param schedule_details: The dict containing schedule details
        :return: The boolean value to indicate if snapshot schedule
                 modified
        """

        try:
            obj_schedule = self.return_schedule_instance(id=id)
            rule_id = schedule_details['rules'][0]['id']

            # Default auto_delete to the existing rule's value when the
            # playbook does not set it.
            if self.module.params['auto_delete'] is None:
                auto_delete = schedule_details['rules'][0]['is_auto_delete']
            else:
                auto_delete = self.module.params['auto_delete']

            # NOTE(review): auto_delete is forced to None whenever the
            # existing rule has a retention time set — presumably the
            # array API rejects sending both retention and auto_delete;
            # confirm against storops.
            if schedule_details['rules'][0]['is_auto_delete'] and\
                    self.module.params['desired_retention'] and\
                    self.module.params['auto_delete'] is False:
                auto_delete = False
            elif schedule_details['rules'][0]['retention_time']:
                auto_delete = None

            rule_dict = self.create_rule(
                self.module.params['type'], self.module.params['interval'],
                self.module.params['hours_of_day'],
                self.module.params['day_interval'],
                self.module.params['days_of_week'],
                self.module.params['day_of_month'],
                self.module.params['hour'], self.module.params['minute'],
                self.module.params['desired_retention'],
                self.module.params['retention_unit'], auto_delete,
                schedule_details)

            # A schedule rule cannot be edited in place: the old rule is
            # removed and the rebuilt one added in a single modify call.
            obj_schedule.modify(add_rules=[rule_dict],
                                remove_rule_ids=[rule_id])
            return True
        except Exception as e:
            errormsg = "Modify operation of snapshot schedule id:{0} failed" \
                       " with error {1}".format(id, str(e))
            LOG.error(errormsg)
            self.module.fail_json(msg=errormsg)

    def get_details(self, id=None, name=None):
        """Get snapshot schedule details.
        :param id: The id of the snapshot schedule
        :param name: The name of the snapshot schedule
        :return: Dict containing snapshot schedule details if exists
        """

        id_or_name = id if id else name
        errormsg = "Failed to get details of snapshot schedule {0} with" \
                   " error {1}"
        try:
            # Name lookups go through the list API first to resolve the
            # id, then the id fetch provides the full object.
            if not id:
                details = utils.snap_schedule.UnitySnapScheduleList.get(
                    self.unity_conn._cli, name=name)

                if details:
                    id = details[0].id

            if id:
                details = self.unity_conn.get_snap_schedule(_id=id)

            if id and details.existed:
                rule_list = [rules._get_properties() for rules in
                             details.rules]
                for rule in rule_list:
                    # NOTE(review): assumes retention_time is always an
                    # int here; a rule without retention might hold None
                    # and would raise TypeError — confirm upstream.
                    rule['retention_time_in_hours'] = int(
                        rule['retention_time'] / 3600)
                    rule['rule_type'] = get_rule_type(rule['type'])
                schedule_details = details._get_properties()
                schedule_details['rules'] = rule_list
                return schedule_details
            else:
                LOG.info("Failed to get the snapshot schedule %s", id_or_name)
                return None

        except utils.HttpError as e:
            if e.http_status == 401:
                auth_err = "Incorrect username or password, {0}".format(
                    e.message)
                msg = errormsg.format(id_or_name, auth_err)
                LOG.error(msg)
                self.module.fail_json(msg=msg)
            else:
                msg = errormsg.format(id_or_name, str(e))
                LOG.error(msg)
                self.module.fail_json(msg=msg)

        except utils.UnityResourceNotFoundError as e:
            # Not-found is expected; callers treat None as "absent".
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            return None

        except Exception as e:
            msg = errormsg.format(id_or_name, str(e))
            LOG.error(msg)
            self.module.fail_json(msg=msg)
+ + except Exception as e: + errormsg = "Failed to validate the module param with error" \ + " {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def perform_module_operation(self): + """ + Perform different actions on snapshot schedule module based on + parameters chosen in playbook + """ + name = self.module.params['name'] + id = self.module.params['id'] + type = self.module.params['type'] + interval = self.module.params['interval'] + hours_of_day = self.module.params['hours_of_day'] + day_interval = self.module.params['day_interval'] + days_of_week = self.module.params['days_of_week'] + day_of_month = self.module.params['day_of_month'] + hour = self.module.params['hour'] + minute = self.module.params['minute'] + desired_retention = self.module.params['desired_retention'] + retention_unit = self.module.params['retention_unit'] + auto_delete = self.module.params['auto_delete'] + state = self.module.params['state'] + + # result is a dictionary that contains changed status and snapshot + # schedule details + result = dict( + changed=False, + snapshot_schedule_details={} + ) + + self.validate_parameters() + + if desired_retention is not None: + self.validate_desired_retention(desired_retention, retention_unit) + + if auto_delete and desired_retention: + self.module.fail_json(msg="Desired retention cannot be " + "specified when auto_delete is true" + ) + + schedule_details = self.get_details(name=name, id=id) + + if not id and schedule_details: + id = schedule_details['id'] + + if state == 'present' and not schedule_details: + if not name: + msg = "The parameter name length is 0. It is too short." \ + " The min length is 1." 
+ self.module.fail_json(msg=msg) + + if not type: + self.module.fail_json(msg="Rule type is necessary to create" + " snapshot schedule") + + if type == "every_n_hours" and interval is None: + self.module.fail_json(msg="To create snapshot schedule with" + " rule type every_n_hours, interval" + " is the mandatory parameter.") + elif type == "every_day" and hours_of_day is None: + self.module.fail_json(msg="To create snapshot schedule with" + " rule type every_day, hours_of_day" + " is the mandatory parameter.") + elif type == "every_n_days" and day_interval is None: + self.module.fail_json(msg="To create snapshot schedule with" + " rule type every_n_days," + " day_interval is the mandatory" + " parameter.") + elif type == "every_week" and days_of_week is None: + self.module.fail_json(msg="To create snapshot schedule with" + " rule type every_week," + " days_of_week is the mandatory" + " parameter.") + elif type == "every_month" and day_of_month is None: + self.module.fail_json(msg="To create snapshot schedule with" + " rule type every_month," + " day_of_month is the mandatory" + " parameter.") + + rule_dict = self.create_rule(type, interval, hours_of_day, + day_interval, days_of_week, + day_of_month, hour, minute, + desired_retention, retention_unit, + auto_delete) + result['changed'] = self.create_snapshot_schedule(name, rule_dict) + + elif state == 'absent' and schedule_details: + result['changed'] = self.delete_snapshot_schedule(id) + + if state == 'present' and type and schedule_details and\ + len(schedule_details['rules']) == 1: + if (self.schedule_modify_required(schedule_details)): + result['changed'] = self.modify_snapshot_schedule( + id, schedule_details) + + result['snapshot_schedule_details'] = self.get_details(name=name, + id=id) + self.module.exit_json(**result) + + +def get_rule_type(type): + """Get the rule type of schedule. 
+ :param type: The schedule type enum + :return: The rule type of snapshot schedule + """ + + schedule_type = { + "ScheduleTypeEnum.N_HOURS_AT_MM": "every_n_hours", + "ScheduleTypeEnum.DAY_AT_HHMM": "every_day", + "ScheduleTypeEnum.N_DAYS_AT_HHMM": "every_n_days", + "ScheduleTypeEnum.SELDAYS_AT_HHMM": "every_week", + "ScheduleTypeEnum.NTH_DAYOFMONTH_AT_HHMM": "every_month" + } + + return schedule_type.get(type) + + +def get_schedule_value(type): + """Get the enum for schedule. + :param type: The type of rule + :return: The enum value for rule + """ + + rule_type = { + "every_n_hours": 0, + "every_day": 1, + "every_n_days": 2, + "every_week": 3, + "every_month": 4 + } + + return rule_type.get(type) + + +def convert_retention_to_seconds(desired_retention, retention_unit): + """Convert desired retention to seconds. + :param desired_retention: The desired retention for snapshot + schedule + :param retention_unit: The retention unit for snapshot schedule + :return: The integer value in seconds + """ + + duration_in_sec = None + if desired_retention: + if retention_unit == 'hours': + duration_in_sec = desired_retention * 60 * 60 + else: + duration_in_sec = desired_retention * 24 * 60 * 60 + return duration_in_sec + + +def get_snapshotschedule_parameters(): + """This method provide parameters required for the ansible snapshot + schedule module on Unity""" + + return dict( + name=dict(type='str'), + id=dict(type='str'), + type=dict(type='str', choices=['every_n_hours', 'every_day', + 'every_n_days', 'every_week', + 'every_month']), + interval=dict(type='int'), + hours_of_day=dict(type='list', elements='int'), + day_interval=dict(type='int'), + days_of_week=dict(type='list', elements='str', + choices=['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY', + 'THURSDAY', 'FRIDAY', 'SATURDAY']), + day_of_month=dict(type='int'), + hour=dict(type='int'), + minute=dict(type='int'), + desired_retention=dict(type='int'), + retention_unit=dict(type='str', choices=['hours', 'days'], + 
default='hours'), + auto_delete=dict(type='bool'), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity snapshot schedule object and perform action on it + based on user input from playbook""" + obj = SnapshotSchedule() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/storagepool.py b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py new file mode 100644 index 00000000..55eb6f47 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py @@ -0,0 +1,879 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing storage pool on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +module: storagepool +version_added: '1.1.0' +short_description: Manage storage pool on Unity +description: +- Managing storage pool on Unity storage system contains the operations + Get details of storage pool, + Create a storage pool, + Modify storage pool. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Ambuj Dubey (@AmbujDube) + +options: + pool_name: + description: + - Name of the storage pool, unique in the storage system. + type: str + + pool_id: + description: + - Unique identifier of the pool instance. + type: str + + new_pool_name: + description: + - New name of the storage pool, unique in the storage system. + type: str + + pool_description: + description: + - The description of the storage pool. + type: str + + fast_cache: + description: + - Indicates whether the fast cache is enabled for the storage pool. + - C(Enabled) - FAST Cache is enabled for the pool. + - C(Disabled) - FAST Cache is disabled for the pool. 
+ choices: [enabled, disabled] + type: str + + fast_vp: + description: + - Indicates whether to enable scheduled data relocations for the pool. + - C(enabled) - Enabled scheduled data relocations for the pool. + - C(disabled) - Disabled scheduled data relocations for the pool. + choices: [enabled, disabled] + type: str + + raid_groups: + description: + - Parameters to create RAID group from the disks and add it to the pool. + type: dict + suboptions: + disk_group_id: + description: + - Id of the disk group. + type: str + + disk_num: + description: + - Number of disks. + type: int + + raid_type: + description: + - RAID group types or RAID levels. + choices: [None, RAID5, RAID0, RAID1, RAID3, RAID10, RAID6, Mixed, Automatic] + type: str + + stripe_width: + description: + - RAID group stripe widths, including parity or mirror disks. + choices: ['BEST_FIT', '2', '4', '5', '6', '8', '9', '10', '12', '13', '14', '16'] + type: str + + alert_threshold: + description: + - Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage. + - Minimum threshold limit is 50. + - Maximum threshold limit is 84. + type: int + + is_harvest_enabled: + description: + - Enable/Disable automatic deletion of snapshots based on pool space usage. + type: bool + + pool_harvest_high_threshold: + description: + - Max threshold for space used in pool beyond which the system automatically starts deleting snapshots in the pool. + - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool. + - Minimum pool harvest high threshold value is 1. + - Maximum pool harvest high threshold value is 99. + type: float + + pool_harvest_low_threshold: + description: + - Min threshold for space used in pool below which the system automatically stops deletion of snapshots in the pool. + - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+ - Minimum pool harvest low threshold value is 0. + - Maximum pool harvest low threshold value is 98. + type: float + + is_snap_harvest_enabled: + description: + - Enable/Disable automatic deletion of snapshots based on snapshot space usage. + type: bool + + snap_harvest_high_threshold: + description: + - Max threshold for space used in snapshot beyond which the system automatically starts deleting snapshots in the pool. + - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool. + - Minimum snap harvest high threshold value is 1. + - Maximum snap harvest high threshold value is 99. + type: float + + snap_harvest_low_threshold: + description: + - Min threshold for space used in snapshot below which the system will stop automatically deleting snapshots in the pool. + - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool. + - Minimum snap harvest low threshold value is 0. + - Maximum snap harvest low threshold value is 98. + type: float + + pool_type: + description: + - Indicates storage pool type. + choices: [TRADITIONAL, DYNAMIC] + type: str + + state: + description: + - Define whether the storage pool should exist or not. + - C(present) - indicates that the storage pool should exist on the system. + - C(absent) - indicates that the storage pool should not exist on the system. + choices: [absent, present] + type: str + required: true + +notes: +- Deletion of storage pool is not allowed through this Ansible module. +- The I(check_mode) is not supported.
+''' + +EXAMPLES = r''' +- name: Get Storage pool details using pool_name + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "{{pool_name}}" + state: "present" + +- name: Get Storage pool details using pool_id + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_id: "{{pool_id}}" + state: "present" + +- name: Modify Storage pool attributes using pool_name + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "{{pool_name}}" + new_pool_name: "{{new_pool_name}}" + pool_description: "{{pool_description}}" + fast_cache: "{{fast_cache_enabled}}" + fast_vp: "{{fast_vp_enabled}}" + state: "present" + +- name: Modify Storage pool attributes using pool_id + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_id: "{{pool_id}}" + new_pool_name: "{{new_pool_name}}" + pool_description: "{{pool_description}}" + fast_cache: "{{fast_cache_enabled}}" + fast_vp: "{{fast_vp_enabled}}" + state: "present" + +- name: Create a StoragePool + dellemc.unity.storagepool: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + pool_name: "Test" + pool_description: "test pool" + raid_groups: + disk_group_id : "dg_16" + disk_num : 2 + raid_type : "RAID10" + stripe_width : "BEST_FIT" + alert_threshold : 50 + is_harvest_enabled : True + pool_harvest_high_threshold : 60 + pool_harvest_low_threshold : 40 + is_snap_harvest_enabled : True + snap_harvest_high_threshold : 70 + snap_harvest_low_threshold : 50 + fast_vp: "enabled" + fast_cache: "enabled" + pool_type : 
"DYNAMIC" + state: "present" + +''' + +RETURN = r''' + changed: + description: Whether or not the storage pool has changed. + returned: always + type: bool + sample: True + + storage_pool_details: + description: The storage pool details. + returned: When storage pool exists. + type: dict + contains: + id: + description: Pool id, unique identifier of the pool. + type: str + name: + description: Pool name, unique in the storage system. + type: str + is_fast_cache_enabled: + description: Indicates whether the fast cache is enabled for the storage + pool. + true - FAST Cache is enabled for the pool. + false - FAST Cache is disabled for the pool. + type: bool + is_fast_vp_enabled: + description: Indicates whether to enable scheduled data relocations + for the storage pool. + true - Enabled scheduled data relocations for the pool. + false - Disabled scheduled data relocations for the pool. + type: bool + size_free_with_unit: + description: Indicates size_free with its appropriate unit + in human readable form. + type: str + size_subscribed_with_unit: + description: Indicates size_subscribed with its appropriate unit in + human readable form. + type: str + size_total_with_unit: + description: Indicates size_total with its appropriate unit in human + readable form. + type: str + size_used_with_unit: + description: Indicates size_used with its appropriate unit in human + readable form. + type: str + snap_size_subscribed_with_unit: + description: Indicates snap_size_subscribed with its + appropriate unit in human readable form. + type: str + snap_size_used_with_unit: + description: Indicates snap_size_used with its + appropriate unit in human readable form. + type: str + drives: + description: Indicates information about the drives + associated with the storage pool. + type: list + contains: + id: + description: Unique identifier of the drive. + type: str + name: + description: Indicates name of the drive. + type: str + size: + description: Indicates size of the drive. 
+ type: str + disk_technology: + description: Indicates disk technology of the drive. + type: str + tier_type: + description: Indicates tier type of the drive. + type: str + sample: { + "alert_threshold": 50, + "creation_time": "2022-03-08 14:05:32+00:00", + "description": "", + "drives": [ + { + "disk_technology": "SAS", + "id": "dpe_disk_22", + "name": "DPE Drive 22", + "size": 590860984320, + "tier_type": "PERFORMANCE" + }, + { + "disk_technology": "SAS", + "id": "dpe_disk_23", + "name": "DPE Drive 23", + "size": 590860984320, + "tier_type": "PERFORMANCE" + }, + { + "disk_technology": "SAS", + "id": "dpe_disk_24", + "name": "DPE Drive 24", + "size": 590860984320, + "tier_type": "PERFORMANCE" + } + ], + "existed": true, + "harvest_state": "UsageHarvestStateEnum.IDLE", + "hash": 8744642897210, + "health": { + "UnityHealth": { + "hash": 8744642799842 + } + }, + "id": "pool_280", + "is_all_flash": false, + "is_empty": false, + "is_fast_cache_enabled": false, + "is_fast_vp_enabled": false, + "is_harvest_enabled": true, + "is_snap_harvest_enabled": true, + "metadata_size_subscribed": 105763569664, + "metadata_size_used": 57176752128, + "name": "test_pool", + "object_id": 12884902146, + "pool_fast_vp": { + "UnityPoolFastVp": { + "hash": 8744647518980 + } + }, + "pool_space_harvest_high_threshold": 59.0, + "pool_space_harvest_low_threshold": 40.0, + "pool_type": "StoragePoolTypeEnum.DYNAMIC", + "raid_type": "RaidTypeEnum.RAID10", + "rebalance_progress": null, + "size_free": 470030483456, + "size_free_with_unit": "437.75 GB", + "size_subscribed": 447215820800, + "size_subscribed_with_unit": "416.5 GB", + "size_total": 574720311296, + "size_total_with_unit": "535.25 GB", + "size_used": 76838068224, + "size_used_with_unit": "71.56 GB", + "snap_size_subscribed": 128851369984, + "snap_size_subscribed_with_unit": "120.0 GB", + "snap_size_used": 2351104, + "snap_size_used_with_unit": "2.24 MB", + "snap_space_harvest_high_threshold": 80.0, + "snap_space_harvest_low_threshold": 
60.0, + "tiers": { + "UnityPoolTierList": [ + { + "disk_count": [ + 0, + 3, + 0 + ], + "existed": true, + "hash": 8744643017382, + "name": [ + "Extreme Performance", + "Performance", + "Capacity" + ], + "pool_units": [ + null, + { + "UnityPoolUnitList": [ + { + "UnityPoolUnit": { + "hash": 8744642786759, + "id": "rg_4" + } + }, + { + "UnityPoolUnit": { + "hash": 8744642786795, + "id": "rg_5" + } + } + ] + }, + null + ], + "raid_type": [ + "RaidTypeEnum.NONE", + "RaidTypeEnum.RAID10", + "RaidTypeEnum.NONE" + ], + "size_free": [ + 0, + 470030483456, + 0 + ], + "size_moving_down": [ + 0, + 0, + 0 + ], + "size_moving_up": [ + 0, + 0, + 0 + ], + "size_moving_within": [ + 0, + 0, + 0 + ], + "size_total": [ + 0, + 574720311296, + 0 + ], + "size_used": [ + 0, + 104689827840, + 0 + ], + "stripe_width": [ + null, + "RaidStripeWidthEnum._2", + null + ], + "tier_type": [ + "TierTypeEnum.EXTREME_PERFORMANCE", + "TierTypeEnum.PERFORMANCE", + "TierTypeEnum.CAPACITY" + ] + } + ] + } + } + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +import logging + +LOG = utils.get_logger('storagepool') + +application_type = "Ansible/1.5.0" + + +class StoragePool(object): + """Class with storage pool operations""" + + def __init__(self): + """ Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_storagepool_parameters()) + + mutually_exclusive = [['pool_name', 'pool_id']] + required_one_of = [['pool_name', 'pool_id']] + + # initialize the Ansible module + self.module = AnsibleModule(argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + utils.ensure_required_libs(self.module) + + self.conn = utils.\ + get_unity_unisphere_connection(self.module.params, application_type) + + def get_details(self, pool_id=None, 
pool_name=None): + """ Get storage pool details""" + try: + api_response = self.conn.get_pool(_id=pool_id, name=pool_name) + details = api_response._get_properties() + + is_fast_vp_enabled = api_response._get_property_from_raw( + 'pool_fast_vp').is_schedule_enabled + details['is_fast_vp_enabled'] = is_fast_vp_enabled + + details['size_free_with_unit'] = utils.\ + convert_size_with_unit(int(details['size_free'])) + + details['size_subscribed_with_unit'] = utils.\ + convert_size_with_unit(int(details['size_subscribed'])) + + details['size_total_with_unit'] = utils.\ + convert_size_with_unit(int(details['size_total'])) + + details['size_used_with_unit'] = utils.\ + convert_size_with_unit(int(details['size_used'])) + + details['snap_size_subscribed_with_unit'] = utils.\ + convert_size_with_unit(int(details['snap_size_subscribed'])) + + details['snap_size_used_with_unit'] = utils.\ + convert_size_with_unit(int(details['snap_size_used'])) + + pool_instance = utils.UnityPool.get(self.conn._cli, details['id']) + pool_tier_list = [] + pool_tier_list.append((pool_instance.tiers)._get_properties()) + pool_tier_dict = {} + pool_tier_dict['UnityPoolTierList'] = pool_tier_list + details['tiers'] = pool_tier_dict + return details + except Exception as e: + error = str(e) + check_list = ['not found', 'no attribute'] + if any(ele in error for ele in check_list): + error_message = "pool details are not found" + LOG.info(error_message) + return None + error_message = 'Get details of storage pool failed with ' \ + 'error: {0}'.format(str(e)) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def is_pool_modification_required(self, storage_pool_details): + """ Check if attributes of storage pool needs to be modified + """ + try: + if self.module.params['new_pool_name'] and \ + self.module.params['new_pool_name'] != \ + storage_pool_details['name']: + return True + + if self.module.params['pool_description'] is not None and \ + 
self.module.params['pool_description'] != \ + storage_pool_details['description']: + return True + + if self.module.params['fast_cache']: + if (self.module.params['fast_cache'] == "enabled" and + not storage_pool_details['is_fast_cache_enabled']) or\ + (self.module.params['fast_cache'] == "disabled" and storage_pool_details['is_fast_cache_enabled']): + return True + + if self.module.params['fast_vp']: + if (self.module.params['fast_vp'] == "enabled" and + not storage_pool_details['is_fast_vp_enabled']) or \ + (self.module.params['fast_vp'] == "disabled" and + storage_pool_details['is_fast_vp_enabled']): + return True + + LOG.info("modify not required") + return False + + except Exception as e: + error_message = 'Failed to determine if any modification'\ + 'required for pool attributes with error: {0}'.format(str(e)) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def pool_modify(self, id, new_pool_name, + pool_description, fast_cache, fast_vp): + """ Modify attributes of storage pool """ + pool_obj = utils.UnityPool.get(self.conn._cli, id) + try: + pool_obj.modify(name=new_pool_name, description=pool_description, + is_fast_cache_enabled=fast_cache, + is_fastvp_enabled=fast_vp) + new_storage_pool_details = self.get_details(pool_id=id, + pool_name=None) + LOG.info("Modification Successful") + return new_storage_pool_details + except Exception as e: + if self.module.params['pool_id']: + pool_identifier = self.module.params['pool_id'] + else: + pool_identifier = self.module.params['pool_name'] + error_message = 'Modify attributes of storage pool {0} ' \ + 'failed with error: {1}'.format(pool_identifier, str(e)) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_pool_drives(self, pool_id=None, pool_name=None): + """ Get pool drives attached to pool""" + pool_identifier = pool_id or pool_name + pool_drives_list = [] + try: + drive_instances = utils.UnityDiskList.get(self.conn._cli) + if drive_instances: + for drive 
in drive_instances: + if drive.pool and (drive.pool.id == pool_identifier or drive.pool.name == pool_identifier): + pool_drive = {"id": drive.id, "name": drive.name, "size": drive.size, + "disk_technology": drive.disk_technology.name, + "tier_type": drive.tier_type.name} + pool_drives_list.append(pool_drive) + LOG.info("Successfully retrieved pool drive details") + return pool_drives_list + except Exception as e: + error_message = 'Get details of pool drives failed with ' \ + 'error: {0}'.format(str(e)) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def get_raid_type_enum(self, raid_type): + """ Get raid_type_enum. + :param raid_type: The raid_type + :return: raid_type enum + """ + + if raid_type in utils.RaidTypeEnum.__members__: + return utils.RaidTypeEnum[raid_type] + else: + errormsg = "Invalid choice %s for Raid Type" % raid_type + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_raid_stripe_width_enum(self, stripe_width): + """ Get raid_stripe_width enum. + :param stripe_width: The raid_stripe_width + :return: raid_stripe_width enum + """ + if stripe_width != "BEST_FIT": + stripe_width = "_" + stripe_width + if stripe_width in utils.RaidStripeWidthEnum.__members__: + return utils.RaidStripeWidthEnum[stripe_width] + else: + errormsg = "Invalid choice %s for stripe width" % stripe_width + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_pool_type_enum(self, pool_type): + """ Get the storage pool_type enum. 
+ :param pool_type: The pool_type + :return: pool_type enum + """ + + if pool_type == "TRADITIONAL": + return 1 + elif pool_type == "DYNAMIC": + return 2 + else: + errormsg = "Invalid choice %s for Storage Pool Type" % pool_type + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_raid_groups(self, raid_groups): + """ Get the raid groups for creating pool""" + try: + disk_obj = utils.UnityDiskGroup.get(self.conn._cli, _id=raid_groups['disk_group_id']) + disk_num = raid_groups['disk_num'] + raid_type = raid_groups['raid_type'] + raid_type = self.get_raid_type_enum(raid_type) \ + if raid_type else None + stripe_width = raid_groups['stripe_width'] + stripe_width = self.get_raid_stripe_width_enum(stripe_width) \ + if stripe_width else None + raid_group = utils.RaidGroupParameter(disk_group=disk_obj, + disk_num=disk_num, raid_type=raid_type, + stripe_width=stripe_width) + raid_groups = [raid_group] + return raid_groups + except Exception as e: + error_message = 'Failed to create storage pool with error: %s' % str(e) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def validate_create_pool_params(self, alert_threshold=None, + pool_harvest_high_threshold=None, + pool_harvest_low_threshold=None, + snap_harvest_high_threshold=None, + snap_harvest_low_threshold=None): + """ Validates params for creating pool""" + if alert_threshold and (alert_threshold < 50 or alert_threshold > 84): + errormsg = "Alert threshold is not in the allowed value range of 50 - 84" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if pool_harvest_high_threshold and (pool_harvest_high_threshold < 1 or pool_harvest_high_threshold > 99): + errormsg = "Pool harvest high threshold is not in the allowed value range of 1 - 99" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if pool_harvest_low_threshold and (pool_harvest_low_threshold < 0 or pool_harvest_low_threshold > 98): + errormsg = "Pool harvest low threshold is not in the allowed 
value range of 0 - 98" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if snap_harvest_high_threshold and (snap_harvest_high_threshold < 1 or snap_harvest_high_threshold > 99): + errormsg = "Snap harvest high threshold is not in the allowed value range of 1 - 99" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + if snap_harvest_low_threshold and (snap_harvest_low_threshold < 0 or snap_harvest_low_threshold > 98): + errormsg = "Snap harvest low threshold is not in the allowed value range of 0 - 98" + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_pool(self, name, raid_groups): + """ Creates a StoragePool""" + try: + pool_obj = utils.UnityPool.get(self.conn._cli) + pool_description = self.module.params['pool_description'] + raid_groups = self.get_raid_groups(raid_groups) \ + if raid_groups else None + alert_threshold = self.module.params['alert_threshold'] + pool_harvest_high_threshold = None + pool_harvest_low_threshold = None + snap_harvest_high_threshold = None + snap_harvest_low_threshold = None + is_harvest_enabled = self.module.params['is_harvest_enabled'] + if is_harvest_enabled: + pool_harvest_high_threshold = self.module.params['pool_harvest_high_threshold'] + pool_harvest_low_threshold = self.module.params['pool_harvest_low_threshold'] + is_snap_harvest_enabled = self.module.params['is_snap_harvest_enabled'] + if is_snap_harvest_enabled: + snap_harvest_high_threshold = self.module.params['snap_harvest_high_threshold'] + snap_harvest_low_threshold = self.module.params['snap_harvest_low_threshold'] + self.validate_create_pool_params(alert_threshold=alert_threshold, + pool_harvest_high_threshold=pool_harvest_high_threshold, + pool_harvest_low_threshold=pool_harvest_low_threshold, + snap_harvest_high_threshold=snap_harvest_high_threshold, + snap_harvest_low_threshold=snap_harvest_low_threshold) + pool_type = self.module.params['pool_type'] + pool_type = self.get_pool_type_enum(pool_type) \ + if pool_type else 
None + fast_vp = self.module.params['fast_vp'] + if fast_vp: + if fast_vp == "enabled": + fast_vp = True + else: + fast_vp = False + + pool_obj.create(self.conn._cli, name=name, description=pool_description, raid_groups=raid_groups, + alert_threshold=alert_threshold, + is_harvest_enabled=is_harvest_enabled, + is_snap_harvest_enabled=is_snap_harvest_enabled, + pool_harvest_high_threshold=pool_harvest_high_threshold, + pool_harvest_low_threshold=pool_harvest_low_threshold, + snap_harvest_high_threshold=snap_harvest_high_threshold, + snap_harvest_low_threshold=snap_harvest_low_threshold, + is_fastvp_enabled=fast_vp, + pool_type=pool_type) + LOG.info("Creation of storage pool successful") + storage_pool_details = self.get_details(pool_name=name) + changed = True + return changed, storage_pool_details + except Exception as e: + error_message = 'Failed to create storage pool with error: %s' % str(e) + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + def perform_module_operation(self): + """ + Perform different actions on storage pool module based on parameters + chosen in playbook + """ + pool_name = self.module.params['pool_name'] + pool_id = self.module.params['pool_id'] + new_pool_name = self.module.params['new_pool_name'] + pool_description = self.module.params['pool_description'] + fast_cache = self.module.params['fast_cache'] + fast_vp = self.module.params['fast_vp'] + state = self.module.params['state'] + raid_groups = self.module.params['raid_groups'] + if fast_cache: + if fast_cache == "enabled": + fast_cache = True + else: + fast_cache = False + + if fast_vp: + if fast_vp == "enabled": + fast_vp = True + else: + fast_vp = False + + # result is a dictionary that contains changed status and storage pool details + result = dict( + changed=False, + storage_pool_details={} + ) + + storage_pool_details = self.get_details(pool_id, pool_name) + result['storage_pool_details'] = storage_pool_details + + if state == 'absent' and 
storage_pool_details: + error_message = 'Deletion of storage pool is not allowed through'\ + ' Ansible module' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + # Create storage pool + if state == 'present' and not storage_pool_details: + if pool_name is not None and len(pool_name) != 0: + result['changed'], storage_pool_details \ + = self.create_pool(name=pool_name, raid_groups=raid_groups) + result['storage_pool_details'] = storage_pool_details + else: + error_message = 'The parameter pool_name length is 0. It'\ + ' is too short. The min length is 1' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + # Get pool drive details + if result['storage_pool_details']: + result['storage_pool_details']['drives'] = self.get_pool_drives(pool_id=pool_id, pool_name=pool_name) + + if state == 'present' and storage_pool_details: + if new_pool_name is not None and len(new_pool_name) == 0: + error_message = 'The parameter new_pool_name length is 0. It'\ + ' is too short. 
The min length is 1' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + pool_modify_flag = self.\ + is_pool_modification_required(storage_pool_details) + LOG.info("Storage pool modification flag %s", + str(pool_modify_flag)) + + if pool_modify_flag: + result['storage_pool_details'] = \ + self.pool_modify(storage_pool_details['id'], new_pool_name, + pool_description, fast_cache, fast_vp) + result['changed'] = True + self.module.exit_json(**result) + + +def get_storagepool_parameters(): + """This method provides parameters required for the ansible storage pool + module on Unity""" + return dict( + pool_name=dict(required=False, type='str'), + pool_id=dict(required=False, type='str'), + new_pool_name=dict(required=False, type='str'), + pool_description=dict(required=False, type='str'), + fast_cache=dict(required=False, type='str', choices=['enabled', + 'disabled']), + fast_vp=dict(required=False, type='str', choices=['enabled', + 'disabled']), + state=dict(required=True, type='str', choices=['present', 'absent']), + raid_groups=dict(required=False, type='dict', options=dict( + disk_group_id=dict(required=False, type='str'), + disk_num=dict(required=False, type='int'), + raid_type=dict(required=False, type='str', choices=['None', 'RAID5', 'RAID0', 'RAID1', 'RAID3', 'RAID10', + 'RAID6', 'Mixed', 'Automatic']), + stripe_width=dict(required=False, type='str', choices=['BEST_FIT', '2', '4', '5', + '6', '8', '9', '10', '12', '13', '14', '16']))), + alert_threshold=dict(required=False, type='int'), + is_harvest_enabled=dict(required=False, type='bool'), + pool_harvest_high_threshold=dict(required=False, type='float'), + pool_harvest_low_threshold=dict(required=False, type='float'), + is_snap_harvest_enabled=dict(required=False, type='bool'), + snap_harvest_high_threshold=dict(required=False, type='float'), + snap_harvest_low_threshold=dict(required=False, type='float'), + pool_type=dict(required=False, type='str', choices=['TRADITIONAL', 'DYNAMIC']) + ) 
+ + +def main(): + """ Create Unity storage pool object and perform action on it + based on user input from playbook""" + obj = StoragePool() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py new file mode 100644 index 00000000..b6c6584d --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py @@ -0,0 +1,708 @@ +#!/usr/bin/python +# Copyright: (c) 2021, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing quota tree on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: tree_quota +short_description: Manage quota tree on the Unity storage system +description: +- Managing Quota tree on the Unity storage system includes + Create quota tree, + Get quota tree, + Modify quota tree and + Delete quota tree. +version_added: '1.2.0' +extends_documentation_fragment: + - dellemc.unity.unity +author: +- Spandita Panigrahi (@panigs7) +options: + filesystem_name: + description: + - The name of the filesystem for which quota tree is created. + - For creation or modification of a quota tree either I(filesystem_name) or + I(filesystem_id) is required. + type: str + filesystem_id: + description: + - The ID of the filesystem for which the quota tree is created. + - For creation of a quota tree either I(filesystem_id) or + I(filesystem_name) is required. + type: str + nas_server_name: + description: + - The name of the NAS server in which the filesystem is created. + - For creation of a quota tree either I(nas_server_name) or + I(nas_server_id) is required. + type: str + nas_server_id: + description: + - The ID of the NAS server in which the filesystem is created. 
+ - For creation of a quota tree either I(filesystem_id) or + I(filesystem_name) is required. + type: str + tree_quota_id: + description: + - The ID of the quota tree. + - Either I(tree_quota_id) or I(path) to quota tree is required to + view/modify/delete quota tree. + type: str + path: + description: + - The path to the quota tree. + - Either I(tree_quota_id) or I(path) to quota tree is required to + create/view/modify/delete a quota tree. + - Path must start with a forward slash '/'. + type: str + hard_limit: + description: + - Hard limitation for a quota tree on the total space available. If exceeded, + users in quota tree cannot write data. + - Value C(0) implies no limit. + - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be both C(0) + during creation of a quota tree. + type: int + soft_limit: + description: + - Soft limitation for a quota tree on the total space available. If exceeded, + notification will be sent to users in the quota tree for the grace period mentioned, beyond + which users cannot use space. + - Value C(0) implies no limit. + - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation of quota tree. + type: int + cap_unit: + description: + - Unit of I(soft_limit) and I(hard_limit) size. + - It defaults to C(GB) if not specified. + choices: ['MB', 'GB', 'TB'] + type: str + description: + description: + - Description of a quota tree. + type: str + state: + description: + - The state option is used to mention the existence of the filesystem + quota tree. + type: str + required: true + choices: ['absent', 'present'] + +notes: + - The I(check_mode) is not supported. 
+''' + +EXAMPLES = r''' + - name: Get quota tree details by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_10" + state: "present" + + - name: Get quota tree details by quota tree path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "fs_2171" + nas_server_id: "nas_21" + path: "/test" + state: "present" + + - name: Create quota tree for a filesystem with filesystem id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + path: "/test_new" + state: "present" + + - name: Create quota tree for a filesystem with filesystem name + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "Test_filesystem" + nas_server_name: "lglad068" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + path: "/test_new" + state: "present" + + - name: Modify quota tree limit usage by quota tree path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + path: "/test_new" + hard_limit: 10 + cap_unit: "TB" + soft_limit: 8 + state: "present" + + - name: Modify quota tree by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + tree_quota_id: "treequota_171798700679_10" + hard_limit: 12 + cap_unit: "TB" + soft_limit: 10 + state: "present" + + - name: Delete quota 
tree by quota tree id + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + tree_quota_id: "treequota_171798700679_10" + state: "absent" + + - name: Delete quota tree by path + dellemc.unity.tree_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/test_new" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True + +get_tree_quota_details: + description: Details of the quota tree. + returned: When quota tree exists + type: dict + contains: + filesystem: + description: Filesystem details for which the quota + tree is created. + type: dict + contains: + UnityFileSystem: + description: Filesystem details for which the + quota tree is created. + type: dict + contains: + id: + description: ID of the filesystem for + which the quota tree is create. + type: str + description: + description: Description of the quota tree. + type: str + path: + description: Path to quota tree. + A valid path must start with a forward slash '/'. + It is mandatory while creating a quota tree. + type: str + hard_limit: + description: Hard limit of quota tree. + If the quota tree's space usage exceeds + the hard limit, users in quota tree cannot write data. + type: int + soft_limit: + description: Soft limit of the quota tree. + If the quota tree's space usage exceeds the soft limit, + the storage system starts to count down based + on the specified grace period. + type: int + id: + description: Quota tree ID. + type: str + size_used: + description: Size of used space in the filesystem by the user files. + type: int + gp_left: + description: The grace period left after the + soft limit for the user quota is exceeded. 
+ type: int + state: + description: State of the quota tree. + type: int + sample: { + "description": "", + "existed": true, + "filesystem": { + "UnityFileSystem": { + "hash": 8788549469862, + "id": "fs_137", + "name": "test", + "nas_server": { + "id": "nas_1", + "name": "lglad072" + } + } + }, + "gp_left": null, + "hard_limit": "6.0 TB", + "hash": 8788549497558, + "id": "treequota_171798694897_1", + "path": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "size_used": 0, + "soft_limit": "5.0 TB", + "state": 0 + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('tree_quota') + +application_type = "Ansible/1.5.0" + + +class QuotaTree(object): + """Class with Quota Tree operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_quota_tree_parameters()) + + mutually_exclusive = [['filesystem_name', 'filesystem_id'], + ['nas_server_name', 'nas_server_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def check_quota_tree_is_present(self, fs_id, path, tree_quota_id): + """ + Check if quota tree is present in filesystem. + :param fs_id: ID of filesystem where quota tree is searched. + :param path: Path to the quota tree + :param tree_quota_id: ID of the quota tree + :return: ID of quota tree if it exists else None. 
+ """ + if tree_quota_id is None and path is None: + return None + + all_tree_quota = self.unity_conn.get_tree_quota(filesystem=fs_id, + id=tree_quota_id, + path=path) + + if tree_quota_id and len(all_tree_quota) == 0 \ + and self.module.params['state'] == "present": + errormsg = "Tree quota %s does not exist." % tree_quota_id + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + if len(all_tree_quota) > 0: + msg = "Quota tree with id %s is present in filesystem %s" % (all_tree_quota[0].id, + fs_id) + LOG.info(msg) + return all_tree_quota[0].id + else: + return None + + def create_quota_tree(self, fs_id, soft_limit, hard_limit, unit, path, description): + """ + Create quota tree of a filesystem. + :param fs_id: ID of filesystem where quota tree is to be created. + :param soft_limit: Soft limit + :param hard_limit: Hard limit + :param unit: Unit of soft limit and hard limit + :param path: Path to quota tree + :param description: Description for quota tree + :return: Dict containing new quota tree details. + """ + + if soft_limit is None and hard_limit is None: + errormsg = "Both soft limit and hard limit cannot be empty. " \ + "Please provide atleast one to create quota tree." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit) + hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit) + try: + obj_tree_quota = self.unity_conn.create_tree_quota(filesystem_id=fs_id, hard_limit=hard_limit_in_bytes, + soft_limit=soft_limit_in_bytes, path=path, + description=description) + LOG.info("Successfully created quota tree") + + if obj_tree_quota: + return obj_tree_quota + else: + return None + + except Exception as e: + errormsg = "Create quota tree operation at path {0} failed in filesystem {1}" \ + " with error {2}".format(path, fs_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_filesystem_tree_quota_display_attributes(self, tree_quota_id): + """Display quota tree attributes + :param tree_quota_id: Quota tree ID + :return: Quota tree dict to display + """ + try: + tree_quota_obj = self.unity_conn.get_tree_quota(_id=tree_quota_id) + tree_quota_details = tree_quota_obj._get_properties() + if tree_quota_obj and tree_quota_obj.existed: + tree_quota_details['soft_limit'] = utils. \ + convert_size_with_unit(int(tree_quota_details['soft_limit'])) + tree_quota_details['hard_limit'] = utils. \ + convert_size_with_unit(int(tree_quota_details['hard_limit'])) + + tree_quota_details['filesystem']['UnityFileSystem']['name'] = \ + tree_quota_obj.filesystem.name + tree_quota_details['filesystem']['UnityFileSystem'].update( + {'nas_server': {'name': tree_quota_obj.filesystem.nas_server.name, + 'id': tree_quota_obj.filesystem.nas_server.id}}) + return tree_quota_details + + except Exception as e: + errormsg = "Failed to display quota tree details {0} with " \ + "error {1}".format(tree_quota_obj.id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_filesystem(self, nas_server=None, name=None, id=None): + """ + Get filesystem details. + :param nas_server: Nas server object. + :param name: Name of filesystem. 
+ :param id: ID of filesystem. + :return: Dict containing filesystem details if it exists. + """ + id_or_name = id if id else name + try: + obj_fs = None + if name: + if not nas_server: + err_msg = "NAS Server is required to get the FileSystem." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + obj_fs = self.unity_conn.get_filesystem(name=name, + nas_server=nas_server) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + if id: + if nas_server: + obj_fs = self.unity_conn \ + .get_filesystem(id=id, nas_server=nas_server) + else: + obj_fs = self.unity_conn.get_filesystem(id=id) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + except Exception as e: + error_msg = "Failed to get filesystem %s with error %s." \ + % (id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_nas_server_obj(self, name=None, id=None): + """ + Get nas server details. + :param name: Nas server name. + :param id: Nas server ID. + :return: Dict containing nas server details if it exists. + """ + nas_server = id if id else name + error_msg = ("Failed to get NAS server %s." % nas_server) + try: + obj_nas = self.unity_conn.get_nas_server(_id=id, name=name) + if name and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + elif id and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + else: + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + except Exception as e: + error_msg = "Failed to get NAS server %s with error %s." \ + % (nas_server, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def modify_tree_quota(self, tree_quota_id, soft_limit, hard_limit, unit, description): + """ + Modify quota tree of filesystem. 
+ :param tree_quota_id: ID of the quota tree + :param soft_limit: Soft limit + :param hard_limit: Hard limit + :param unit: Unit of soft limit and hard limit + :param description: Description of quota tree + :return: Boolean value whether modify quota tree operation is successful. + """ + try: + if soft_limit is None and hard_limit is None: + return False + tree_quota_obj = self.unity_conn.get_tree_quota(tree_quota_id)._get_properties() + if soft_limit is None: + soft_limit_in_bytes = tree_quota_obj['soft_limit'] + else: + soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit) + if hard_limit is None: + hard_limit_in_bytes = tree_quota_obj['hard_limit'] + else: + hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit) + + if description is None: + description = tree_quota_obj['description'] + + if tree_quota_obj: + if tree_quota_obj['soft_limit'] == soft_limit_in_bytes and \ + tree_quota_obj['hard_limit'] == hard_limit_in_bytes and \ + tree_quota_obj['description'] == description: + return False + else: + modify_tree_quota = self.unity_conn.modify_tree_quota(tree_quota_id=tree_quota_id, + hard_limit=hard_limit_in_bytes, + soft_limit=soft_limit_in_bytes, + description=description) + LOG.info("Successfully modified quota tree") + if modify_tree_quota: + return True + except Exception as e: + errormsg = "Modify quota tree operation {0} failed" \ + " with error {1}".format(tree_quota_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_tree_quota(self, tree_quota_id): + """ + Delete quota tree of a filesystem. 
+ :param tree_quota_id: ID of quota tree + :return: Boolean whether quota tree is deleted + """ + + try: + delete_tree_quota_obj = self.unity_conn.delete_tree_quota(tree_quota_id=tree_quota_id) + + if delete_tree_quota_obj: + return True + + except Exception as e: + errormsg = "Delete operation of quota tree id:{0} " \ + "failed with error {1}".format(tree_quota_id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def perform_module_operation(self): + """ + Perform different actions on quota tree module based on parameters + passed in the playbook + """ + filesystem_id = self.module.params['filesystem_id'] + filesystem_name = self.module.params['filesystem_name'] + nas_server_name = self.module.params['nas_server_name'] + nas_server_id = self.module.params['nas_server_id'] + cap_unit = self.module.params['cap_unit'] + state = self.module.params['state'] + hard_limit = self.module.params['hard_limit'] + soft_limit = self.module.params['soft_limit'] + path = self.module.params['path'] + description = self.module.params['description'] + tree_quota_id = self.module.params['tree_quota_id'] + create_tree_quota_obj = None + nas_server_resource = None + get_unity_quota_tree_details = None + fs_id = None + changed = False + + ''' + result is a dictionary to contain end state and quota tree details + ''' + result = dict( + changed=False, + create_tree_quota=False, + modify_tree_quota=False, + get_tree_quota_details={}, + delete_tree_quota=False + + ) + + if (soft_limit or hard_limit) and cap_unit is None: + cap_unit = 'GB' + + if soft_limit and utils.is_size_negative(soft_limit): + error_message = "Invalid soft_limit provided, " \ + "must be greater than or equal to 0" + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if hard_limit and utils.is_size_negative(hard_limit): + error_message = "Invalid hard_limit provided, " \ + "must be greater than or equal to 0" + LOG.error(error_message) + 
self.module.fail_json(msg=error_message) + + ''' + Get NAS server Object + ''' + + if nas_server_name is not None: + if utils.is_input_empty(nas_server_name): + self.module.fail_json(msg="Invalid nas_server_name given," + " Please provide a valid name.") + nas_server_resource = self \ + .get_nas_server_obj(name=nas_server_name) + elif nas_server_id is not None: + if utils.is_input_empty(nas_server_id): + self.module.fail_json(msg="Invalid nas_server_id given," + " Please provide a valid ID.") + nas_server_resource = self.get_nas_server_obj(id=nas_server_id) + + ''' + Get filesystem Object + ''' + if filesystem_name is not None: + if utils.is_input_empty(filesystem_name): + self.module.fail_json(msg="Invalid filesystem_name given," + " Please provide a valid name.") + filesystem_obj = self \ + .get_filesystem(nas_server=nas_server_resource, + name=filesystem_name) + fs_id = filesystem_obj.id + elif filesystem_id is not None: + if utils.is_input_empty(filesystem_id): + self.module.fail_json(msg="Invalid filesystem_id given," + " Please provide a valid ID.") + filesystem_obj = self \ + .get_filesystem(id=filesystem_id) + if filesystem_obj: + fs_id = filesystem_obj[0].id + else: + self.module.fail_json(msg="Filesystem does not exist.") + + ''' + Validate path to quota tree + ''' + if path is not None: + if utils.is_input_empty(path): + self.module.fail_json(msg=" Please provide a valid path.") + elif not path.startswith('/'): + self.module.fail_json(msg="The path is relative to the root of the file system " + "and must start with a forward slash '/'.") + + if filesystem_id is None and filesystem_name is None: + self.module.fail_json(msg="Please provide either filesystem_name or fileystem_id.") + + quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id) + tree_quota_id = quota_tree_id_present + + ''' + Create quota tree + ''' + + if (filesystem_id or filesystem_name) and path is not None and state == "present": + if not tree_quota_id: + 
LOG.info("Creating quota tree") + create_tree_quota_obj = self.create_quota_tree(fs_id, soft_limit, hard_limit, + cap_unit, path, description) + + if create_tree_quota_obj: + tree_quota_id = create_tree_quota_obj.id + result['create_tree_quota'] = True + + ''' + Modify quota tree + ''' + + if tree_quota_id and state == "present": + LOG.info("Modifying quota tree") + result['modify_tree_quota'] = self.modify_tree_quota(tree_quota_id, soft_limit, hard_limit, cap_unit, + description) + + ''' + Delete quota tree + ''' + + if tree_quota_id is not None and state == "absent": + LOG.info("Deleting quota tree") + result['delete_tree_quota'] = self.delete_tree_quota(tree_quota_id) + + ''' + Get quota tree details + ''' + if state == "present" and tree_quota_id is not None: + result['get_tree_quota_details'] = self.get_filesystem_tree_quota_display_attributes(tree_quota_id) + else: + result['get_tree_quota_details'] = {} + + if result['create_tree_quota'] or result['modify_tree_quota'] or result['delete_tree_quota']: + result['changed'] = True + + self.module.exit_json(**result) + + +def get_quota_tree_parameters(): + """This method provide parameters required for the ansible + quota tree module on Unity""" + return dict( + filesystem_id=dict(required=False, type='str'), + filesystem_name=dict(required=False, type='str'), + state=dict(required=True, type='str', choices=['present', 'absent']), + hard_limit=dict(required=False, type='int'), + soft_limit=dict(required=False, type='int'), + cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']), + tree_quota_id=dict(required=False, type='str'), + nas_server_name=dict(required=False, type='str'), + nas_server_id=dict(required=False, type='str'), + path=dict(required=False, type='str', no_log=True), + description=dict(required=False, type='str') + ) + + +def main(): + """ Create Unity quota tree object and perform action on it + based on user input from playbook""" + obj = QuotaTree() + obj.perform_module_operation() 
+ + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/user_quota.py b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py new file mode 100644 index 00000000..0ad70088 --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py @@ -0,0 +1,1013 @@ +#!/usr/bin/python +# Copyright: (c) 2021, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing User Quota on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: user_quota +short_description: Manage user quota on the Unity storage system +description: +- Managing User Quota on the Unity storage system includes + Create user quota, + Get user quota, + Modify user quota, + Delete user quota, + Create user quota for quota tree, + Modify user quota for quota tree and + Delete user quota for quota tree. +version_added: '1.2.0' +extends_documentation_fragment: + - dellemc.unity.unity +author: +- Spandita Panigrahi (@panigs7) +options: + filesystem_name: + description: + - The name of the filesystem for which the user quota is created. + - For creation of a user quota either I(filesystem_name) or + I(filesystem_id) is required. + type: str + filesystem_id: + description: + - The ID of the filesystem for which the user quota is created. + - For creation of a user quota either I(filesystem_id) or + I(filesystem_name) is required. + type: str + nas_server_name: + description: + - The name of the NAS server in which the filesystem is created. + - For creation of a user quota either I(nas_server_name) or + I(nas_server_id) is required. + type: str + nas_server_id: + description: + - The ID of the NAS server in which the filesystem is created. + - For creation of a user quota either I(filesystem_id) or + I(filesystem_name) is required. 
+ type: str + hard_limit: + description: + - Hard limitation for a user on the total space available. If exceeded, user cannot write data. + - Value C(0) implies no limit. + - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be C(0) + during creation or modification of user quota. + type: int + soft_limit: + description: + - Soft limitation for a user on the total space available. If exceeded, + notification will be sent to the user for the grace period mentioned, beyond + which the user cannot use space. + - Value C(0) implies no limit. + - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation or modification + of user quota. + type: int + cap_unit: + description: + - Unit of I(soft_limit) and I(hard_limit) size. + - It defaults to C(GB) if not specified. + choices: ['MB', 'GB', 'TB'] + type: str + user_type: + description: + - Type of user creating a user quota. + - Mandatory while creating or modifying user quota. + choices: ['Unix', 'Windows'] + type: str + win_domain: + description: + - Fully qualified or short domain name for Windows user type. + - Mandatory when I(user_type) is C(Windows). + type: str + user_name: + description: + - User name of the user quota when I(user_type) is C(Windows) or C(Unix). + - Option I(user_name) must be specified along with I(win_domain) when I(user_type) is C(Windows). + type: str + uid: + description: + - User ID of the user quota. + type: str + user_quota_id: + description: + - User quota ID generated after creation of a user quota. + type: str + tree_quota_id: + description: + - The ID of the quota tree. + - Either I(tree_quota_id) or I(path) to quota tree is required to + create/modify/delete user quota for a quota tree. + type: str + path: + description: + - The path to the quota tree. + - Either I(tree_quota_id) or I(path) to quota tree is required to + create/modify/delete user quota for a quota tree. + - Path must start with a forward slash '/'. 
+ type: str + state: + description: + - The I(state) option is used to mention the existence of the user quota. + type: str + required: true + choices: ['absent', 'present'] + +notes: + - The I(check_mode) is not supported. +''' + +EXAMPLES = r''' + - name: Get user quota details by user quota id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + user_quota_id: "userquota_171798700679_0_123" + state: "present" + + - name: Get user quota details by user quota uid/user name + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "fs_2171" + nas_server_id: "nas_21" + user_name: "test" + state: "present" + + - name: Create user quota for a filesystem with filesystem id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + uid: "111" + state: "present" + + - name: Create user quota for a filesystem with filesystem name + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_name: "Test_filesystem" + nas_server_name: "lglad068" + hard_limit: 6 + cap_unit: "TB" + soft_limit: 5 + uid: "111" + state: "present" + + - name: Modify user quota limit usage by user quota id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + user_quota_id: "userquota_171798700679_0_123" + hard_limit: 10 + cap_unit: "TB" + soft_limit: 8 + state: "present" + + - name: Modify user quota by filesystem id and user quota uid/user_name + dellemc.unity.user_quota: + unispherehost: 
"{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + hard_limit: 12 + cap_unit: "TB" + soft_limit: 10 + state: "present" + + - name: Delete user quota + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + win_domain: "prod" + user_name: "sample" + state: "absent" + + - name: Create user quota of a quota tree + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + soft_limit: 9 + cap_unit: "TB" + state: "present" + + - name: Create user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + user_type: "Unix" + user_name: "test" + hard_limit: 2 + cap_unit: "TB" + state: "present" + + - name: Modify user quota of a quota tree + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + soft_limit: 10 + cap_unit: "TB" + state: "present" + + - name: Modify user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + user_type: "Windows" + win_domain: "prod" + user_name: "sample" + hard_limit: 12 + 
cap_unit: "TB" + state: "present" + + - name: Delete user quota of a quota tree by quota tree path + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + filesystem_id: "fs_2171" + path: "/sample" + win_domain: "prod" + user_name: "sample" + state: "absent" + + - name: Delete user quota of a quota tree by quota tree id + dellemc.unity.user_quota: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + tree_quota_id: "treequota_171798700679_4" + win_domain: "prod" + user_name: "sample" + state: "absent" +''' + +RETURN = r''' +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True + +get_user_quota_details: + description: Details of the user quota. + returned: When user quota exists + type: dict + contains: + filesystem: + description: Filesystem details for which the user quota is + created. + type: dict + contains: + UnityFileSystem: + description: Filesystem details for which the + user quota is created. + type: dict + contains: + id: + description: ID of the filesystem for + which the user quota is created. + type: str + name: + description: Name of filesystem. + type: str + nas_server: + description: Nasserver details where + filesystem is created. + type: dict + contains: + name: + description: Name of nasserver. + type: str + id: + description: ID of nasserver. + type: str + tree_quota: + description: Quota tree details for which the user quota is + created. + type: dict + contains: + UnityTreeQuota: + description: Quota tree details for which the user + quota is created. + type: dict + contains: + id: + description: ID of the quota tree. + type: str + path: + description: Path to quota tree. + type: str + gp_left: + description: The grace period left after the soft limit + for the user quota is exceeded. 
+ type: int + hard_limit: + description: Hard limitation for a user on the total space + available. If exceeded, user cannot write data. + type: int + hard_ratio: + description: The hard ratio is the ratio between the + hard limit size of the user quota + and the amount of storage actually consumed. + type: str + soft_limit: + description: Soft limitation for a user on the total space + available. If exceeded, notification will be + sent to user for the grace period mentioned, beyond + which user cannot use space. + type: int + soft_ratio: + description: The soft ratio is the ratio between + the soft limit size of the user quota + and the amount of storage actually consumed. + type: str + id: + description: User quota ID. + type: str + size_used: + description: Size of used space in the filesystem + by the user files. + type: int + state: + description: State of the user quota. + type: int + uid: + description: User ID of the user. + type: int + unix_name: + description: Unix user name for this user quota's uid. + type: str + windows_names: + description: Windows user name that maps to this quota's uid. 
+ type: str + windows_sids: + description: Windows SIDs that maps to this quota's uid + type: str + sample: { + "existed": true, + "filesystem": { + "UnityFileSystem": { + "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "id": "fs_120", + "name": "nfs-multiprotocol", + "nas_server": { + "id": "nas_1", + "name": "lglad072" + } + } + }, + "gp_left": null, + "hard_limit": "10.0 GB", + "hard_ratio": null, + "hash": 8752448438089, + "id": "userquota_171798694698_0_60000", + "size_used": 0, + "soft_limit": "10.0 GB", + "soft_ratio": null, + "state": 0, + "tree_quota": null, + "uid": 60000, + "unix_name": null, + "windows_names": null, + "windows_sids": null + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +LOG = utils.get_logger('user_quota') + +application_type = "Ansible/1.5.0" + + +class UserQuota(object): + """Class with User Quota operations""" + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_user_quota_parameters()) + + mutually_exclusive = [['user_name', 'uid'], ['uid', 'win_domain'], + ['filesystem_name', 'filesystem_id'], + ['nas_server_name', 'nas_server_id'], + ['user_name', 'user_quota_id'], + ['uid', 'user_quota_id']] + + required_if = [('user_type', 'Windows', ['win_domain', 'user_name'], False), + ('user_type', 'Unix', ['user_name'], False)] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def check_user_is_present(self, fs_id, uid, unix, win_name, user_quota_id): + """ + Check if user quota is present in filesystem. 
+ :param fs_id: ID of filesystem where user quota is searched. + :param uid: UID of the user quota + :param unix: Unix user name of user quota + :param win_name: Windows user name of user quota + :param user_quota_id: ID of the user quota + :return: ID of user quota if it exists else None. + """ + + if not self.check_user_type_provided(win_name, uid, unix): + return None + + user_name_or_uid_or_id = unix if unix else win_name if win_name else uid if \ + uid else user_quota_id + + # All user quotas in the given filesystem + all_user_quota = self.unity_conn.get_user_quota(filesystem=fs_id, id=user_quota_id, + unix_name=unix, windows_names=win_name, + uid=uid) + + for user_quota in range(len(all_user_quota)): + + if all_user_quota[user_quota].tree_quota is None: + msg = "User quota %s with id %s " \ + "is present in filesystem %s" \ + % (user_name_or_uid_or_id, all_user_quota[user_quota].id, fs_id) + LOG.info(msg) + return all_user_quota[user_quota].id + + return None + + def check_quota_tree_is_present(self, fs_id, path, tree_quota_id): + """ + Check if quota tree is present in filesystem. + :param fs_id: ID of filesystem where quota tree is searched. + :param path: Path to quota tree + :param tree_quota_id: ID of the quota tree + :return: ID of quota tree if it exists. + """ + + path_or_id = path if path else tree_quota_id + tree_quota_obj = self.unity_conn.get_tree_quota(filesystem=fs_id, path=path, + id=tree_quota_id) + if len(tree_quota_obj) > 0: + msg = "Tree quota id %s present in filesystem %s" % (tree_quota_obj[0].id, fs_id) + LOG.info(msg) + return tree_quota_obj[0].id + else: + errormsg = "The quota tree '%s' does not exist" % path_or_id + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def check_user_quota_in_quota_tree(self, tree_quota_id, uid, unix, win_name, user_quota_id): + """ + Check if user quota is present in quota tree. + :param tree_quota_id: ID of quota tree where user quota is searched. 
+ :param uid: UID of user quota + :param unix: Unix name of user quota + :param win_name: Windows name of user quota + :param user_quota_id: ID of the user quota + :return: ID of user quota if it exists in quota tree else None. + """ + if not self.check_user_type_provided(win_name, uid, unix): + return None + + user_quota_name = uid if uid else unix if unix else win_name \ + if win_name else user_quota_id + user_quota_obj = self.unity_conn.get_user_quota(tree_quota=tree_quota_id, + uid=uid, windows_names=win_name, + unix_name=unix, + id=user_quota_id) + if len(user_quota_obj) > 0: + msg = "User quota %s is present in quota tree %s " % (user_quota_name, tree_quota_id) + LOG.info(msg) + return user_quota_obj[0].id + else: + return None + + def create_user_quota(self, fs_id, soft_limit, hard_limit, unit, uid, unix, win_name, tree_quota_id): + """ + Create user quota of a filesystem. + :param fs_id: ID of filesystem where user quota is to be created. + :param soft_limit: Soft limit + :param hard_limit: Hard limit + :param unit: Unit of soft limit and hard limit + :param uid: UID of the user quota + :param unix: Unix user name of user quota + :param win_name: Windows user name of user quota + :param tree_quota_id: ID of tree quota + :return: Object containing new user quota details. + """ + + unix_or_uid_or_win = uid if uid else unix if unix else win_name + fs_id_or_tree_quota_id = fs_id if fs_id else tree_quota_id + if soft_limit is None and hard_limit is None: + errormsg = "Both soft limit and hard limit cannot be empty. " \ + "Please provide atleast one to create user quota." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit) + hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit) + try: + if self.check_user_type_provided(win_name, uid, unix): + obj_user_quota = self.unity_conn.create_user_quota(filesystem_id=fs_id, + hard_limit=hard_limit_in_bytes, + soft_limit=soft_limit_in_bytes, + uid=uid, unix_name=unix, + win_name=win_name, + tree_quota_id=tree_quota_id) + LOG.info("Successfully created user quota") + return obj_user_quota + + except Exception as e: + errormsg = "Create quota for user {0} on {1} , failed with error {2} "\ + .format(unix_or_uid_or_win, fs_id_or_tree_quota_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_filesystem_user_quota_display_attributes(self, user_quota_id): + """Get display user quota attributes + :param user_quota_id: User quota ID + :return: User quota dict to display + """ + try: + user_quota_obj = self.unity_conn.get_user_quota(user_quota_id) + user_quota_details = user_quota_obj._get_properties() + + if user_quota_obj and user_quota_obj.existed: + user_quota_details['soft_limit'] = utils. \ + convert_size_with_unit(int(user_quota_details['soft_limit'])) + user_quota_details['hard_limit'] = utils. \ + convert_size_with_unit(int(user_quota_details['hard_limit'])) + + user_quota_details['filesystem']['UnityFileSystem']['name'] = \ + user_quota_obj.filesystem.name + user_quota_details['filesystem']['UnityFileSystem'].update( + {'nas_server': {'name': user_quota_obj.filesystem.nas_server.name, + 'id': user_quota_obj.filesystem.nas_server.id}}) + + if user_quota_obj.tree_quota: + user_quota_details['tree_quota']['UnityTreeQuota']['path'] = \ + user_quota_obj.tree_quota.path + + return user_quota_details + else: + errormsg = "User quota does not exist." 
+ LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + except Exception as e: + errormsg = "Failed to display the details of user quota {0} with " \ + "error {1}".format(user_quota_obj.id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_filesystem(self, nas_server=None, name=None, id=None): + """ + Get filesystem details. + :param nas_server: Nas server object. + :param name: Name of filesystem. + :param id: ID of filesystem. + :return: Object containing filesystem details if it exists. + """ + id_or_name = id if id else name + try: + obj_fs = None + if name: + if not nas_server: + err_msg = "NAS Server is required to get the FileSystem." + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + obj_fs = self.unity_conn.get_filesystem(name=name, + nas_server=nas_server) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + if id: + if nas_server: + obj_fs = self.unity_conn \ + .get_filesystem(id=id, nas_server=nas_server) + else: + obj_fs = self.unity_conn.get_filesystem(id=id) + if obj_fs and obj_fs.existed: + LOG.info("Successfully got the filesystem object %s.", + obj_fs) + return obj_fs + except Exception as e: + error_msg = "Failed to get filesystem %s with error %s." \ + % (id_or_name, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def get_nas_server_obj(self, name=None, id=None): + """ + Get nas server details. + :param name: Nas server name. + :param id: Nas server ID. + :return: Object containing nas server details if it exists. + """ + nas_server = id if id else name + error_msg = ("Failed to get NAS server %s." 
% nas_server) + try: + obj_nas = self.unity_conn.get_nas_server(_id=id, name=name) + if name and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + elif id and obj_nas.existed: + LOG.info("Successfully got the NAS server object %s.", + obj_nas) + return obj_nas + else: + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + except Exception as e: + error_msg = "Failed to get NAS server %s with error %s." \ + % (nas_server, str(e)) + LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + def modify_user_quota(self, user_quota_id, soft_limit, hard_limit, unit): + """ + Modify user quota of filesystem by its uid/username/user quota id. + :param user_quota_id: ID of the user quota + :param soft_limit: Soft limit + :param hard_limit: Hard limit + :param unit: Unit of soft limit and hard limit + :return: Boolean value whether modify user quota operation is successful. + """ + + if soft_limit is None and hard_limit is None: + return False + + user_quota_obj = self.unity_conn.get_user_quota(user_quota_id)._get_properties() + + if soft_limit is None: + soft_limit_in_bytes = user_quota_obj['soft_limit'] + else: + soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit) + + if hard_limit is None: + hard_limit_in_bytes = user_quota_obj['hard_limit'] + else: + hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit) + + if user_quota_obj: + if user_quota_obj['soft_limit'] == soft_limit_in_bytes and \ + user_quota_obj['hard_limit'] == hard_limit_in_bytes: + return False + else: + error_msg = "The user quota does not exist." 
+ LOG.error(error_msg) + self.module.fail_json(msg=error_msg) + + try: + obj_user_quota = self.unity_conn.modify_user_quota(user_quota_id=user_quota_id, + hard_limit=hard_limit_in_bytes, + soft_limit=soft_limit_in_bytes) + LOG.info("Successfully modified user quota") + if obj_user_quota: + return True + except Exception as e: + errormsg = "Modify user quota {0} failed" \ + " with error {1}".format(user_quota_id, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def check_user_type_provided(self, win_name, uid, unix_name): + """Checks if user type or uid is provided + :param win_name: Windows name of user quota + :param uid: UID of user quota + :param unix_name: Unix name of user quota""" + if win_name is None and uid is None and unix_name is None: + return False + else: + return True + + def perform_module_operation(self): + """ + Perform different actions on user quota module based on parameters + passed in the playbook + """ + filesystem_id = self.module.params['filesystem_id'] + filesystem_name = self.module.params['filesystem_name'] + nas_server_name = self.module.params['nas_server_name'] + nas_server_id = self.module.params['nas_server_id'] + cap_unit = self.module.params['cap_unit'] + state = self.module.params['state'] + user_quota_id = self.module.params['user_quota_id'] + hard_limit = self.module.params['hard_limit'] + soft_limit = self.module.params['soft_limit'] + user_type = self.module.params['user_type'] + uid = self.module.params['uid'] + user_name = self.module.params['user_name'] + win_domain = self.module.params['win_domain'] + tree_quota_id = self.module.params['tree_quota_id'] + path = self.module.params['path'] + create_user_quota_obj = None + win_name = None + unix_name = None + nas_server_resource = None + fs_id = None + user_quota_details = '' + filesystem_obj = None + changed = False + + ''' + result is a dictionary to contain end state and user quota details + ''' + result = dict( + changed=False, + 
create_user_quota=False, + modify_user_quota=False, + get_user_quota_details={}, + delete_user_quota=False + ) + + if (soft_limit or hard_limit) and cap_unit is None: + cap_unit = 'GB' + + if soft_limit == 0 and hard_limit == 0: + error_message = 'Both soft limit and hard limit cannot be unlimited' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if soft_limit and utils.is_size_negative(soft_limit): + error_message = "Invalid soft_limit provided, " \ + "must be greater than 0" + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if hard_limit and utils.is_size_negative(hard_limit): + error_message = "Invalid hard_limit provided, " \ + "must be greater than 0" + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if (user_type or uid) and filesystem_id is None and \ + filesystem_name is None and tree_quota_id is None: + error_message = 'Please provide either ' \ + 'filesystem_name or filesystem_id' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + if (nas_server_name or nas_server_id) \ + and (filesystem_id is None and filesystem_name is None): + error_message = 'Please provide either ' \ + 'filesystem_name or filesystem_id' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + ''' + Validate path to quota tree + ''' + if path is not None: + if utils.is_input_empty(path): + self.module.fail_json(msg=" Please provide a valid path.") + elif not path.startswith('/'): + self.module.fail_json(msg="The path is relative to the root of the file system " + "and must start with a forward slash.") + + if filesystem_id is None and filesystem_name is None: + self.module.fail_json(msg="Please provide either filesystem_name or fileystem_id.") + + if user_type and filesystem_id is None and filesystem_name is None and tree_quota_id is None: + error_message = 'Please provide either ' \ + 'filesystem_name or filesystem_id to create user quota for a' \ + 'filesystem. 
Or provide tree_quota_id to create user quota for a quota tree.' + LOG.error(error_message) + self.module.fail_json(msg=error_message) + + ''' + Get NAS server Object + ''' + + if nas_server_name is not None: + if utils.is_input_empty(nas_server_name): + self.module.fail_json(msg="Invalid nas_server_name given," + " Please provide a valid name.") + nas_server_resource = self \ + .get_nas_server_obj(name=nas_server_name) + elif nas_server_id is not None: + if utils.is_input_empty(nas_server_id): + self.module.fail_json(msg="Invalid nas_server_id given," + " Please provide a valid ID.") + nas_server_resource = self.get_nas_server_obj(id=nas_server_id) + + ''' + Get filesystem Object + ''' + if filesystem_name is not None: + if utils.is_input_empty(filesystem_name): + self.module.fail_json(msg="Invalid filesystem_name given," + " Please provide a valid name.") + filesystem_obj = self \ + .get_filesystem(nas_server=nas_server_resource, + name=filesystem_name) + fs_id = filesystem_obj.id + elif filesystem_id is not None: + if utils.is_input_empty(filesystem_id): + self.module.fail_json(msg="Invalid filesystem_id given," + " Please provide a valid ID.") + filesystem_obj = self \ + .get_filesystem(id=filesystem_id) + if filesystem_obj: + filesystem_obj = filesystem_obj[0] + fs_id = filesystem_obj.id + else: + self.module.fail_json(msg="Filesystem does not exist.") + + if (user_name or win_domain) and (soft_limit or hard_limit) \ + and user_type is None: + self.module.fail_json(msg="Invalid user_type given," + " Please provide a valid user_type.") + + # Check the sharing protocol supported by the filesystem + # while creating a user quota + if filesystem_obj and (soft_limit is not None or hard_limit is not None): + supported_protocol = filesystem_obj.supported_protocols + + if supported_protocol == utils.FSSupportedProtocolEnum["CIFS"] \ + and (user_type == "Unix" or uid): + self.module.fail_json(msg="This filesystem supports only SMB protocol " + "and applicable only for 
windows users. " + "Please provide valid windows details.") + elif supported_protocol == utils.FSSupportedProtocolEnum["NFS"] \ + and user_type == "Windows": + self.module.fail_json(msg="This filesystem supports only NFS protocol " + "and applicable only for unix users. " + "Please provide valid uid or unix details.") + + ''' + Validate user type or uid + ''' + if uid and (utils.is_input_empty(uid) or not uid.isnumeric()): + self.module.fail_json(msg=" UID is empty. Please provide valid UID.") + if user_type: + if user_type == "Unix": + if user_name is None or utils.is_input_empty(user_name): + self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.") + + if user_type == "Windows": + if win_domain is None or utils.is_input_empty(win_domain): + self.module.fail_json(msg=" 'win_domain' is empty. Please provide valid win_domain.") + elif user_name is None or utils.is_input_empty(user_name): + self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.") + + if user_type != "Unix" and win_domain: + win_domain = win_domain.replace(".com", "") + win_name = win_domain + '\\' + user_name + + if win_name is None and user_name: + unix_name = user_name + + ''' + Check if quota tree is already present in the filesystem + ''' + if tree_quota_id or path: + quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id) + tree_quota_id = quota_tree_id_present + + ''' + Check if the user quota is already present in the filesystem/ quota tree + ''' + if tree_quota_id: + user_id_present = self.check_user_quota_in_quota_tree(tree_quota_id, uid, unix_name, win_name, + user_quota_id) + fs_id = None if tree_quota_id is not None else fs_id + else: + user_id_present = self.check_user_is_present(fs_id, uid, unix_name, win_name, user_quota_id) + + if user_id_present: + user_quota_id = user_id_present + + if state == "present": + if user_quota_id: + # Modify user quota. If no change modify_user_quota is false. 
+ result['modify_user_quota'] = self.modify_user_quota(user_quota_id, soft_limit, + hard_limit, cap_unit) + + else: + LOG.info("Creating user quota") + create_user_quota_obj = self.create_user_quota(fs_id, soft_limit, hard_limit, + cap_unit, uid, unix_name, win_name, + tree_quota_id) + if create_user_quota_obj: + user_quota_id = create_user_quota_obj.id + result['create_user_quota'] = True + else: + user_quota_id = None + ''' + Deleting user quota. + When both soft limit and hard limit are set to 0, it implies the user quota has + unlimited quota. Thereby, Unity removes the user quota id. + ''' + + if state == "absent" and user_quota_id: + soft_limit = 0 + hard_limit = 0 + err_msg = "Deleting user quota %s" % user_quota_id + LOG.info(err_msg) + result['delete_user_quota'] = self.modify_user_quota(user_quota_id, + soft_limit, hard_limit, cap_unit) + ''' + Get user details + ''' + + if state == "present" and user_quota_id: + user_quota_details = self.get_filesystem_user_quota_display_attributes(user_quota_id) + + result['get_user_quota_details'] = user_quota_details + if result['create_user_quota'] or result['modify_user_quota'] or result['delete_user_quota']: + result['changed'] = True + + self.module.exit_json(**result) + + +def get_user_quota_parameters(): + """This method provide parameters required for the ansible filesystem + user quota module on Unity""" + return dict( + filesystem_id=dict(required=False, type='str'), + filesystem_name=dict(required=False, type='str'), + state=dict(required=True, type='str', choices=['present', 'absent']), + user_type=dict(required=False, type='str', + choices=['Windows', 'Unix']), + user_name=dict(required=False, type='str'), + uid=dict(required=False, type='str'), + win_domain=dict(required=False, type='str'), + hard_limit=dict(required=False, type='int'), + soft_limit=dict(required=False, type='int'), + cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']), + user_quota_id=dict(required=False, type='str'), + 
nas_server_name=dict(required=False, type='str'), + nas_server_id=dict(required=False, type='str'), + tree_quota_id=dict(required=False, type='str'), + path=dict(required=False, type='str', no_log=True) + ) + + +def main(): + """ Create Unity user quota object and perform action on it + based on user input from playbook""" + obj = UserQuota() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/plugins/modules/volume.py b/ansible_collections/dellemc/unity/plugins/modules/volume.py new file mode 100644 index 00000000..29d7dd7a --- /dev/null +++ b/ansible_collections/dellemc/unity/plugins/modules/volume.py @@ -0,0 +1,1256 @@ +#!/usr/bin/python +# Copyright: (c) 2020, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Ansible module for managing volumes on Unity""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" + +module: volume +version_added: '1.1.0' +short_description: Manage volume on Unity storage system +description: +- Managing volume on Unity storage system includes- + Create new volume, + Modify volume attributes, + Map Volume to host, + Unmap volume to host, + Display volume details, + Delete volume. + +extends_documentation_fragment: + - dellemc.unity.unity + +author: +- Arindam Datta (@arindam-emc) + +options: + vol_name: + description: + - The name of the volume. Mandatory only for create operation. + type: str + vol_id: + description: + - The id of the volume. + - It can be used only for get, modify, map/unmap host, or delete operation. + type: str + pool_name: + description: + - This is the name of the pool where the volume will be created. + - Either the I(pool_name) or I(pool_id) must be provided to create a new volume. + type: str + pool_id: + description: + - This is the id of the pool where the volume will be created. 
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new volume. + type: str + size: + description: + - The size of the volume. + type: int + cap_unit: + description: + - The unit of the volume size. It defaults to C(GB), if not specified. + choices: ['GB' , 'TB'] + type: str + description: + description: + - Description about the volume. + - Description can be removed by passing empty string (""). + type: str + snap_schedule: + description: + - Snapshot schedule assigned to the volume. + - Add/Remove/Modify the snapshot schedule for the volume. + type: str + compression: + description: + - Boolean variable , specifies whether or not to enable compression. + Compression is supported only for thin volumes. + type: bool + is_thin: + description: + - Boolean variable , specifies whether or not it is a thin volume. + - The value is set as C(true) by default if not specified. + type: bool + sp: + description: + - Storage Processor for this volume. + choices: ['SPA' , 'SPB'] + type: str + io_limit_policy: + description: + - IO limit policy associated with this volume. + Once it is set, it cannot be removed through ansible module but it can + be changed. + type: str + host_name: + description: + - Name of the host to be mapped/unmapped with this volume. + - Either I(host_name) or I(host_id) can be specified in one task along with + I(mapping_state). + type: str + host_id: + description: + - ID of the host to be mapped/unmapped with this volume. + - Either I(host_name) or I(host_id) can be specified in one task along with + I(mapping_state). + type: str + hlu: + description: + - Host Lun Unit to be mapped/unmapped with this volume. + - It is an optional parameter, hlu can be specified along + with I(host_name) or I(host_id) and I(mapping_state). + - If I(hlu) is not specified, unity will choose it automatically. + The maximum value supported is C(255). + type: int + mapping_state: + description: + - State of host access for volume. 
+ choices: ['mapped' , 'unmapped'] + type: str + new_vol_name: + description: + - New name of the volume for rename operation. + type: str + tiering_policy: + description: + - Tiering policy choices for how the storage resource data will be + distributed among the tiers available in the pool. + choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST'] + type: str + state: + description: + - State variable to determine whether volume will exist or not. + choices: ['absent', 'present'] + required: true + type: str + hosts: + description: + - Name of hosts for mapping to a volume. + type: list + elements: dict + suboptions: + host_name: + description: + - Name of the host. + type: str + host_id: + description: + - ID of the host. + type: str + hlu: + description: + - Host Lun Unit to be mapped/unmapped with this volume. + - It is an optional parameter, I(hlu) can be specified along + with I(host_name) or I(host_id) and I(mapping_state). + - If I(hlu) is not specified, unity will choose it automatically. + The maximum value supported is C(255). + type: str + +notes: + - The I(check_mode) is not supported. 
+""" + +EXAMPLES = r""" +- name: Create Volume + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + description: "{{description}}" + pool_name: "{{pool}}" + size: 2 + cap_unit: "{{cap_GB}}" + state: "{{state_present}}" + +- name: Expand Volume by volume id + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + size: 5 + cap_unit: "{{cap_GB}}" + state: "{{state_present}}" + +- name: Modify Volume, map host by host_name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + host_name: "{{host_name}}" + hlu: 5 + mapping_state: "{{state_mapped}}" + state: "{{state_present}}" + +- name: Modify Volume, unmap host mapping by host_name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + host_name: "{{host_name}}" + mapping_state: "{{state_unmapped}}" + state: "{{state_present}}" + +- name: Map multiple hosts to a Volume + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + hosts: + - host_name: "10.226.198.248" + hlu: 1 + - host_id: "Host_929" + hlu: 2 + mapping_state: "mapped" + state: "present" + +- name: Modify Volume attributes + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + new_vol_name: "{{new_vol_name}}" + tiering_policy: "AUTOTIER" + compression: True + state: "{{state_present}}" + +- name: Delete Volume by 
vol name + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_name: "{{vol_name}}" + state: "{{state_absent}}" + +- name: Delete Volume by vol id + dellemc.unity.volume: + unispherehost: "{{unispherehost}}" + username: "{{username}}" + password: "{{password}}" + validate_certs: "{{validate_certs}}" + vol_id: "{{vol_id}}" + state: "{{state_absent}}" +""" + +RETURN = r''' + +changed: + description: Whether or not the resource has changed. + returned: always + type: bool + sample: True + +volume_details: + description: Details of the volume. + returned: When volume exists + type: dict + contains: + id: + description: The system generated ID given to the volume. + type: str + name: + description: Name of the volume. + type: str + description: + description: Description about the volume. + type: str + is_data_reduction_enabled: + description: Whether or not compression enabled on this volume. + type: bool + size_total_with_unit: + description: Size of the volume with actual unit. + type: str + snap_schedule: + description: Snapshot schedule applied to this volume. + type: dict + tiering_policy: + description: Tiering policy applied to this volume. + type: str + current_sp: + description: Current storage processor for this volume. + type: str + pool: + description: The pool in which this volume is allocated. + type: dict + host_access: + description: Host mapped to this volume. + type: list + io_limit_policy: + description: IO limit policy associated with this volume. + type: dict + wwn: + description: The world wide name of this volume. + type: str + is_thin_enabled: + description: Indicates whether thin provisioning is enabled for this + volume. 
+ type: bool + sample: { + "current_node": "NodeEnum.SPB", + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "default_node": "NodeEnum.SPB", + "description": null, + "effective_io_limit_max_iops": null, + "effective_io_limit_max_kbps": null, + "existed": true, + "family_base_lun": { + "UnityLun": { + "hash": 8774954523796, + "id": "sv_27" + } + }, + "family_clone_count": 0, + "hash": 8774954522426, + "health": { + "UnityHealth": { + "hash": 8774954528278 + } + }, + "host_access": [ + { + "accessMask": "PRODUCTION", + "hlu": 0, + "id": "Host_75", + "name": "10.226.198.250" + } + ], + "id": "sv_27", + "io_limit_policy": null, + "is_advanced_dedup_enabled": false, + "is_compression_enabled": null, + "is_data_reduction_enabled": false, + "is_replication_destination": false, + "is_snap_schedule_paused": false, + "is_thin_clone": false, + "is_thin_enabled": false, + "metadata_size": 4294967296, + "metadata_size_allocated": 4026531840, + "name": "VSI-UNITY-test-task", + "per_tier_size_used": [ + 111400714240, + 0, + 0 + ], + "pool": { + "id": "pool_3", + "name": "Extreme_Perf_tier" + }, + "size_allocated": 107374182400, + "size_total": 107374182400, + "size_total_with_unit": "100.0 GB", + "size_used": null, + "snap_count": 0, + "snap_schedule": null, + "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97", + "snaps_size": 0, + "snaps_size_allocated": 0, + "storage_resource": { + "UnityStorageResource": { + "hash": 8774954518887 + } + }, + "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH", + "type": "LUNTypeEnum.VMWARE_ISCSI", + "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils +import logging + +LOG = utils.get_logger('volume') + +application_type = "Ansible/1.5.0" + + +def is_none_or_empty_string(param): + + """ validates the input string for None or 
empty values + """ + return not param or len(str(param)) <= 0 + + +class Volume(object): + + """Class with volume operations""" + + param_host_id = None + param_io_limit_pol_id = None + param_snap_schedule_name = None + + def __init__(self): + """Define all parameters required by this module""" + self.module_params = utils.get_unity_management_host_parameters() + self.module_params.update(get_volume_parameters()) + + mutually_exclusive = [['vol_name', 'vol_id'], + ['pool_name', 'pool_id'], + ['host_name', 'host_id']] + + required_one_of = [['vol_name', 'vol_id']] + + # initialize the Ansible module + self.module = AnsibleModule( + argument_spec=self.module_params, + supports_check_mode=False, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of) + utils.ensure_required_libs(self.module) + + self.unity_conn = utils.get_unity_unisphere_connection( + self.module.params, application_type) + + def get_volume(self, vol_name=None, vol_id=None): + """Get the details of a volume. + :param vol_name: The name of the volume + :param vol_id: The id of the volume + :return: instance of the respective volume if exist. 
+ """ + + id_or_name = vol_id if vol_id else vol_name + errormsg = "Failed to get the volume {0} with error {1}" + + try: + + obj_vol = self.unity_conn.get_lun(name=vol_name, _id=vol_id) + + if vol_id and obj_vol.existed: + LOG.info("Successfully got the volume object %s ", obj_vol) + return obj_vol + elif vol_name: + LOG.info("Successfully got the volume object %s ", obj_vol) + return obj_vol + else: + LOG.info("Failed to get the volume %s", id_or_name) + return None + + except utils.HttpError as e: + if e.http_status == 401: + cred_err = "Incorrect username or password , {0}".format( + e.message) + msg = errormsg.format(id_or_name, cred_err) + self.module.fail_json(msg=msg) + else: + msg = errormsg.format(id_or_name, str(e)) + self.module.fail_json(msg=msg) + + except utils.UnityResourceNotFoundError as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + return None + + except Exception as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_host(self, host_name=None, host_id=None): + """Get the instance of a host. + :param host_name: The name of the host + :param host_id: The id of the volume + :return: instance of the respective host if exist. + """ + + id_or_name = host_id if host_id else host_name + errormsg = "Failed to get the host {0} with error {1}" + + try: + + obj_host = self.unity_conn.get_host(name=host_name, _id=host_id) + + if host_id and obj_host.existed: + LOG.info("Successfully got the host object %s ", obj_host) + return obj_host + elif host_name: + LOG.info("Successfully got the host object %s ", obj_host) + return obj_host + else: + msg = "Failed to get the host {0}".format(id_or_name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_snap_schedule(self, name): + """Get the instance of a snapshot schedule. 
+ :param name: The name of the snapshot schedule + :return: instance of the respective snapshot schedule if exist. + """ + + errormsg = "Failed to get the snapshot schedule {0} with error {1}" + + try: + LOG.debug("Attempting to get Snapshot Schedule with name %s", + name) + obj_ss = utils.UnitySnapScheduleList.get(self.unity_conn._cli, + name=name) + if obj_ss and (len(obj_ss) > 0): + LOG.info("Successfully got Snapshot Schedule %s", obj_ss) + return obj_ss + else: + msg = "Failed to get snapshot schedule " \ + "with name {0}".format(name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + msg = errormsg.format(name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_io_limit_policy(self, name=None, id=None): + """Get the instance of a io limit policy. + :param name: The io limit policy name + :param id: The io limit policy id + :return: instance of the respective io_limit_policy if exist. + """ + + errormsg = "Failed to get the io limit policy {0} with error {1}" + id_or_name = name if name else id + + try: + obj_iopol = self.unity_conn.get_io_limit_policy(_id=id, name=name) + if id and obj_iopol.existed: + LOG.info("Successfully got the IO limit policy object %s", + obj_iopol) + return obj_iopol + elif name: + LOG.info("Successfully got the IO limit policy object %s ", + obj_iopol) + return obj_iopol + else: + msg = "Failed to get the io limit policy with {0}".format( + id_or_name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + msg = errormsg.format(name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_pool(self, pool_name=None, pool_id=None): + """Get the instance of a pool. 
+ :param pool_name: The name of the pool + :param pool_id: The id of the pool + :return: Dict containing pool details if exists + """ + + id_or_name = pool_id if pool_id else pool_name + errormsg = "Failed to get the pool {0} with error {1}" + + try: + obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id) + + if pool_id and obj_pool.existed: + LOG.info("Successfully got the pool object %s", + obj_pool) + return obj_pool + if pool_name: + LOG.info("Successfully got pool %s", obj_pool) + return obj_pool + else: + msg = "Failed to get the pool with " \ + "{0}".format(id_or_name) + LOG.error(msg) + self.module.fail_json(msg=msg) + + except Exception as e: + msg = errormsg.format(id_or_name, str(e)) + LOG.error(msg) + self.module.fail_json(msg=msg) + + def get_NodeEnum_enum(self, sp): + """Get the storage processor enum. + :param sp: The storage processor string + :return: storage processor enum + """ + + if sp in utils.NodeEnum.__members__: + return utils.NodeEnum[sp] + else: + errormsg = "Invalid choice {0} for storage processor".format( + sp) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_tiering_policy_enum(self, tiering_policy): + """Get the tiering_policy enum. + :param tiering_policy: The tiering_policy string + :return: tiering_policy enum + """ + + if tiering_policy in utils.TieringPolicyEnum.__members__: + return utils.TieringPolicyEnum[tiering_policy] + else: + errormsg = "Invalid choice {0} for tiering policy".format( + tiering_policy) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def create_volume(self, obj_pool, size, host_access=None): + """Create a volume. 
+ :param obj_pool: pool object instance + :param size: size of the volume in GB + :param host_access: host to be associated with this volume + :return: Volume object on successful creation + """ + + vol_name = self.module.params['vol_name'] + + try: + + description = self.module.params['description'] + compression = self.module.params['compression'] + is_thin = self.module.params['is_thin'] + snap_schedule = None + + sp = self.module.params['sp'] + sp = self.get_NodeEnum_enum(sp) if sp else None + + io_limit_policy = self.get_io_limit_policy( + id=self.param_io_limit_pol_id) \ + if self.module.params['io_limit_policy'] else None + + if self.param_snap_schedule_name: + snap_schedule = {"name": self.param_snap_schedule_name} + + tiering_policy = self.module.params['tiering_policy'] + tiering_policy = self.get_tiering_policy_enum(tiering_policy) \ + if tiering_policy else None + + obj_vol = obj_pool.create_lun(lun_name=vol_name, + size_gb=size, + sp=sp, + host_access=host_access, + is_thin=is_thin, + description=description, + tiering_policy=tiering_policy, + snap_schedule=snap_schedule, + io_limit_policy=io_limit_policy, + is_compression=compression) + + LOG.info("Successfully created volume , %s", obj_vol) + + return obj_vol + + except Exception as e: + errormsg = "Create volume operation {0} failed" \ + " with error {1}".format(vol_name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def host_access_modify_required(self, host_access_list): + """Check if host access modification is required + :param host_access_list: host access dict list + :return: Dict with attributes to modify, or None if no + modification is required. 
+ """ + + try: + to_modify = False + hlu = self.module.params['hlu'] + mapping_state = self.module.params['mapping_state'] + + host_id_list = [] + hlu_list = [] + new_list = [] + if not host_access_list and self.new_host_list and\ + mapping_state == 'unmapped': + return to_modify + + elif host_access_list: + for host_access in host_access_list.host: + host_id_list.append(host_access.id) + host = self.get_host(host_id=host_access.id).update() + host_dict = host.host_luns._get_properties() + LOG.debug("check if hlu present : %s", host_dict) + + if "hlu" in host_dict.keys(): + hlu_list.append(host_dict['hlu']) + + if mapping_state == 'mapped': + if (self.param_host_id not in host_id_list): + for item in self.new_host_list: + new_list.append(item.get("host_id")) + if not list(set(new_list) - set(host_id_list)): + return False + to_modify = True + + if mapping_state == 'unmapped': + if self.new_host_list: + for item in self.new_host_list: + new_list.append(item.get("host_id")) + if list(set(new_list) - set(host_id_list)): + return False + self.overlapping_list = list(set(host_id_list) - set(new_list)) + to_modify = True + LOG.debug("host_access_modify_required : %s ", str(to_modify)) + return to_modify + + except Exception as e: + errormsg = "Failed to compare the host_access with error {0} " \ + "{1}".format(host_access_list, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def volume_modify_required(self, obj_vol, cap_unit): + """Check if volume modification is required + :param obj_vol: volume instance + :param cap_unit: capacity unit + :return: Boolean value to indicate if modification is required + """ + + try: + to_update = {} + + new_vol_name = self.module.params['new_vol_name'] + if new_vol_name and obj_vol.name != new_vol_name: + to_update.update({'name': new_vol_name}) + + description = self.module.params['description'] + if description and obj_vol.description != description: + to_update.update({'description': description}) + + size = 
self.module.params['size'] + if size and cap_unit: + size_byte = int(utils.get_size_bytes(size, cap_unit)) + if size_byte < obj_vol.size_total: + self.module.fail_json(msg="Volume size can be " + "expanded only") + elif size_byte > obj_vol.size_total: + to_update.update({'size': size_byte}) + + compression = self.module.params['compression'] + if compression is not None and \ + compression != obj_vol.is_data_reduction_enabled: + to_update.update({'is_compression': compression}) + + is_thin = self.module.params['is_thin'] + if is_thin is not None and is_thin != obj_vol.is_thin_enabled: + self.module.fail_json(msg="Modifying is_thin is not allowed") + + sp = self.module.params['sp'] + if sp and self.get_NodeEnum_enum(sp) != obj_vol.current_node: + to_update.update({'sp': self.get_NodeEnum_enum(sp)}) + + tiering_policy = self.module.params['tiering_policy'] + if tiering_policy and self.get_tiering_policy_enum( + tiering_policy) != obj_vol.tiering_policy: + to_update.update({'tiering_policy': + self.get_tiering_policy_enum( + tiering_policy)}) + + # prepare io_limit_policy object + if self.param_io_limit_pol_id: + if (not obj_vol.io_limit_policy) \ + or (self.param_io_limit_pol_id + != obj_vol.io_limit_policy.id): + to_update.update( + {'io_limit_policy': self.param_io_limit_pol_id}) + + # prepare snap_schedule object + if self.param_snap_schedule_name: + if (not obj_vol.snap_schedule) \ + or (self.param_snap_schedule_name + != obj_vol.snap_schedule.name): + to_update.update({'snap_schedule': + self.param_snap_schedule_name}) + + # for removing existing snap_schedule + if self.param_snap_schedule_name == "": + if obj_vol.snap_schedule: + to_update.update({'is_snap_schedule_paused': False}) + else: + LOG.warn("No snapshot schedule is associated") + + LOG.debug("Volume to modify Dict : %s", to_update) + if len(to_update) > 0: + return to_update + else: + return None + + except Exception as e: + errormsg = "Failed to determine if volume {0},requires " \ + "modification, 
with error {1}".format(obj_vol.name, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def multiple_host_map(self, host_dic_list, obj_vol): + """Attach multiple hosts to a volume + :param host_dic_list: hosts to map the volume + :param obj_vol: volume instance + :return: response from API call + """ + + try: + host_access = [] + current_hosts = self.get_volume_host_access_list(obj_vol) + for existing_host in current_hosts: + host_access.append( + {'accessMask': eval('utils.HostLUNAccessEnum.' + existing_host['accessMask']), + 'host': + {'id': existing_host['id']}, 'hlu': existing_host['hlu']}) + for item in host_dic_list: + host_access.append( + {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, + 'host': + {'id': item['host_id']}, 'hlu': item['hlu']}) + resp = obj_vol.modify(host_access=host_access) + return resp + except Exception as e: + errormsg = "Failed to attach hosts {0} with volume {1} with error {2} ".format(host_dic_list, obj_vol.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def multiple_detach(self, host_list_detach, obj_vol): + """Detach multiple hosts from a volume + :param host_list_detach: hosts to unmap the volume + :param obj_vol: volume instance + :return: response from API call + """ + + try: + host_access = [] + for item in host_list_detach: + host_access.append({'accessMask': utils.HostLUNAccessEnum.PRODUCTION, + 'host': {'id': item}}) + resp = obj_vol.modify(host_access=host_access) + return resp + except Exception as e: + errormsg = "Failed to detach hosts {0} from volume {1} with error {2} ".format(host_list_detach, obj_vol.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def modify_volume(self, obj_vol, to_modify_dict): + """modify volume attributes + :param obj_vol: volume instance + :param to_modify_dict: dict containing attributes to be modified. 
+ :return: None + """ + + try: + + if 'io_limit_policy' in to_modify_dict.keys(): + to_modify_dict['io_limit_policy'] = self.get_io_limit_policy( + id=to_modify_dict['io_limit_policy']) + + if 'snap_schedule' in to_modify_dict.keys() and \ + to_modify_dict['snap_schedule'] != "": + to_modify_dict['snap_schedule'] = \ + {"name": to_modify_dict['snap_schedule']} + + param_list = ['name', 'size', 'host_access', 'description', 'sp', + 'io_limit_policy', 'tiering_policy', + 'snap_schedule', 'is_snap_schedule_paused', + 'is_compression'] + + for item in param_list: + if item not in to_modify_dict.keys(): + to_modify_dict.update({item: None}) + + LOG.debug("Final update dict before modify " + "api call: %s", to_modify_dict) + + obj_vol.modify(name=to_modify_dict['name'], + size=to_modify_dict['size'], + host_access=to_modify_dict['host_access'], + description=to_modify_dict['description'], + sp=to_modify_dict['sp'], + io_limit_policy=to_modify_dict['io_limit_policy'], + tiering_policy=to_modify_dict['tiering_policy'], + snap_schedule=to_modify_dict['snap_schedule'], + is_snap_schedule_paused=to_modify_dict[ + 'is_snap_schedule_paused'], + is_compression=to_modify_dict['is_compression']) + + except Exception as e: + errormsg = "Failed to modify the volume {0} " \ + "with error {1}".format(obj_vol.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def delete_volume(self, vol_id): + """Delete volume. 
+ :param vol_obj: The object instance of the volume to be deleted + """ + + try: + obj_vol = self.get_volume(vol_id=vol_id) + obj_vol.delete(force_snap_delete=False) + return True + + except Exception as e: + errormsg = "Delete operation of volume id:{0} " \ + "failed with error {1}".format(id, + str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def get_volume_host_access_list(self, obj_vol): + """ + Get volume host access list + :param obj_vol: volume instance + :return: host list + """ + host_list = [] + if obj_vol.host_access: + for host_access in obj_vol.host_access: + host = self.get_host(host_id=host_access.host.id).update() + hlu = None + for host_lun in host.host_luns: + if host_lun.lun.name == obj_vol.name: + hlu = host_lun.hlu + host_list.append({'name': host_access.host.name, + 'id': host_access.host.id, + 'accessMask': host_access.access_mask.name, + 'hlu': hlu}) + return host_list + + def get_volume_display_attributes(self, obj_vol): + """get display volume attributes + :param obj_vol: volume instance + :return: volume dict to display + """ + try: + obj_vol = obj_vol.update() + volume_details = obj_vol._get_properties() + volume_details['size_total_with_unit'] = utils. 
\ + convert_size_with_unit(int(volume_details['size_total'])) + volume_details.update({'host_access': self.get_volume_host_access_list(obj_vol)}) + if obj_vol.snap_schedule: + volume_details.update( + {'snap_schedule': {'name': obj_vol.snap_schedule.name, + 'id': obj_vol.snap_schedule.id}}) + if obj_vol.io_limit_policy: + volume_details.update( + {'io_limit_policy': {'name': obj_vol.io_limit_policy.id, + 'id': obj_vol.io_limit_policy.id}}) + if obj_vol.pool: + volume_details.update({'pool': {'name': obj_vol.pool.name, + 'id': obj_vol.pool.id}}) + + return volume_details + + except Exception as e: + errormsg = "Failed to display the volume {0} with " \ + "error {1}".format(obj_vol.name, str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_input_string(self): + """ validates the input string checks if it is empty string + + """ + invalid_string = "" + try: + no_chk_list = ['snap_schedule', 'description'] + for key in self.module.params: + val = self.module.params[key] + if key not in no_chk_list and isinstance(val, str) \ + and val == invalid_string: + errmsg = 'Invalid input parameter "" for {0}'.format( + key) + self.module.fail_json(msg=errmsg) + + except Exception as e: + errormsg = "Failed to validate the module param with " \ + "error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def validate_host_list(self, host_list_input): + """ validates the host_list_input value for None and empty + + """ + try: + for host_list in host_list_input: + if ("host_name" in host_list.keys() and "host_id" in host_list.keys()): + if host_list["host_name"] and host_list["host_id"]: + errmsg = 'parameters are mutually exclusive: host_name|host_id' + self.module.fail_json(msg=errmsg) + is_host_details_missing = True + for key, value in host_list.items(): + if key == "host_name" and not is_none_or_empty_string(value): + is_host_details_missing = False + elif key == "host_id" and not is_none_or_empty_string(value): + 
is_host_details_missing = False + + if is_host_details_missing: + errmsg = 'Invalid input parameter for {0}'.format(key) + self.module.fail_json(msg=errmsg) + + except Exception as e: + errormsg = "Failed to validate the module param with " \ + "error {0}".format(str(e)) + LOG.error(errormsg) + self.module.fail_json(msg=errormsg) + + def resolve_host_mappings(self, hosts): + """ This method creates a dictionary of hosts and hlu parameter values + :param hosts: host and hlu value passed from input file + :return: list of host and hlu dictionary + """ + host_list_new = [] + + if hosts: + for item in hosts: + host_dict = dict() + host_id = None + hlu = None + if item['host_name']: + host = self.get_host(host_name=item['host_name']) + if host: + host_id = host.id + if item['host_id']: + host_id = item['host_id'] + if item['hlu']: + hlu = item['hlu'] + host_dict['host_id'] = host_id + host_dict['hlu'] = hlu + host_list_new.append(host_dict) + return host_list_new + + def perform_module_operation(self): + """ + Perform different actions on volume module based on parameters + passed in the playbook + """ + self.new_host_list = [] + self.overlapping_list = [] + vol_name = self.module.params['vol_name'] + vol_id = self.module.params['vol_id'] + pool_name = self.module.params['pool_name'] + pool_id = self.module.params['pool_id'] + size = self.module.params['size'] + cap_unit = self.module.params['cap_unit'] + snap_schedule = self.module.params['snap_schedule'] + io_limit_policy = self.module.params['io_limit_policy'] + host_name = self.module.params['host_name'] + host_id = self.module.params['host_id'] + hlu = self.module.params['hlu'] + mapping_state = self.module.params['mapping_state'] + new_vol_name = self.module.params['new_vol_name'] + state = self.module.params['state'] + hosts = self.module.params['hosts'] + + # result is a dictionary to contain end state and volume details + changed = False + result = dict( + changed=False, + volume_details={} + ) + + 
to_modify_dict = None + volume_details = None + to_modify_host = False + + self.validate_input_string() + + if hosts: + self.validate_host_list(hosts) + + if size is not None and size == 0: + self.module.fail_json(msg="Size can not be 0 (Zero)") + + if size and not cap_unit: + cap_unit = 'GB' + + if (cap_unit is not None) and not size: + self.module.fail_json(msg="cap_unit can be specified along " + "with size") + + if hlu and (not host_name and not host_id and not hosts): + self.module.fail_json(msg="hlu can be specified with " + "host_id or host_name") + if mapping_state and (not host_name and not host_id and not hosts): + self.module.fail_json(msg="mapping_state can be specified" + " with host_id or host_name or hosts") + + obj_vol = self.get_volume(vol_id=vol_id, vol_name=vol_name) + + if host_name or host_id: + if not mapping_state: + errmsg = "'mapping_state' is required along with " \ + "'host_name' or 'host_id' or 'hosts'" + self.module.fail_json(msg=errmsg) + host = [{'host_name': host_name, 'host_id': host_id, 'hlu': hlu}] + self.new_host_list = self.resolve_host_mappings(host) + + if hosts: + if not mapping_state: + errmsg = "'mapping_state' is required along with " \ + "'host_name' or 'host_id' or 'hosts'" + self.module.fail_json(msg=errmsg) + self.new_host_list += self.resolve_host_mappings(hosts) + + if io_limit_policy: + io_limit_policy = self.get_io_limit_policy(name=io_limit_policy) + self.param_io_limit_pol_id = io_limit_policy.id + + if snap_schedule: + snap_schedule = self.get_snap_schedule(name=snap_schedule) + self.param_snap_schedule_name = snap_schedule.name[0] + + # this is for removing existing snap_schedule + if snap_schedule == "": + self.param_snap_schedule_name = snap_schedule + + if obj_vol: + volume_details = obj_vol._get_properties() + vol_id = obj_vol.get_id() + to_modify_dict = self.volume_modify_required(obj_vol, cap_unit) + LOG.debug("Volume Modify Required: %s", to_modify_dict) + if obj_vol.host_access: + to_modify_host = 
self.host_access_modify_required( + host_access_list=obj_vol.host_access) + LOG.debug("Host Modify Required in access: %s", to_modify_host) + elif self.new_host_list: + to_modify_host = self.host_access_modify_required( + host_access_list=obj_vol.host_access) + LOG.debug("Host Modify Required: %s", to_modify_host) + + if state == 'present' and not volume_details: + if not vol_name: + msg_noname = "volume with id {0} is not found, unable to " \ + "create a volume without a valid " \ + "vol_name".format(vol_id) + self.module.fail_json(msg=msg_noname) + + if snap_schedule == "": + self.module.fail_json(msg="Invalid snap_schedule") + + if new_vol_name: + self.module.fail_json(msg="new_vol_name is not required " + "to create a new volume") + if not pool_name and not pool_id: + self.module.fail_json(msg="pool_id or pool_name is required " + "to create new volume") + if not size: + self.module.fail_json(msg="Size is required to create" + " a volume") + host_access = None + if self.new_host_list: + host_access = [] + for item in self.new_host_list: + if item['hlu']: + host_access.append( + {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']}, + 'hlu': item['hlu']}) + else: + host_access.append( + {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']}}) + + size = utils.get_size_in_gb(size, cap_unit) + + obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id) + + obj_vol = self.create_volume(obj_pool=obj_pool, size=size, + host_access=host_access) + LOG.debug("Successfully created volume , %s", obj_vol) + vol_id = obj_vol.id + volume_details = obj_vol._get_properties() + LOG.debug("Got volume id , %s", vol_id) + changed = True + + if state == 'present' and volume_details and to_modify_dict: + self.modify_volume(obj_vol=obj_vol, to_modify_dict=to_modify_dict) + changed = True + + if (state == 'present' and volume_details + and mapping_state == 'mapped' and to_modify_host): + if self.new_host_list: + resp = 
self.multiple_host_map(host_dic_list=self.new_host_list, obj_vol=obj_vol) + changed = True if resp else False + + if (state == 'present' and volume_details + and mapping_state == 'unmapped' and to_modify_host): + if self.new_host_list: + resp = self.multiple_detach(host_list_detach=self.overlapping_list, obj_vol=obj_vol) + LOG.info(resp) + changed = True if resp else False + + if state == 'absent' and volume_details: + changed = self.delete_volume(vol_id) + volume_details = None + + if state == 'present' and volume_details: + volume_details = self.get_volume_display_attributes( + obj_vol=obj_vol) + + result['changed'] = changed + result['volume_details'] = volume_details + self.module.exit_json(**result) + + +def get_volume_parameters(): + """This method provide parameters required for the ansible volume + module on Unity""" + return dict( + vol_name=dict(required=False, type='str'), + vol_id=dict(required=False, type='str'), + description=dict(required=False, type='str'), + pool_name=dict(required=False, type='str'), + pool_id=dict(required=False, type='str'), + size=dict(required=False, type='int'), + cap_unit=dict(required=False, type='str', choices=['GB', 'TB']), + is_thin=dict(required=False, type='bool'), + compression=dict(required=False, type='bool'), + sp=dict(required=False, type='str', choices=['SPA', 'SPB']), + io_limit_policy=dict(required=False, type='str'), + snap_schedule=dict(required=False, type='str'), + host_name=dict(required=False, type='str'), + host_id=dict(required=False, type='str'), + hosts=dict(required=False, type='list', elements='dict', + options=dict( + host_id=dict(required=False, type='str'), + host_name=dict(required=False, type='str'), + hlu=dict(required=False, type='str') + )), + hlu=dict(required=False, type='int'), + mapping_state=dict(required=False, type='str', + choices=['mapped', 'unmapped']), + new_vol_name=dict(required=False, type='str'), + tiering_policy=dict(required=False, type='str', choices=[ + 'AUTOTIER_HIGH', 
'AUTOTIER', 'HIGHEST', 'LOWEST']), + state=dict(required=True, type='str', choices=['present', 'absent']) + ) + + +def main(): + """ Create Unity volume object and perform action on it + based on user input from playbook""" + obj = Volume() + obj.perform_module_operation() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/dellemc/unity/requirements.txt b/ansible_collections/dellemc/unity/requirements.txt new file mode 100644 index 00000000..2325e97f --- /dev/null +++ b/ansible_collections/dellemc/unity/requirements.txt @@ -0,0 +1,3 @@ +urllib3 +storops>=1.2.11 +setuptools diff --git a/ansible_collections/dellemc/unity/requirements.yml b/ansible_collections/dellemc/unity/requirements.yml new file mode 100644 index 00000000..548a3107 --- /dev/null +++ b/ansible_collections/dellemc/unity/requirements.yml @@ -0,0 +1,3 @@ +--- +collections: + - name: dellemc.unity diff --git a/ansible_collections/dellemc/unity/tests/requirements.txt b/ansible_collections/dellemc/unity/tests/requirements.txt new file mode 100644 index 00000000..3541acd1 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/requirements.txt @@ -0,0 +1,7 @@ +pytest +pytest-xdist +pytest-mock +pytest-cov +pytest-forked +coverage==4.5.4 +mock diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt new file mode 100644 index 00000000..a381c08a --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt @@ -0,0 +1,23 @@ +plugins/modules/nfs.py compile-2.6 +plugins/modules/nfs.py import-2.6 +plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license +plugins/modules/filesystem.py validate-modules:missing-gplv3-license +plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/host.py validate-modules:missing-gplv3-license +plugins/modules/nasserver.py 
validate-modules:missing-gplv3-license +plugins/modules/nfs.py validate-modules:missing-gplv3-license +plugins/modules/smbshare.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/tree_quota.py validate-modules:missing-gplv3-license +plugins/modules/user_quota.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/cifsserver.py validate-modules:missing-gplv3-license +plugins/modules/nfsserver.py validate-modules:missing-gplv3-license +plugins/modules/host.py import-2.6 +plugins/modules/host.py import-2.7 +plugins/modules/interface.py import-2.6 +plugins/modules/interface.py import-2.7 +plugins/modules/interface.py validate-modules:missing-gplv3-license \ No newline at end of file diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt new file mode 100644 index 00000000..cb95dcfc --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt @@ -0,0 +1,19 @@ +plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license +plugins/modules/filesystem.py validate-modules:missing-gplv3-license +plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/host.py validate-modules:missing-gplv3-license +plugins/modules/nasserver.py validate-modules:missing-gplv3-license +plugins/modules/nfs.py validate-modules:missing-gplv3-license +plugins/modules/smbshare.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py 
validate-modules:missing-gplv3-license +plugins/modules/tree_quota.py validate-modules:missing-gplv3-license +plugins/modules/user_quota.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/cifsserver.py validate-modules:missing-gplv3-license +plugins/modules/nfsserver.py validate-modules:missing-gplv3-license +plugins/modules/host.py import-2.7 +plugins/modules/interface.py import-2.7 +plugins/modules/interface.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt new file mode 100644 index 00000000..cb95dcfc --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt @@ -0,0 +1,19 @@ +plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license +plugins/modules/filesystem.py validate-modules:missing-gplv3-license +plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license +plugins/modules/info.py validate-modules:missing-gplv3-license +plugins/modules/host.py validate-modules:missing-gplv3-license +plugins/modules/nasserver.py validate-modules:missing-gplv3-license +plugins/modules/nfs.py validate-modules:missing-gplv3-license +plugins/modules/smbshare.py validate-modules:missing-gplv3-license +plugins/modules/snapshot.py validate-modules:missing-gplv3-license +plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license +plugins/modules/storagepool.py validate-modules:missing-gplv3-license +plugins/modules/tree_quota.py validate-modules:missing-gplv3-license +plugins/modules/user_quota.py validate-modules:missing-gplv3-license +plugins/modules/volume.py validate-modules:missing-gplv3-license +plugins/modules/cifsserver.py validate-modules:missing-gplv3-license +plugins/modules/nfsserver.py validate-modules:missing-gplv3-license +plugins/modules/host.py import-2.7 +plugins/modules/interface.py 
import-2.7 +plugins/modules/interface.py validate-modules:missing-gplv3-license diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py new file mode 100644 index 00000000..4ddee966 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py @@ -0,0 +1,19 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock ApiException for Unity Test modules""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockApiException(Exception): + body = "SDK Error message" + status = "500" + + +class HttpError(Exception): + body = "Http Error message" + http_status = 401 diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py new file mode 100644 index 00000000..427d530f --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py @@ -0,0 +1,200 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of CIFS server module on Unity""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import MagicMock + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockCIFSServerApi: + CIFS_SERVER_MODULE_ARGS = { + 'nas_server_id': None, + 'nas_server_name': None, + 'netbios_name': None, + 'workgroup': None, + 'local_password': None, + 'domain': None, + 'domain_username': None, + 'domain_password': None, + 
'cifs_server_id': None, + 'cifs_server_name': None, + 'interfaces': None, + 'unjoin_cifs_server_account': None, + 'state': None + } + + @staticmethod + def get_cifs_server_details_method_response(): + return { + "description": None, + "domain": "xxx.xxx.xxx.xxx", + "existed": True, + "file_interfaces": { + "UnityFileInterfaceList": [ + { + "UnityFileInterface": { + "hash": 8791477905949, + "id": "if_43" + } + } + ] + }, + "hash": 8791478461637, + "health": { + "UnityHealth": { + "hash": 8791478461623 + } + }, + "id": "cifs_59", + "is_standalone": False, + "last_used_organizational_unit": "ou=Computers,ou=EMC NAS servers", + "name": "test_cifs_server", + "nas_server": { + "UnityNasServer": { + "hash": 8791478461595, + "id": "nas_18" + } + }, + "netbios_name": "TEST_CIFS_SERVER", + "smb_multi_channel_supported": True, + "smb_protocol_versions": [ + "1.0", + "2.0", + "2.1", + "3.0" + ], + "smbca_supported": True, + "workgroup": None + } + + @staticmethod + def get_cifs_server_details_method_netbios_response(): + return { + "UnityCifsServerList": [{ + "UnityCifsServer": { + "existed": True, + "file_interfaces": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": -9223363293222387298, + "id": "if_43" + } + }] + }, + "hash": 8743632213638, + "health": { + "UnityHealth": { + "hash": -9223363293222562209 + } + }, + "id": "cifs_60", + "is_standalone": True, + "nas_server": { + "UnityNasServer": { + "hash": -9223363293221242245, + "id": "nas_18" + } + }, + "netbios_name": "ANSIBLE_CIFS", + "smb_multi_channel_supported": True, + "smb_protocol_versions": ["1.0", "2.0", "2.1", "3.0"], + "smbca_supported": True, + "workgroup": "ANSIBLE" + } + }] + } + + @staticmethod + def create_cifs_server_without_nas(): + return "Please provide nas server id/name to create CIFS server." + + @staticmethod + def invalid_credentials(): + return "Incorrect username or password provided." 
+ + @staticmethod + def modify_error_msg(): + return "Modification is not supported through Ansible module" + + @staticmethod + def get_nas_server_details(): + return { + "UnityNasServer": { + "cifs_server": { + "UnityCifsServerList": [{ + "UnityCifsServer": { + "hash": 8734183189936, + "id": "cifs_60" + } + }] + }, + "current_sp": { + "UnityStorageProcessor": { + "hash": 8734188780762, + "id": "spa" + } + }, + "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE", + "existed": True, + "file_dns_server": { + "UnityFileDnsServer": { + "hash": 8734183189782, + "id": "dns_11" + } + }, + "file_interface": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": -9223363302671584431, + "id": "if_43" + } + }] + }, + "hash": -9223363302671053452, + "health": { + "UnityHealth": { + "hash": 8734182402245 + } + }, + "home_sp": { + "UnityStorageProcessor": { + "hash": -9223363302671594510, + "id": "spa" + } + }, + "id": "nas_18", + "is_backup_only": False, + "is_multi_protocol_enabled": False, + "is_packet_reflect_enabled": False, + "is_replication_destination": False, + "is_replication_enabled": False, + "name": "test_nas1", + "pool": { + "UnityPool": { + "hash": -9223363302672128291, + "id": "pool_7" + } + }, + "preferred_interface_settings": { + "UnityPreferredInterfaceSettings": { + "hash": -9223363302671585904, + "id": "preferred_if_16" + } + }, + "replication_type": "ReplicationTypeEnum.NONE", + "size_allocated": 2952790016, + "virus_checker": { + "UnityVirusChecker": { + "hash": 8734183191465, + "id": "cava_18" + } + } + } + } diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py new file mode 100644 index 00000000..07fe6b5d --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py @@ -0,0 +1,122 @@ +# Copyright: (c) 2022, Dell 
Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of consistency group module on Unity""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import MagicMock + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockConsistenyGroupApi: + CONSISTENCY_GROUP_MODULE_ARGS = { + 'unispherehost': '**.***.**.***', + 'port': '123', + 'cg_id': None, + 'cg_name': None, + 'new_cg_name': None, + 'pool_id': None, + 'description': None, + 'snap_schedule': None, + 'tiering_policy': None, + 'volumes': [], + 'vol_state': None, + 'hosts': [], + 'mapping_state': None, + 'replication_params': {}, + 'replication_state': None, + 'state': None + } + IP_ADDRESS_MOCK_VALUE = '***.***.***.**' + + @staticmethod + def cg_get_details_method_response(): + return {'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': None, 'data_reduction_percent': 0, + 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'data_reduction_status': 'DataReductionStatusEnum.DISABLED', + 'datastores': None, 'dedup_status': None, 'description': '', 'esx_filesystem_block_size': None, + 'esx_filesystem_major_version': None, 'filesystem': None, 'health': {}, 'host_v_vol_datastore': None, + 'id': 'cg_id_1', 'is_replication_destination': False, 'is_snap_schedule_paused': None, + 'luns': [{'id': 'lun_id_1', 'name': 'test_lun_cg_issue', 'is_thin_enabled': False, + 'size_total': 1, 'is_data_reduction_enabled': False}], + 'name': 'lun_test_cg_source_12', 'per_tier_size_used': [1, 0, 0], + 'pools': [{'id': 'pool_id_1'}], + 'relocation_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'replication_type': 'ReplicationTypeEnum.NONE', + 'size_allocated': 0, 'size_total': 1, 'size_used': None, 'snap_count': 0, 'snap_schedule': None, + 'snaps_size_allocated': 0, 
'snaps_size_total': 0, 'thin_status': 'ThinStatusEnum.TRUE', + 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None, + 'existed': True, 'snapshots': [], 'cg_replication_enabled': False} + + @staticmethod + def get_cg_object(): + return MockSDKObject({'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': None, + 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, + 'data_reduction_status': 'DataReductionStatusEnum.DISABLED', + 'datastores': None, 'dedup_status': None, 'description': '', 'esx_filesystem_block_size': None, + 'esx_filesystem_major_version': None, 'filesystem': None, 'health': {}, 'host_v_vol_datastore': None, + 'id': 'cg_id_1', 'is_replication_destination': False, 'is_snap_schedule_paused': None, + 'luns': [MockSDKObject({'id': 'lun_id_1', 'name': 'test_lun_cg_issue', + 'is_thin_enabled': False, 'size_total': 1, 'is_data_reduction_enabled': False})], + 'name': 'lun_test_cg_source_12', 'per_tier_size_used': [1, 0, 0], + 'pools': [MockSDKObject({'id': 'pool_id_1'})], + 'relocation_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'replication_type': 'ReplicationTypeEnum.NONE', + 'size_allocated': 0, 'size_total': 1, 'size_used': None, 'snap_count': 0, 'snap_schedule': None, + 'snaps_size_allocated': 0, 'snaps_size_total': 0, 'thin_status': 'ThinStatusEnum.TRUE', + 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None, + 'existed': True, 'snapshots': [], 'cg_replication_enabled': False}) + + @staticmethod + def get_cg_replication_dependent_response(response_type): + if response_type == 'cg_replication_enabled_details': + cg_replication_enabled_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_replication_enabled_details['cg_replication_enabled'] = True + return cg_replication_enabled_details + elif response_type == 'remote_system': + return [MockSDKObject({"connection_type": 
"ReplicationCapabilityEnum.ASYNC", "existed": True, + "health": {"UnityHealth": {}}, "id": "system_id_1", "local_spa_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE], + "local_spb_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE], + "management_address": "**.***.**.**", "model": "U XXX", + "name": "ABXXXXXX", "remote_spa_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE], + "remote_spb_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE], + "serial_number": "ABXXXXXX", "sync_fc_ports": ["abc_def", "ghi_jkl"], "username": "username"})] + elif response_type == 'remote_system_pool_object': + return MockSDKObject({"alert_threshold": 60, "creation_time": "2021-10-18 12:51:27+00:00", "description": "", + "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE", "health": {"UnityHealth": {}}, + "id": "pool_3", "is_all_flash": True, "is_empty": False, "is_fast_cache_enabled": False, + "is_harvest_enabled": True, "is_snap_harvest_enabled": True, "name": "Extreme_Perf_tier", + "object_id": 1, "pool_fast_vp": {"UnityPoolFastVp": {}}, "pool_space_harvest_high_threshold": 95.0, + "pool_space_harvest_low_threshold": 70.5, "pool_type": "StoragePoolTypeEnum.DYNAMIC", + "raid_type": "RaidTypeEnum.RAID5", "size_free": 1, "size_subscribed": 1, "size_total": 1, + "size_used": 1, "snap_size_subscribed": 1, "snap_size_used": 1, "snap_space_harvest_high_threshold": 20.5, + "snap_space_harvest_low_threshold": 1.0, + "tiers": {"UnityPoolTierList": [{"UnityPoolTier": {}}, {"UnityPoolTier": {}}, {"UnityPoolTier": {}}]}}) + elif response_type == 'replication_session': + return MockSDKObject({"current_transfer_est_remain_time": 0, "daily_snap_replication_policy": {}, + "dst_resource_id": "dest_id_1", "dst_status": "ReplicationSessionStatusEnum.OK", "existed": True, + "health": {}, "hourly_snap_replication_policy": {}, + "id": "111111_XXX1111111_0000_1111111_XXX111111111_0000", + "last_sync_time": "2022-02-17 09: 50: 54+00: 00", + "local_role": 
"ReplicationSessionReplicationRoleEnum.LOOPBACK", + "max_time_out_of_sync": 60, "members": {}, "name": "rep_session_1", + "network_status": "ReplicationSessionNetworkStatusEnum.OK", "remote_system": {}, + "replication_resource_type": "ReplicationEndpointResourceTypeEnum.CONSISTENCYGROUP", + "src_resource_id": "src_id_1", + "src_status": "ReplicationSessionStatusEnum.OK", + "status": "ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED", + "sync_progress": 0, "sync_state": "ReplicationSessionSyncStateEnum.IDLE"}) + elif response_type == 'destination_cg_name_validation': + return 'destination_cg_name value should be in range of 1 to 95' + elif response_type == 'enable_cg_exception': + return 'Enabling replication to the consistency group lun_test_cg_source_12 failed with error ' + elif response_type == 'disable_cg_exception': + return 'Disabling replication to the consistency group lun_test_cg_source_12 failed with error ' + + @staticmethod + def get_remote_system_conn_response(): + conn = MockConsistenyGroupApi.get_cg_replication_dependent_response("remote_system")[0] + conn.get_pool = MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system_pool_object')) + return conn diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py new file mode 100644 index 00000000..d855815a --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py @@ -0,0 +1,68 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of FileSystem module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + 
import MockSDKObject + + +class MockFileSystemApi: + @staticmethod + def get_file_system_response(): + filesystem_response = {"UnityFileSystem": { + "access_policy": "AccessPolicyEnum.NATIVE", + "cifs_notify_on_change_dir_depth": 512, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "description": "", + "existed": True, + "size_total": 5, + "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN", + "id": "fs_208", + "replication_type": "Remote"}} + filesystem_response['storage_resource'] = MockSDKObject({'replication_type': None}) + return filesystem_response + + @staticmethod + def get_replication_params(is_valid=True): + rpo = 60 + if not is_valid: + rpo = 2 + return {'replication_params': { + 'replication_name': None, + 'new_replication_name': None, + 'replication_mode': 'asynchronous', + 'replication_type': 'local', + 'rpo': rpo, + 'remote_system': None, + 'destination_pool_name': 'pool_test_name_1', + 'destination_pool_id': None}, + 'replication_state': 'enable', + 'state': 'present' + } + else: + return {'replication_params': { + 'replication_name': None, + 'replication_mode': 'asynchronous', + 'new_replication_name': None, + 'replication_type': 'remote', + 'rpo': rpo, + 'remote_system': { + 'remote_system_host': '1.1.1.1', + 'remote_system_verifycert': False, + 'remote_system_username': 'username', + 'remote_system_password': 'password', + 'remote_system_port': 1 + }, + 'destination_pool_name': 'pool_test_name_1', + 'destination_pool_id': None}, + 'replication_state': 'enable', + 'state': 'present' + } diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py new file mode 100644 index 00000000..4e93b628 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py @@ -0,0 +1,154 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache 
License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of host module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockHostApi: + HOST_MODULE_ARGS = { + 'unispherehost': '**.***.**.***', + 'port': '123', + 'host_id': None, + 'host_name': None, + 'host_os': None, + 'description': None, + 'initiators': None, + 'initiator_state': None, + 'new_host_name': None, + 'network_address': None, + 'network_address_state': None, + 'state': None + } + + IP_ADDRESS_MOCK_VALUE = '***.***.*.*' + IQN_INITIATOR_MOCK_VALUE = 'iqn.1111-11.com.vmware: host_name_1-111111f' + + @staticmethod + def get_host_count_response(): + return [{"auto_manage_type": "HostManageEnum.OTHERS", "description": "", "existed": True, + "fc_host_initiators": {"UnityHostInitiatorList": [{"UnityHostInitiator": {}}]}, "health": + {"UnityHealth": {}}, "host_ip_ports": {"UnityHostIpPortList": [{"UnityHostIpPort": {}}, + {"UnityHostIpPort": {}}]}, "host_pushed_uuid": "1-1-1-1-1", + "id": "Host_id_1", "iscsi_host_initiators": {"UnityHostInitiatorList": [{"UnityHostInitiator": {}}]}, + "name": "host_name_1", "os_type": "XXXXXXXX", "type": "HostTypeEnum.HOST_AUTO"}] + + @staticmethod + def get_host_initiators_list(): + return ['1:1:1:1:1:1:1:1:1', MockHostApi.IQN_INITIATOR_MOCK_VALUE] + + @staticmethod + def get_host_details_response(response_type): + if response_type == 'api': + return {'auto_manage_type': 'HostManageEnum.OTHERS', 'datastores': None, 'description': '', + 'fc_host_initiators': [MockSDKObject({'chap_user_name': None, + 'health': {'UnityHealth': {}}, 'id': 'HostInitiator_fc_1', + 'initiator_id': '1:1:1:1:1:1:1:1:1', + 'initiator_source_type': 'HostInitiatorSourceTypeEnum.OPEN_NATIVE', 'is_bound': None, + 'is_chap_secret_enabled': False, + 
'is_ignored': False, 'iscsi_type': None, + 'node_wwn': '11:12:13:14:**:**:**:**', + 'parent_host': {'UnityHost': {'id': 'Host_id_1'}}, + 'paths': [MockSDKObject({'id': 'HostInitiator_mock_6', 'is_logged_in': True}), + MockSDKObject({'id': 'HostInitiator_mock_5', 'is_logged_in': True}), + MockSDKObject({'id': 'HostInitiator_mock_4', 'is_logged_in': True}), + MockSDKObject({'id': 'HostInitiator_mock_3', 'is_logged_in': True})], + 'port_wwn': '10:10:10:10:10:10:10:10:10', 'source_type': None, + 'type': 'HostInitiatorTypeEnum.FC', 'existed': True})], + 'host_container': None, 'host_ip_ports': [MockSDKObject({'address': MockHostApi.IP_ADDRESS_MOCK_VALUE, + 'host': None, 'id': 'HostNetworkAddress_1', + 'is_ignored': None, 'name': None, 'netmask': None, 'type': None, + 'v6_prefix_length': None, 'existed': True}), + MockSDKObject({'address': 'host_name_1', 'host': None, 'id': 'HostNetworkAddress_2', + 'is_ignored': None, 'name': None, 'netmask': None, 'type': None, + 'v6_prefix_length': None, 'existed': True})], + 'host_luns': MockSDKObject({'lun': + [MockSDKObject({'hlu': 1, 'host': None, 'id': 'host_a', 'name': 'host_name_a', 'is_read_only': None, + 'lun': {'UnityLun': {}}, 'snap': None, 'type': None, 'existed': True}), + MockSDKObject({'hlu': 0, 'host': None, 'id': 'host_b', 'name': 'host_name_b', 'is_read_only': None, + 'lun': {'UnityLun': {}}, 'snap': None, 'type': None, 'existed': True})]}), + 'host_polled_uuid': None, 'host_pushed_uuid': '1-1-1-1-1', + 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_id_1', + 'iscsi_host_initiators': [MockSDKObject({'chap_user_name': None, 'health': {'UnityHealth': {}}, 'id': 'HostInitiator_iscsi_1', + 'initiator_id': MockHostApi.IQN_INITIATOR_MOCK_VALUE, + 'initiator_source_type': 'HostInitiatorSourceTypeEnum.OPEN_NATIVE', 'is_bound': True, + 'is_chap_secret_enabled': False, 'is_ignored': False, + 'iscsi_type': 'HostInitiatorIscsiTypeEnum.SOFTWARE', 'node_wwn': None, + 'parent_host': {'UnityHost': {'id': 'Host_id_1'}}, 
+ 'paths': [MockSDKObject({'id': 'HostInitiator_mock_1', 'is_logged_in': True}), + MockSDKObject({'id': 'HostInitiator_mock_2', 'is_logged_in': True})], + 'port_wwn': None, 'source_type': None, 'type': 'HostInitiatorTypeEnum.ISCSI', + 'existed': True})], + 'last_poll_time': None, 'name': 'host_name_1', + 'os_type': 'XXXXXXXX', 'registration_type': None, 'storage_resources': None, 'tenant': None, + 'type': 'HostTypeEnum.HOST_AUTO', + 'vms': None, 'existed': True, 'health': {'UnityHealth': {}}} + elif response_type == 'module': + return {'changed': False, + 'host_details': {'auto_manage_type': 'HostManageEnum.OTHERS', 'datastores': None, 'description': '', + 'fc_host_initiators': [{'id': 'HostInitiator_fc_1', + 'name': '1:1:1:1:1:1:1:1:1', + 'paths': [{'id': 'HostInitiator_mock_6', + 'is_logged_in': True}, + {'id': 'HostInitiator_mock_5', + 'is_logged_in': True}, + {'id': 'HostInitiator_mock_4', + 'is_logged_in': True}, + {'id': 'HostInitiator_mock_3', + 'is_logged_in': True}]}], + 'health': {'UnityHealth': {}}, + 'host_container': None, + 'host_luns': [{'id': "host_a", 'name': 'host_name_a'}, {'id': 'host_b', 'name': 'host_name_b'}], + 'host_polled_uuid': None, 'host_pushed_uuid': '1-1-1-1-1', + 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_id_1', + 'iscsi_host_initiators': [{'id': 'HostInitiator_iscsi_1', + 'name': MockHostApi.IQN_INITIATOR_MOCK_VALUE, + 'paths': [{'id': 'HostInitiator_mock_1', + 'is_logged_in': True}, + {'id': 'HostInitiator_mock_2', + 'is_logged_in': True}]}], + 'last_poll_time': None, 'name': 'host_name_1', 'os_type': 'XXXXXXXX', + 'registration_type': None, 'storage_resources': None, 'tenant': None, + 'type': 'HostTypeEnum.HOST_AUTO', 'vms': None, 'existed': True, + 'network_addresses': [MockHostApi.IP_ADDRESS_MOCK_VALUE, 'host_name_1']}} + elif response_type == 'error': + return "Incorrect username or password provided." 
+ + @staticmethod + def get_host_details_after_network_address_addition(response_type): + if response_type == 'api': + host_object = MockHostApi.get_host_details_response('api') + host_object['host_ip_ports'].append(MockSDKObject({'address': 'net_add_1', 'host': None, 'id': 'HostNetworkAddress_3', + 'is_ignored': None, 'name': None, 'netmask': None, 'type': None, + 'v6_prefix_length': None, 'existed': True})) + return host_object + elif response_type == 'module': + host_module_response = MockHostApi.get_host_details_response('module') + host_module_response['host_details']['network_addresses'].append('net_add_1') + host_module_response['changed'] = True + return host_module_response + elif response_type == 'invalid_address': + return 'Please enter valid IPV4 address or host name for network address' + + @staticmethod + def get_host_details_after_network_address_removal(response_type): + if response_type == 'api': + host_object = MockHostApi.get_host_details_response('api') + host_object['host_ip_ports'] = [MockSDKObject({'address': MockHostApi.IP_ADDRESS_MOCK_VALUE, 'host': None, 'id': 'HostNetworkAddress_1', + 'is_ignored': None, 'name': None, 'netmask': None, 'type': None, + 'v6_prefix_length': None, 'existed': True})] + return host_object + elif response_type == 'module': + host_module_response = MockHostApi.get_host_details_response('module') + host_module_response['host_details']['network_addresses'].remove('host_name_1') + host_module_response['changed'] = True + return host_module_response + elif response_type == 'invalid_IPV4': + return 'Please enter valid IPV4 address for network address' diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py new file mode 100644 index 00000000..6bd53ea9 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py @@ -0,0 +1,122 @@ +# 
Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of interface on Unity""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import MagicMock + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from copy import deepcopy + + +class MockInterfaceApi: + INTERFACE_MODULE_ARGS = { + 'unispherehost': '**.***.**.***', + 'port': '123', + 'nas_server_id': None, + 'nas_server_name': None, + 'ethernet_port_name': None, + 'ethernet_port_id': None, + 'role': None, + 'interface_ip': None, + 'netmask': None, + 'prefix_length': None, + 'gateway': None, + 'vlan_id': None, + 'state': None + } + ETHERNET_PORT_NAME = "Card Ethernet Port" + NETMASK_DUMMY = "255.xx.xx.xx" + GATEWAY_DUMMY = "10.***.**.1" + INTERFACE_DUMMY = "10.***.**.**" + + NAS_SERVER_OBJECT = \ + MockSDKObject({'allow_unmapped_user': None, 'cifs_server': {'UnityCifsServerList': [{'UnityCifsServer': {'id': 'cifs_id_0'}}]}, + 'current_sp': {'UnityStorageProcessor': {'id': 'abc'}}, + 'current_unix_directory_service': 'NasServerUnixDirectoryServiceEnum.NIS', 'default_unix_user': None, + 'default_windows_user': None, 'file_dns_server': {'UnityFileDnsServer': {'id': 'dns_id_0'}}, + 'file_interface': {'UnityFileInterfaceList': [{'UnityFileInterface': {'id': 'file_interface_id_0'}}]}, + 'filesystems': {'UnityFileSystemList': [{'UnityFileSystem': {'id': 'fs_id_0'}}]}, + 'home_sp': {'UnityStorageProcessor': {'id': 'abd'}}, + 'id': 'nas_id_00', 'is_backup_only': False, 'is_multi_protocol_enabled': False, + 'is_packet_reflect_enabled': False, 'is_replication_destination': False, + 'is_replication_enabled': True, 'is_windows_to_unix_username_mapping_enabled': None, + 'name': 'dummy_nas', 'pool': {'UnityPool': {'id': 'pool_id_0'}}, + 'preferred_interface_settings': 
{'UnityPreferredInterfaceSettings': {'id': 'preferred_interface_0'}}, + 'replication_type': 'ReplicationTypeEnum.MIXED', + 'tenant': None, 'virus_checker': {'UnityVirusChecker': {'id': 'cava_id_0'}}, + 'existed': True}) + + INTERFACE_OBJECT = \ + MockSDKObject({"existed": True, + "gateway": GATEWAY_DUMMY, + "id": "file_interface_id_0", + "ip_address": INTERFACE_DUMMY, + "ip_port": {"UnityIpPort": {"id": "ethernet_port_id_0"}}, + "ip_protocol_version": "IpProtocolVersionEnum.IPv4", + "is_disabled": False, "is_preferred": True, + "mac_address": "AA:AA:AA:**:**:**", + "name": "dummy_if_name", + "nas_server": {"UnityNasServer": {"id": "nas_id_00"}}, + "netmask": NETMASK_DUMMY, + "role": "FileInterfaceRoleEnum.BACKUP", + "vlan_id": 324}) + + FILE_INTERFACE_ROLE_ENUM_DUMMY = { + 'PRODUCTION': (0, 'Production'), + 'BACKUP': (1, 'Backup') + } + + @staticmethod + def get_nas_without_interface(): + nas_object = deepcopy(MockInterfaceApi.NAS_SERVER_OBJECT) + nas_object.file_interface['UnityFileInterfaceList'] = [] + return nas_object + + @staticmethod + def get_nas_server_obj_existed_false(): + nas_object = deepcopy(MockInterfaceApi.NAS_SERVER_OBJECT) + nas_object.existed = False + return nas_object + + @staticmethod + def get_interface_exception_response(response_type): + if response_type == 'nas_server_id_exception': + return "Failed to get details of NAS server: dummy_nas with error: " + elif response_type == 'interface_exception': + return "Getting Interface details failed with error " + elif response_type == 'add_interface_exception': + return "Failed to add interface to NAS Server with error: " + elif response_type == 'delete_interface_exception': + return "Failed to delete interface with error: " + + @staticmethod + def get_interface_error_response(response_type): + if response_type == 'invalid_ethernet_port_name': + return "Please provide valid value for: ethernet_port_name" + elif response_type == 'invalid_vlan_id': + return "vlan_id should be in the range of 3 to 
4094" + elif response_type == 'invalid_interface_ip': + return 'The value for interface ip is invalid' + elif response_type == 'invalid_gateway': + return "The value for gateway is invalid" + elif response_type == 'invalid_netmask': + return 'Invalid IPV4 address specified for netmask' + elif response_type == 'modify_failure': + return "Modification of Interfaces for NAS server is not supported through Ansible module" + elif response_type == 'no_role': + return "Role is a mandatory parameter for adding interface to NAS Server." + elif response_type == 'no_ethernet': + return "ethernet_port_name/ethernet_port_id is mandatory parameter for adding interface to NAS Server." + + @staticmethod + def get_nas_server_obj_errors(response_type): + if response_type == 'existed_false': + return "NAS server with id nas_id_00 does not exist" + elif response_type == 'exception': + return "Failed to get details of NAS server with error: " diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py new file mode 100644 index 00000000..cb11e6d8 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py @@ -0,0 +1,64 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of NASServer module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockNASServerApi: + @staticmethod + def get_nas_server_response(): + return ({"access_policy": "AccessPolicyEnum.NATIVE", + "cifs_notify_on_change_dir_depth": 512, + "data_reduction_percent": 0, + "data_reduction_ratio": 1.0, + "data_reduction_size_saved": 0, + "description": "", + "existed": True, + "size_total": 5, + "id": "nas0", + "name": "nas0", + "replication_type": 
"Remote"}) + + @staticmethod + def get_replication_params(is_valid=True): + rpo = 60 + if not is_valid: + rpo = 2 + return {'replication_params': { + 'replication_name': None, + 'new_replication_name': None, + 'replication_type': 'local', + 'replication_mode': 'asynchronous', + 'rpo': rpo, + 'remote_system': None, + 'destination_nas_server_name': None, + 'destination_pool_name': 'pool_test_name_1', + 'destination_pool_id': None}, + 'replication_state': 'enable', + 'state': 'present' + } + else: + return {'replication_params': { + 'replication_name': None, + 'replication_mode': 'asynchronous', + 'new_replication_name': None, + 'replication_type': 'remote', + 'rpo': rpo, + 'remote_system': { + 'remote_system_host': '1.1.1.1', + 'remote_system_verifycert': False, + 'remote_system_username': 'username', + 'remote_system_password': 'password', + 'remote_system_port': 1 + }, + 'destination_nas_server_name': None, + 'destination_pool_name': 'pool_test_name_1', + 'destination_pool_id': None}, + 'replication_state': 'enable', + 'state': 'present' + } diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py new file mode 100644 index 00000000..41af88dc --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py @@ -0,0 +1,139 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of nfs module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockNfsApi: + NFS_MODULE_ARGS = { + 'unispherehost': '**.***.**.***', + 'port': '123', + 'description': None, + 'anonymous_uid': None, + 'anonymous_gid': None, + 
'min_security': None, + 'default_access': None, + 'nas_server_id': None, + 'nas_server_name': None, + 'nfs_export_id': None, + 'nfs_export_name': None, + 'snapshot_name': None, + 'snapshot_id': None, + 'filesystem_name': None, + 'filesystem_id': None, + 'host_state': None, + 'adv_host_mgmt_enabled': None, + 'no_access_hosts': None, + 'read_only_hosts': None, + 'read_only_root_hosts': None, + 'read_write_hosts': None, + 'read_write_root_hosts': None, + 'path': None, + 'state': None + } + + DUMMY_DOMAIN_VALUE = "google.com" + DUMMY_SUBNET_VALUE = "**.***.2.2/10" + + FILESYSTEM_OBJECT = MockSDKObject({"access_policy": "AccessPolicyEnum.UNIX", "cifs_notify_on_change_dir_depth": 512, + "data_reduction_percent": 0, "data_reduction_ratio": 1.0, "data_reduction_size_saved": 0, + "description": "", "existed": True, + "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN", + "format": "FSFormatEnum.UFS64", "host_io_size": "HostIOSizeEnum.GENERAL_16K", + "id": "fs_id_1", "is_advanced_dedup_enabled": False, "is_cifs_notify_on_access_enabled": False, + "is_cifs_notify_on_write_enabled": False, "is_cifs_op_locks_enabled": True, + "is_cifs_sync_writes_enabled": False, "is_data_reduction_enabled": False, "is_read_only": False, + "is_smbca": False, "is_thin_enabled": True, "locking_policy": "FSLockingPolicyEnum.MANDATORY", + "min_size_allocated": 0, "name": "fs_dummy_name", "nas_server": {"id": "nas_id_1"}, + "nfs_share": [{"id": "NFSShare_id_1"}], "pool": {"id": "pool_id_1"}, + "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES", "snap_count": 0, "snaps_size": 0, + "snaps_size_allocated": 0, "storage_resource": {"id": "stg_id_1"}, + "supported_protocols": "FSSupportedProtocolEnum.NFS", + "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH", + "type": "FilesystemTypeEnum.FILESYSTEM"}) + + NFS_SHARE_OBJECT = MockSDKObject({"anonymous_gid": 4294967294, "anonymous_uid": 4294967294, + "default_access": "NFSShareDefaultAccessEnum.NO_ACCESS", "description": "", 
"existed": True, + "export_option": 1, "export_paths": ["**.***.**.**:/nfsshare_dummy_name"], + "filesystem": MockSDKObject({"id": "fs_id_1", "name": "fs_name1", "nas_server": "not_none"}), + "id": "NFSShare_id_1", + "min_security": "NFSShareSecurityEnum.SYS", + "modification_time": "2022-04-24 17:07:57.749000+00:00", + "name": "nfsshare_dummy_name", + "no_access_hosts_string": "**.***.**.0/255.***.*.*,198.***.**.*/255.***.*.*", + "path": "/", "read_only_hosts_string": "", "read_only_root_hosts_string": "", + "read_write_hosts_string": "", + "read_write_root_hosts_string": "", "role": "NFSShareRoleEnum.PRODUCTION", + "type": "NFSTypeEnum.NFS_SHARE"}) + + NFS_SHARE_DISPLAY_ATTR = {'anonymous_gid': 4294967294, 'anonymous_uid': 4294967294, 'creation_time': '2022-03-09 15:05:34.720000+00:00', + 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS', 'description': '', 'export_option': 1, + 'export_paths': ['**.***.**.**:/nfsshare_dummy_name'], + 'filesystem': {'UnityFileSystem': {'id': 'fs_id_1', 'name': 'fs_name1'}}, 'host_accesses': None, + 'id': 'NFSShare_id_1', 'is_read_only': None, 'min_security': 'NFSShareSecurityEnum.SYS', + 'modification_time': '2022-04-24 17:07:57.749000+00:00', 'name': 'nfsshare_dummy_name', + 'nfs_owner_username': None, 'no_access_hosts': None, + 'no_access_hosts_string': '**.***.**.0/255.***.*.*,198.***.**.*/255.***.*.*', + 'path': '/', 'read_only_hosts': None, 'read_only_hosts_string': '', 'read_only_root_access_hosts': None, + 'read_only_root_hosts_string': '', 'read_write_hosts': None, 'read_write_hosts_string': '', + 'read_write_root_hosts_string': '', 'role': 'NFSShareRoleEnum.PRODUCTION', 'root_access_hosts': None, + 'snap': None, 'type': 'NFSTypeEnum.NFS_SHARE', 'existed': True, + 'nas_server': {'UnityNasServer': {'id': 'nas_id_1', 'name': 'lglad068'}}} + + @staticmethod + def get_nfs_share_object_on_host_access(action, advhostmgmt): + if advhostmgmt: + if action == 'add': + nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT + return 
nfs_share_object + elif action == 'remove': + nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT + nfs_share_object.no_access_hosts_string = 'host1,**.***.**.0/255.***.*.*,**.***.2.2/255.***.*.*,198.***.**.*/255.***.*.*' + return nfs_share_object + else: + if action == 'add': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_OBJECT + nfs_share_display_attr.read_only_root_hosts_string = '' + return nfs_share_display_attr + elif action == 'remove': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_OBJECT + nfs_share_display_attr.read_only_root_hosts_string = '*.google.com,**.***.0.0/255.***.*.*' + return nfs_share_display_attr + + @staticmethod + def get_nfs_share_display_attr_on_host_access(action, advhostmgmt): + if advhostmgmt: + if action == 'add': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR + nfs_share_display_attr['no_access_hosts_string'] = 'host1,**.***.**.0/255.***.*.*,**.***.2.2/255.***.*.*,198.***.**.*/255.***.*.*' + return nfs_share_display_attr + elif action == 'remove': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR + nfs_share_display_attr['no_access_hosts_string'] = '**.***.**.0/255.***.*.*,198.***.**.*/255.***.*.*' + return nfs_share_display_attr + else: + if action == 'add': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR + nfs_share_display_attr['read_only_root_hosts_string'] = '*.google.com,**.***.0.0/255.***.*.*' + return nfs_share_display_attr + elif action == 'remove': + nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR + nfs_share_display_attr['read_only_root_hosts_string'] = '' + return nfs_share_display_attr + + @staticmethod + def host_access_negative_response(response_type): + if response_type == 'subnet_validation': + return "Subnet should be in format 'IP address/netmask' or 'IP address/prefix length'" + elif response_type == 'advhostmngmnt_field_validation': + return "'host_state' and 'adv_host_mgmt_enabled' is required along with: read_only_root_hosts" + elif response_type == 
'modify_exception': + return 'Failed to modify nfs error: ' diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py new file mode 100644 index 00000000..1254f003 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py @@ -0,0 +1,259 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of NFS server module on Unity""" + +from __future__ import (absolute_import, division, print_function) +from unittest.mock import MagicMock + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockNFSServerApi: + NFS_SERVER_MODULE_ARGS = { + 'nas_server_id': None, + 'nas_server_name': None, + 'host_name': None, + 'is_secure_enabled': None, + 'kerberos_domain_controller_type': None, + 'kerberos_domain_controller_username': None, + 'kerberos_domain_controller_password': None, + 'is_extended_credentials_enabled': None, + 'nfs_v4_enabled': None, + 'nfs_server_id': None, + 'interfaces': None, + 'remove_spn_from_kerberos': None, + 'state': None + } + + NAS_SERVER_OBJ = MockSDKObject({"id": "nas_10"}) + + @staticmethod + def get_nfs_server_details_method_response(): + return { + "credentials_cache_ttl": "0:15:00", + "existed": True, + "file_interfaces": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": 1111111111111, + "id": "if_3" + } + }] + }, + "hash": 1111111111111, + "id": "nfs_95", + "is_extended_credentials_enabled": False, + "is_secure_enabled": False, + "nas_server": MockNFSServerApi.NAS_SERVER_OBJ, + "nfs_v4_enabled": True, + 'servicee_principal_name': None + } + + @staticmethod + def get_nfs_server_details(): + return [MockSDKObject({ + 
"credentials_cache_ttl": "0:15:00", + "existed": True, + "file_interfaces": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": 1111111111111, + "id": "if_3" + } + }] + }, + "hash": 1111111111111, + "id": "nfs_95", + "is_extended_credentials_enabled": False, + "is_secure_enabled": False, + "nas_server": MockNFSServerApi.NAS_SERVER_OBJ, + "nfs_v4_enabled": True, + 'servicee_principal_name': None})] + + @staticmethod + def validate_params_exception(): + return "Please provide valid value for:" + + @staticmethod + def create_nfs_server_without_nas_server_id(): + return "Please provide nas server id/name to create NFS server." + + @staticmethod + def get_nas_server_id_api_exception(): + return "Failed to get details of NAS server:" + + @staticmethod + def create_nfs_server_without_is_secure_enabled(): + return "For create NFS Server nfs_v4_enabled and is_secure_enabled should be true." + + @staticmethod + def create_nfs_server_with_api_exception(): + return "Failed to create NFS server with on NAS Server" + + @staticmethod + def get_nfs_server_api_exception(): + return "Incorrect username or password provided." 
+ + @staticmethod + def get_nfs_server_api_exception_1(): + return "Failed to get details of NFS Server with error" + + @staticmethod + def delete_exception(): + return "Failed to delete NFS server:" + + @staticmethod + def modify_error_msg(): + return "Modification of NFS Server parameters is not supported through Ansible module" + + @staticmethod + def get_nas_server_details(): + return { + "UnityNasServer": { + "cifs_server": { + "UnityCifsServerList": [{ + "UnityCifsServer": { + "hash": 1111111111111, + "id": "cifs_60" + } + }] + }, + "current_sp": { + "UnityStorageProcessor": { + "hash": 1111111111111, + "id": "spa" + } + }, + "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE", + "existed": True, + "file_dns_server": { + "UnityFileDnsServer": { + "hash": 1111111111111, + "id": "dns_11" + } + }, + "file_interface": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": -1111111111111, + "id": "if_43" + } + }] + }, + "hash": -1111111111111, + "health": { + "UnityHealth": { + "hash": 1111111111111 + } + }, + "home_sp": { + "UnityStorageProcessor": { + "hash": -1111111111111, + "id": "spa" + } + }, + "id": "nas_18", + "is_backup_only": False, + "is_multi_protocol_enabled": False, + "is_packet_reflect_enabled": False, + "is_replication_destination": False, + "is_replication_enabled": False, + "name": "test_nas1", + "pool": { + "UnityPool": { + "hash": -1111111111111, + "id": "pool_7" + } + }, + "preferred_interface_settings": { + "UnityPreferredInterfaceSettings": { + "hash": -1111111111111, + "id": "preferred_if_16" + } + }, + "replication_type": "ReplicationTypeEnum.NONE", + "size_allocated": 1111111111111, + "virus_checker": { + "UnityVirusChecker": { + "hash": 1111111111111, + "id": "cava_18" + } + } + } + } + + @staticmethod + def get_nas_server_id(): + return MockSDKObject({ + "cifs_server": { + "UnityCifsServerList": [{ + "UnityCifsServer": { + "hash": 1111111111111, + "id": "cifs_34" + } + }] + }, + "current_sp": { + 
"UnityStorageProcessor": { + "hash": 1111111111111, + "id": "spb" + } + }, + "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NIS", + "existed": True, + "file_dns_server": { + "UnityFileDnsServer": { + "hash": 1111111111111, + "id": "dns_12" + } + }, + "file_interface": { + "UnityFileInterfaceList": [{ + "UnityFileInterface": { + "hash": 1111111111111, + "id": "if_37" + } + }] + }, + "hash": 1111111111111, + "health": { + "UnityHealth": { + "hash": 1111111111111 + } + }, + "home_sp": { + "UnityStorageProcessor": { + "hash": 1111111111111, + "id": "spb" + } + }, + "id": "nas_10", + "is_backup_only": False, + "is_multi_protocol_enabled": False, + "is_packet_reflect_enabled": False, + "is_replication_destination": False, + "is_replication_enabled": True, + "name": "dummy_nas", + "pool": { + "UnityPool": { + "hash": 1111111111111, + "id": "pool_7" + } + }, + "preferred_interface_settings": { + "UnityPreferredInterfaceSettings": { + "hash": 1111111111111, + "id": "preferred_if_10" + } + }, + "replication_type": "ReplicationTypeEnum.REMOTE", + "size_allocated": 1111111111111, + "virus_checker": { + "UnityVirusChecker": { + "hash": 1111111111111, + "id": "cava_10"}}}) diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py new file mode 100644 index 00000000..2556870b --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py @@ -0,0 +1,32 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock SDKResponse for Unit tests for Unity modules""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockSDKObject: + def __init__(self, data): + self.skip_list = ['skip_list'] + for key, value in data.items(): + setattr(self, key, 
value) + + def add_to_skip_list(self, key): + self.skip_list.append(key) + + def _get_properties(self): + data = {} + for attr, value in self.__dict__.items(): + if attr not in self.skip_list: + data[attr] = value + return data + + def get_id(self): + return "res_0" + + def name(self): + return "res_0" diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py new file mode 100644 index 00000000..1ec9fcad --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py @@ -0,0 +1,168 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http: //www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of storagepool module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject + + +class MockStoragePoolApi: + STORAGE_POOL_MODULE_ARGS = { + 'unispherehost': '**.***.**.***', + 'port': '123', + 'pool_name': None, + 'pool_id': None, + 'new_pool_name': None, + 'pool_description': None, + 'fast_cache': None, + 'fast_vp': None, + 'raid_groups': {}, + 'state': None + } + RAID_TYPE_5 = 'RaidTypeEnum.RAID5' + + @staticmethod + def get_pool_details_response(response_type): + if response_type == 'get_pool': + return {'alert_threshold': 84, 'creation_time': '2021-11-11 11:11:11+00:00', + 'description': '', 'harvest_state': 'UsageHarvestStateEnum.IDLE', + 'health': {'UnityHealth': {}}, 'id': 'pool_mock_1', + 'is_all_flash': True, 'is_empty': False, 'is_fast_cache_enabled': False, + 'is_harvest_enabled': True, 'is_snap_harvest_enabled': False, + 'metadata_size_subscribed': 1, 'metadata_size_used': 1, + 'name': 'Ansible_Unity_TEST_1', 'object_id': 1, 'pool_fast_vp': 
{'UnityPoolFastVp': {}}, + 'pool_space_harvest_high_threshold': 95.0, 'pool_space_harvest_low_threshold': 85.0, 'pool_type': + 'StoragePoolTypeEnum.DYNAMIC', 'raid_type': MockStoragePoolApi.RAID_TYPE_5, 'rebalance_progress': None, 'size_free': 1, + 'size_subscribed': 1, 'size_total': 1, 'size_used': 1, 'snap_size_subscribed': + 1, 'snap_size_used': 1, 'snap_space_harvest_high_threshold': 25.0, 'snap_space_harvest_low_threshold': + 20.0, 'tiers': {'UnityPoolTierList': [{'UnityPoolTier': {}}, {'UnityPoolTier': {}}, {'UnityPoolTier': {}}]}, 'existed': True} + elif response_type == 'pool_object': + return {'alert_threshold': 84, 'creation_time': '2021-11-11 11:11:11+00:00', + 'description': '', 'harvest_state': 'UsageHarvestStateEnum.IDLE', + 'health': {'UnityHealth': {}}, 'id': 'pool_mock_1', + 'is_all_flash': True, 'is_empty': False, 'is_fast_cache_enabled': False, + 'is_harvest_enabled': True, 'is_snap_harvest_enabled': False, + 'metadata_size_subscribed': 1, 'metadata_size_used': 1, + 'name': 'Ansible_Unity_TEST_1', 'object_id': 1, + 'pool_fast_vp': {'UnityPoolFastVp': {}}, + 'pool_space_harvest_high_threshold': 95.0, + 'pool_space_harvest_low_threshold': 85.0, 'pool_type': 'StoragePoolTypeEnum.DYNAMIC', + 'raid_type': MockStoragePoolApi.RAID_TYPE_5, 'rebalance_progress': None, 'size_free': 1, + 'size_subscribed': 1, 'size_total': 1, 'size_used': 1, + 'snap_size_subscribed': 1, 'snap_size_used': 1, + 'snap_space_harvest_high_threshold': 25.0, 'snap_space_harvest_low_threshold': 20.0, + 'tiers': MockSDKObject({'disk_count': [5, 0, 0], 'name': ['Extreme Performance', 'Performance', 'Capacity'], + 'pool_units': [{'UnityPoolUnitList': [{'UnityPoolUnit': {'id': 'pool_unit_mock_1'}}]}, None, None], + 'raid_type': [MockStoragePoolApi.RAID_TYPE_5, 'RaidTypeEnum.NONE', 'RaidTypeEnum.NONE'], + 'size_free': [1, 0, 0], + 'size_moving_down': [0, 0, 0], 'size_moving_up': [0, 0, 0], + 'size_moving_within': [0, 0, 0], 'size_total': [1, 0, 0], + 'size_used': [1, 0, 0], 
'stripe_width': ['RaidStripeWidthEnum._5', None, None], + 'tier_type': ['TierTypeEnum.EXTREME_PERFORMANCE', 'TierTypeEnum.PERFORMANCE', 'TierTypeEnum.CAPACITY'], + 'existed': True}), + 'existed': True} + elif response_type == 'disk_list': + return [MockSDKObject({"bus_id": 99, "current_speed": 1, "disk_group": {"UnityDiskGroup": {"id": "disk_mock_1"}}, + "disk_technology": MockSDKObject({"name": "mock_disk_tech"}), "emc_part_number": "XXXXXXXX", + "emc_serial_number": "XXXXXXXX", "existed": True, "health": {"UnityHealth": {}}, + "id": "disk_mock_2", "is_fast_cache_in_use": False, "is_in_use": True, + "is_sed": False, "manufacturer": "mock_disk_manufacturer", + "max_speed": 1, "model": "mock_disk_model", "name": "Drive 12", + "needs_replacement": False, "pool": MockSDKObject({"id": "pool_5", "name": "Pool_Mock_TEST_2", "UnityPool": {}}), + "raw_size": 1, "rpm": 0, "size": 1, "slot_number": 12, + "tier_type": MockSDKObject({"name": "EXTREME_PERFORMANCE"}), "vendor_size": 1, + "version": "S109", "wwn": "00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}), + MockSDKObject({"bus_id": 99, "current_speed": 1, + "disk_group": {"UnityDiskGroup": {"id": "disk_mock_1"}}, + "disk_technology": MockSDKObject({"name": "mock_disk_tech"}), "emc_part_number": "XXXXXXXX", + "emc_serial_number": "XXXXXXXX", "existed": True, "health": {"UnityHealth": {}}, + "id": "mock_disk_id", "is_fast_cache_in_use": False, "is_in_use": True, "is_sed": False, + "manufacturer": "mock_disk_manufacturer", "max_speed": 1, "model": "mock_disk_model", + "name": "disk_disk_name", "needs_replacement": False, + "pool": MockSDKObject({"id": "pool_mock_1", "name": "Ansible_Unity_TEST_1"}), + "raw_size": 1, "rpm": 0, "size": 1, + "slot_number": 13, "tier_type": MockSDKObject({"name": "EXTREME_PERFORMANCE"}), + "vendor_size": 1, "version": "S109", + "wwn": "01:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"})] + elif response_type == 'module': + return {"storage_pool_details": + {"alert_threshold": 84, 
"creation_time": "2021-11-11 11:11:11+00:00", "description": "", + "drives": [{"disk_technology": "mock_disk_tech", "id": "mock_disk_id", "name": "disk_disk_name", + "size": 1, "tier_type": "EXTREME_PERFORMANCE"}], + "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE", + "health": {"UnityHealth": {}}, + "id": "pool_mock_1", "is_all_flash": True, "is_empty": False, + "is_fast_cache_enabled": False, "is_fast_vp_enabled": True, + "is_harvest_enabled": True, "is_snap_harvest_enabled": False, + "metadata_size_subscribed": 1, "metadata_size_used": + 1, "name": "Ansible_Unity_TEST_1", "object_id": 1, + "pool_fast_vp": {"UnityPoolFastVp": {}}, + "pool_space_harvest_high_threshold": 95.0, + "pool_space_harvest_low_threshold": 85.0, "pool_type": "StoragePoolTypeEnum.DYNAMIC", + "raid_type": "RaidTypeEnum.RAID5", "rebalance_progress": None, "size_free": 1, + "size_free_with_unit": "1.0 B", "size_subscribed": 1, "size_subscribed_with_unit": "1.0 B", + "size_total": 1, "size_total_with_unit": "1.0 B", "size_used": 1, "size_used_with_unit": "1.0 B", + "snap_size_subscribed": 1, "snap_size_subscribed_with_unit": "1.0 B", "snap_size_used": 1, + "snap_size_used_with_unit": "1.0 B", "snap_space_harvest_high_threshold": 25.0, "snap_space_harvest_low_threshold": 20.0, + "tiers": {"UnityPoolTierList": [{"disk_count": [5, 0, 0], "existed": True, + "name": ["Extreme Performance", "Performance", "Capacity"], + "pool_units": [{"UnityPoolUnitList": [{"UnityPoolUnit": {"id": "pool_unit_mock_1"}}]}, None, None], + "raid_type": ["RaidTypeEnum.RAID5", "RaidTypeEnum.NONE", "RaidTypeEnum.NONE"], + "size_free": [1, 0, 0], "size_moving_down": [0, 0, 0], + "size_moving_up": [0, 0, 0], + "size_moving_within": [0, 0, 0], + "size_total": [1, 0, 0], + "size_used": [1, 0, 0], + "stripe_width": ["RaidStripeWidthEnum._5", None, None], + "tier_type": ["TierTypeEnum.EXTREME_PERFORMANCE", "TierTypeEnum.PERFORMANCE", + "TierTypeEnum.CAPACITY"]}]}}} + elif response_type == 'error': + return 'Get 
details of storage pool failed with error: ' + + @staticmethod + def create_pool_response(response_type): + if response_type == 'api': + return {"storage_pool_details": + {"alert_threshold": 50, "creation_time": "2022-03-08 10:51:08+00:00", "description": "Unity test pool.", + "drives": [{"disk_technology": "SAS", "id": "disk_id_1", "name": "DPE Drive 1", + "size": 1, "tier_type": "PERFORMANCE"}, + {"disk_technology": "SAS", "id": "disk_id_2", "name": "DPE Drive 2", + "size": 1, "tier_type": "PERFORMANCE"}, + {"disk_technology": "SAS", "id": "disk_id_3", "name": "DPE Drive 3", + "size": 1, "tier_type": "PERFORMANCE"}], + "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE", + "health": {"UnityHealth": {}}, + "id": "pool_id_1", "is_all_flash": False, "is_empty": True, + "is_fast_cache_enabled": False, "is_fast_vp_enabled": True, + "is_harvest_enabled": True, "is_snap_harvest_enabled": True, + "metadata_size_subscribed": 0, "metadata_size_used": + 0, "name": "Mock_Test", "object_id": 123, + "pool_fast_vp": {"UnityPoolFastVp": {}}, + "pool_space_harvest_high_threshold": 59.0, + "pool_space_harvest_low_threshold": 40.0, "pool_type": "StoragePoolTypeEnum.DYNAMIC", + "raid_type": "RaidTypeEnum.RAID10", "rebalance_progress": None, "size_free": 1, + "size_free_with_unit": "1 GB", "size_subscribed": 0, "size_subscribed_with_unit": "0B", + "size_total": 1, "size_total_with_unit": "1 GB", "size_used": 0, "size_used_with_unit": "0B", + "snap_size_subscribed": 0, "snap_size_subscribed_with_unit": "0B", "snap_size_used": 0, + "snap_size_used_with_unit": "0B", "snap_space_harvest_high_threshold": 80.0, "snap_space_harvest_low_threshold": 60.0, + "tiers": {"UnityPoolTierList": [{"disk_count": [0, 3, 0], "existed": True, + "name": ["Extreme Performance", "Performance", "Capacity"], + "pool_units": [{"UnityPoolUnitList": [{"UnityPoolUnit": {"id": "rg_id_1"}}, + {"UnityPoolUnit": {"id": "rg_id_2"}}]}, None], + "raid_type": ["RaidTypeEnum.NONE", "RaidTypeEnum.RAID10", 
"RaidTypeEnum.NONE"], + "size_free": [0, 1, 0], "size_moving_down": [0, 0, 0], + "size_moving_up": [0, 0, 0], + "size_moving_within": [0, 0, 0], + "size_total": [0, 1, 0], + "size_used": [0, 0, 0], + "stripe_width": [None, "RaidStripeWidthEnum._2", None], + "tier_type": ["TierTypeEnum.EXTREME_PERFORMANCE", "TierTypeEnum.PERFORMANCE", + "TierTypeEnum.CAPACITY"]}]}}} + elif response_type == 'error': + return 'Failed to create storage pool with error: ' diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py new file mode 100644 index 00000000..e28c2e93 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py @@ -0,0 +1,169 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Mock Api response for Unit tests of CIFS server module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_cifsserver_api \ + import MockCIFSServerApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import HttpError as http_error, MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +utils.UnityCifsServer = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.unity.plugins.modules.cifsserver import 
CIFSServer + + +class TestCIFSServer(): + + get_module_args = MockCIFSServerApi.CIFS_SERVER_MODULE_ARGS + + @pytest.fixture + def cifsserver_module_mock(self): + cifsserver_module_mock = CIFSServer() + cifsserver_module_mock.unity_conn = MagicMock() + utils.cifsserver = MagicMock() + return cifsserver_module_mock + + def test_get_cifs_server_details(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.unity_conn.get_cifs_server = MagicMock(return_value=MockSDKObject(cifs_server_details)) + cifsserver_module_mock.perform_module_operation() + assert MockCIFSServerApi.get_cifs_server_details_method_response() == \ + cifsserver_module_mock.module.exit_json.call_args[1]['cifs_server_details'] + + def test_get_cifs_server_details_using_id(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_id': 'cifs_59', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.unity_conn.get_cifs_server = MagicMock(return_value=MockSDKObject(cifs_server_details)) + cifsserver_module_mock.perform_module_operation() + assert MockCIFSServerApi.get_cifs_server_details_method_response() == \ + cifsserver_module_mock.module.exit_json.call_args[1]['cifs_server_details'] + + def test_get_get_nas_server_id(self, cifsserver_module_mock): + nas_server_details = MockCIFSServerApi.get_nas_server_details() + self.get_module_args.update({ + 'cifs_server_id': 'cifs_59', + 'nas_server_name': 'test_nas1', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.unity_conn.get_nas_server = 
MagicMock(return_value=MockSDKObject(nas_server_details)) + cifsserver_module_mock.perform_module_operation() + cifsserver_module_mock.unity_conn.get_nas_server.assert_called() + + def test_create_cifs_server(self, cifsserver_module_mock): + self.get_module_args.update({ + 'nas_server_id': 'nas_18', + 'cifs_server_name': 'test_cifs_server', + 'domain': 'xxx.xxx.xxx.xxx', + 'domain_username': 'xxxxxxxx', + 'domain_password': 'xxxxxxxx', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=None) + cifsserver_module_mock.unity_conn.create_cifs_server = MagicMock(return_value=True) + cifsserver_module_mock.perform_module_operation() + assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_create_cifs_server_throws_exception(self, cifsserver_module_mock): + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'domain': 'xxx.xxx.xxx.xxx', + 'domain_username': 'xxxxxxxx', + 'domain_password': 'xxxxxxxx', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=None) + cifsserver_module_mock.perform_module_operation() + assert MockCIFSServerApi.create_cifs_server_without_nas() == cifsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_delete_cifs_server(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'unjoin_cifs_server_account': False, + 'domain_username': 'xxxxxxxx', + 'domain_password': 'xxxxxxxx', + 'state': 'absent' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details) + cifsserver_module_mock.get_cifs_server_instance = 
MagicMock(return_value=MockSDKObject(cifs_server_details)) + cifsserver_module_mock.perform_module_operation() + assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_is_modification_required(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'netbios_name': 'ansible_netbios', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details) + cifsserver_module_mock.perform_module_operation() + assert MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_is_domain_modification_required(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'domain': 'yyy.yyy.yyy.yyy', + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details) + cifsserver_module_mock.perform_module_operation() + print(cifsserver_module_mock.module.fail_json.call_args[1]) + assert MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_is_modify_interfaces(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'interfaces': ['if_39'], + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details) + cifsserver_module_mock.perform_module_operation() + print(cifsserver_module_mock.module.fail_json.call_args[1]) + assert 
MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_is_modify_interfaces_idempotency(self, cifsserver_module_mock): + cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response() + self.get_module_args.update({ + 'cifs_server_name': 'test_cifs_server', + 'interfaces': ['if_43'], + 'state': 'present' + }) + cifsserver_module_mock.module.params = self.get_module_args + cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details) + cifsserver_module_mock.perform_module_operation() + assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is False diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py new file mode 100644 index 00000000..dd2cdd81 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py @@ -0,0 +1,193 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for consistency group module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_consistencygroup_api \ + import MockConsistenyGroupApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection 
= MagicMock(side_effect=[MagicMock(), + MockConsistenyGroupApi.get_remote_system_conn_response()]) +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.consistencygroup import ConsistencyGroup + + +class TestConsistencyGroup(): + + get_module_args = MockConsistenyGroupApi.CONSISTENCY_GROUP_MODULE_ARGS + + @pytest.fixture + def consistencygroup_module_mock(self): + consistencygroup_module_mock = ConsistencyGroup() + consistencygroup_module_mock.unity_conn = MagicMock() + utils.cg = MagicMock() + return consistencygroup_module_mock + + def test_enable_cg_replication(self, consistencygroup_module_mock): + self.get_module_args.update({ + 'cg_name': 'lun_test_cg_source_12', + 'replication_params': { + 'destination_cg_name': 'destination_cg_1', + 'replication_mode': 'asynchronous', + 'rpo': 60, + 'replication_type': 'remote', + 'remote_system': { + 'remote_system_host': '11.111.11.11', + 'remote_system_verifycert': False, + 'remote_system_username': 'username', + 'remote_system_password': 'password', + 'remote_system_port': 1111 + }, + 'destination_pool_name': 'pool_test_name_1', + 'destination_pool_id': None + }, + 'replication_state': 'enable', + 'state': 'present' + }) + consistencygroup_module_mock.module.params = self.get_module_args + cg_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_object = MockConsistenyGroupApi.get_cg_object() + consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object) + consistencygroup_module_mock.get_details = MagicMock(side_effect=[ + cg_details, + MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')]) + cg_object.get_id = MagicMock(return_value=cg_details['id']) + utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object) + cg_object.check_cg_is_replicated = MagicMock(return_value=False) + consistencygroup_module_mock.unity_conn.get_remote_system = \ + 
MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system')) + utils.UnityStorageResource = MagicMock(return_value=MockSDKObject({})) + cg_object.replicate_cg_with_dst_resource_provisioning = MagicMock(return_value=None) + consistencygroup_module_mock.perform_module_operation() + assert consistencygroup_module_mock.module.exit_json.call_args[1]['consistency_group_details']['cg_replication_enabled'] is True + assert consistencygroup_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_enable_cg_replication_negative_1(self, consistencygroup_module_mock): + self.get_module_args.update({ + 'cg_name': 'lun_test_cg_source_12', + 'replication_params': { + 'destination_cg_name': '', + 'replication_mode': 'asynchronous', + 'rpo': 60, + 'replication_type': 'local', + 'destination_pool_name': None, + 'destination_pool_id': 'pool_test_1' + }, + 'replication_state': 'enable', + 'state': 'present' + }) + consistencygroup_module_mock.module.params = self.get_module_args + cg_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_object = MockConsistenyGroupApi.get_cg_object() + consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object) + consistencygroup_module_mock.get_details = MagicMock(side_effect=[ + cg_details, + MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')]) + cg_object.get_id = MagicMock(return_value=cg_details['id']) + utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object) + cg_object.check_cg_is_replicated = MagicMock(return_value=False) + consistencygroup_module_mock.unity_conn.get_remote_system = \ + MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system')) + utils.UnityStorageResource = MagicMock(return_value=MockSDKObject({})) + cg_object.replicate_cg_with_dst_resource_provisioning = MagicMock(return_value=None) + 
consistencygroup_module_mock.perform_module_operation() + assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockConsistenyGroupApi.get_cg_replication_dependent_response('destination_cg_name_validation') + + def test_enable_cg_replication_negative_2(self, consistencygroup_module_mock): + self.get_module_args.update({ + 'cg_name': 'lun_test_cg_source_12', + 'replication_params': { + 'destination_cg_name': 'destination_cg_1', + 'replication_mode': 'asynchronous', + 'rpo': 60, + 'replication_type': 'remote', + 'remote_system': { + 'remote_system_host': '11.111.11.11', + 'remote_system_verifycert': False, + 'remote_system_username': 'username', + 'remote_system_password': 'password', + 'remote_system_port': 1111 + }, + 'destination_pool_name': None, + 'destination_pool_id': 'pool_test_1' + }, + 'replication_state': 'enable', + 'state': 'present' + }) + consistencygroup_module_mock.module.params = self.get_module_args + cg_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_object = MockConsistenyGroupApi.get_cg_object() + consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object) + consistencygroup_module_mock.get_details = MagicMock(side_effect=[ + cg_details, + MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')]) + cg_object.get_id = MagicMock(return_value=cg_details['id']) + utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object) + cg_object.check_cg_is_replicated = MagicMock(return_value=False) + consistencygroup_module_mock.unity_conn.get_remote_system = MagicMock(side_effect=MockApiException) + consistencygroup_module_mock.perform_module_operation() + assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockConsistenyGroupApi.get_cg_replication_dependent_response('enable_cg_exception') + + def test_disable_cg_replication(self, consistencygroup_module_mock): + self.get_module_args.update({ + 'cg_name': 
'lun_test_cg_source_12', + 'replication_state': 'disable', + 'state': 'present' + }) + consistencygroup_module_mock.module.params = self.get_module_args + cg_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_object = MockConsistenyGroupApi.get_cg_object() + consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object) + consistencygroup_module_mock.get_details = MagicMock(side_effect=[ + MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details'), + cg_details]) + cg_object.get_id = MagicMock(return_value=cg_details['id']) + utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object) + cg_object.check_cg_is_replicated = MagicMock(return_value=True) + repl_session = MockConsistenyGroupApi.get_cg_replication_dependent_response('replication_session') + repl_session.delete = MagicMock(return_value=None) + consistencygroup_module_mock.unity_conn.get_replication_session = \ + MagicMock(return_value=[repl_session]) + consistencygroup_module_mock.perform_module_operation() + assert consistencygroup_module_mock.module.exit_json.call_args[1]['consistency_group_details']['cg_replication_enabled'] is False + assert consistencygroup_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_disable_cg_replication_throws_exception(self, consistencygroup_module_mock): + self.get_module_args.update({ + 'cg_name': 'lun_test_cg_source_12', + 'replication_state': 'disable', + 'state': 'present' + }) + consistencygroup_module_mock.module.params = self.get_module_args + cg_details = MockConsistenyGroupApi.cg_get_details_method_response() + cg_object = MockConsistenyGroupApi.get_cg_object() + consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object) + consistencygroup_module_mock.get_details = MagicMock(side_effect=[ + MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details'), + cg_details]) + cg_object.get_id = 
MagicMock(return_value=cg_details['id']) + utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object) + cg_object.check_cg_is_replicated = MagicMock(side_effect=MockApiException) + consistencygroup_module_mock.perform_module_operation() + assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockConsistenyGroupApi.get_cg_replication_dependent_response('disable_cg_exception') diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py new file mode 100644 index 00000000..df0b2f76 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py @@ -0,0 +1,94 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for FileSystem module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import re +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_filesystem_api \ + import MockFileSystemApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.UnityReplicationSession = object + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.filesystem import Filesystem + + +class TestFileSystem(): + + FILE_SYSTEM_MODULE_ARGS = {'filesystem_id': '123', 'filesystem_name': None, 'nas_server_name': None, + 'nas_server_id': None, 'pool_name': None, 'pool_id': None, 'size': None, + 'cap_unit': None, 'quota_config': None, 'snap_schedule_name': None, + 'snap_schedule_id': None, 'replication_params': 
{}, 'replication_state': None, 'state': None} + + @pytest.fixture + def filesystem_module_mock(self): + filesystem_module_mock = Filesystem() + filesystem_module_mock.unity_conn = MagicMock() + return filesystem_module_mock + + def test_enable_fs_replication(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params()) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_response = MockFileSystemApi.get_file_system_response() + filesystem_response['replicate_with_dst_resource_provisioning'] = MagicMock(return_value=True) + filesystem_module_mock.perform_module_operation() + assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_enable_fs_replication_invalid_params(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params(False)) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_module_mock.is_modify_required = MagicMock(return_value=False) + filesystem_module_mock.perform_module_operation() + assert "rpo value should be in range of 5 to 1440" in \ + filesystem_module_mock.module.fail_json.call_args[1]['msg'] + + def test_enable_fs_replication_throws_ex(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params()) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_module_mock.is_modify_required = MagicMock(return_value=False) + filesystem_response = MockFileSystemApi.get_file_system_response() + filesystem_response['replicate_with_dst_resource_provisioning'] = MagicMock(side_effect=Exception) + filesystem_module_mock.get_filesystem = MagicMock(side_effect=[ + MockSDKObject(filesystem_response)]) + filesystem_module_mock.get_filesystem_display_attributes = MagicMock(side_effect=[ + MockSDKObject(filesystem_response)]) + filesystem_module_mock.perform_module_operation() + assert 
"Enabling replication to the filesystem failed with error" in \ + re.sub(' <.*?>>', '', filesystem_module_mock.module.fail_json.call_args[1]['msg']) + + def test_modify_fs_replication(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params()) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_module_mock.perform_module_operation() + filesystem_module_mock.get_replication_session_on_filter = MagicMock() + assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_disable_replication(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'}) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_module_mock.get_filesystem_display_attributes = MagicMock() + filesystem_module_mock.perform_module_operation() + assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_disable_replication_throws_ex(self, filesystem_module_mock): + self.FILE_SYSTEM_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'}) + filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS + filesystem_module_mock.get_replication_session = MagicMock(side_effect=Exception) + filesystem_module_mock.get_filesystem_display_attributes = MagicMock() + filesystem_module_mock.perform_module_operation() + assert "Disabling replication on the filesystem failed with error" in \ + re.sub(' <.*?>', '', filesystem_module_mock.module.fail_json.call_args[1]['msg']) diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py new file mode 100644 index 00000000..de94c38d --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py @@ -0,0 +1,143 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 
2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for host module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_host_api \ + import MockHostApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import HttpError as http_error, MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.host import Host + + +class TestHost(): + + get_module_args = MockHostApi.HOST_MODULE_ARGS + + @pytest.fixture + def host_module_mock(self): + host_module_mock = Host() + host_module_mock.unity = MagicMock() + utils.host = MagicMock() + return host_module_mock + + def test_get_host_details(self, host_module_mock): + host_details = MockHostApi.get_host_details_response('api') + self.get_module_args.update({ + 'host_name': 'host_name_1', + }) + host_module_mock.module.params = self.get_module_args + host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list()) + utils.host.UnityHostList.get = MagicMock(return_value=MockHostApi.get_host_count_response()) + host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]]) + host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(host_details)) + 
host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_response('module')['host_details'] == host_module_mock.module.exit_json.call_args[1]['host_details'] + + def test_get_host_details_throws_exception(self, host_module_mock): + self.get_module_args.update({ + 'host_name': 'name1' + }) + host_module_mock.module.params = self.get_module_args + utils.HttpError = http_error + utils.host.UnityHostList.get = MagicMock(side_effect=http_error) + host_module_mock.create_host = MagicMock(return_value=(False, MagicMock())) + host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_response('error') == host_module_mock.module.fail_json.call_args[1]['msg'] + + def test_add_network_address_to_host(self, host_module_mock): + self.get_module_args.update({ + 'host_name': 'host_name_1', + 'network_address': 'net_add_1', + 'network_address_state': 'present-in-host', + 'state': 'present' + }) + host_module_mock.module.params = self.get_module_args + host_details = MockHostApi.get_host_details_response('api') + host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]]) + host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list()) + host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(MockHostApi.get_host_details_after_network_address_addition('api'))) + host_details = MockSDKObject(host_details) + host_details.add_ip_port = MagicMock(return_value=None) + host_details.add_to_skip_list('add_ip_port') + host_module_mock.get_host_details = MagicMock(return_value=host_details) + host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_after_network_address_addition('module')['host_details'] == \ + host_module_mock.module.exit_json.call_args[1]['host_details'] + assert MockHostApi.get_host_details_after_network_address_addition('module')['changed'] == 
host_module_mock.module.exit_json.call_args[1]['changed'] + + def test_add_network_address_to_host_negative(self, host_module_mock): + self.get_module_args.update({ + 'host_name': 'host_name_1', + 'network_address': 'net_ad$$$$$d_12', + 'network_address_state': 'present-in-host', + 'state': 'present' + }) + host_module_mock.module.params = self.get_module_args + host_details = MockHostApi.get_host_details_response('api') + host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]]) + host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list()) + host_module_mock.manage_network_address = MagicMock(return_value=(None, False)) + host_module_mock.get_host_details = MagicMock(return_value=MockSDKObject(host_details)) + host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_after_network_address_addition('invalid_address') == \ + host_module_mock.module.fail_json.call_args[1]['msg'] + assert host_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_remove_network_address_from_host(self, host_module_mock): + self.get_module_args.update({ + 'host_name': 'host_name_1', + 'network_address': 'host_name_1', + 'network_address_state': 'absent-in-host', + 'state': 'present' + }) + host_module_mock.module.params = self.get_module_args + host_details = MockHostApi.get_host_details_response('api') + host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]]) + host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list()) + host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(MockHostApi.get_host_details_after_network_address_removal('api'))) + host_details = MockSDKObject(host_details) + host_details.delete_ip_port = MagicMock(return_value=None) + 
host_details.add_to_skip_list('delete_ip_port') + host_module_mock.get_host_details = MagicMock(return_value=host_details) + host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_after_network_address_removal('module')['host_details'] == \ + host_module_mock.module.exit_json.call_args[1]['host_details'] + assert MockHostApi.get_host_details_after_network_address_removal('module')['changed'] == host_module_mock.module.exit_json.call_args[1]['changed'] + + def test_remove_network_address_from_host_negative(self, host_module_mock): + self.get_module_args.update({ + 'host_name': 'host_name_1', + 'network_address': '1.1.1', + 'network_address_state': 'absent-in-host', + 'state': 'present' + }) + host_module_mock.module.params = self.get_module_args + host_details = MockHostApi.get_host_details_response('api') + host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]]) + host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list()) + host_module_mock.manage_network_address = MagicMock(return_value=(None, False)) + host_module_mock.get_host_details = MagicMock(return_value=MockSDKObject(host_details)) + host_module_mock.perform_module_operation() + assert MockHostApi.get_host_details_after_network_address_removal('invalid_IPV4') == \ + host_module_mock.module.fail_json.call_args[1]['msg'] + assert host_module_mock.module.exit_json.call_args[1]['changed'] is False diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py new file mode 100644 index 00000000..88151bcb --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py @@ -0,0 +1,350 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for interface module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_interface_api \ + import MockInterfaceApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.interface import Interface + + +class TestInterface(): + + interface_module_args = MockInterfaceApi.INTERFACE_MODULE_ARGS + + @pytest.fixture + def interface_module_mock(self): + interface_module_mock = Interface() + interface_module_mock.module.check_mode = False + interface_module_mock.unity_conn = MagicMock() + return interface_module_mock + + def test_validate_param_ethernet_port_name_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'ethernet_port_name': " ", + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.get_nas_server_obj = mock_none_response + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + 
interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('invalid_ethernet_port_name') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_validate_param_vlan_id_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'vlan_id': 2, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.get_nas_server_obj = mock_none_response + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('invalid_vlan_id') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_validate_param_interface_ip_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': "10.2.2", + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.get_nas_server_obj = mock_none_response + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('invalid_interface_ip') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_validate_param_gateway_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'gateway': "10.2.1", + 'state': "present" + }) + 
interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.get_nas_server_obj = mock_none_response + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('invalid_gateway') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_validate_param_netmask_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'netmask': "10.2.0/2", + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.get_nas_server_obj = mock_none_response + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('invalid_netmask') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_nas_server_obj_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_id': "nas_id_00", + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=MockInterfaceApi.get_nas_server_obj_existed_false()) + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = 
MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_nas_server_obj_errors('existed_false') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_nas_server_obj_exception(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_id': "nas_id_00", + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + mock_none_response = MagicMock(return_value=None) + interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=MockApiException) + interface_module_mock.validate_create_params = mock_none_response + interface_module_mock.add_interface = mock_none_response + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_nas_server_obj_errors('exception') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_operation_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'vlan_id': 4, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object) + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT) + interface_module_mock.perform_module_operation() + assert MockInterfaceApi.get_interface_error_response('modify_failure') == \ + interface_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_interface_details(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'state': "present" + }) + 
interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object) + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT) + interface_module_mock.perform_module_operation() + interface_details = MockInterfaceApi.INTERFACE_OBJECT._get_properties() + assert interface_module_mock.module.exit_json.call_args[1]['interface_details'] == interface_details + + def test_get_interface_details_exception(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object) + interface_module_mock.unity_conn.get_file_interface = MagicMock(side_effect=[MockApiException, MockInterfaceApi.INTERFACE_OBJECT]) + interface_module_mock.validate_create_params = MagicMock(return_value=None) + interface_module_mock.add_interface = MagicMock(return_value=None) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockInterfaceApi.get_interface_exception_response('interface_exception') + + def test_add_interface_without_role_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME, + 'netmask': MockInterfaceApi.NETMASK_DUMMY, + 'gateway': MockInterfaceApi.GATEWAY_DUMMY, + 'vlan_id': 324, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_existing = 
MockInterfaceApi.get_nas_without_interface() + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_existing) + interface_module_mock.add_interface = MagicMock(return_value=None) + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockInterfaceApi.get_interface_error_response('no_role') + + def test_add_interface_without_ethernet_negative(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'role': "PRODUCTION", + 'netmask': MockInterfaceApi.NETMASK_DUMMY, + 'gateway': MockInterfaceApi.GATEWAY_DUMMY, + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'vlan_id': 324, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_existing = MockInterfaceApi.get_nas_without_interface() + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_existing) + interface_module_mock.add_interface = MagicMock(return_value=None) + interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})]) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockInterfaceApi.get_interface_error_response('no_ethernet') + + def test_add_interface(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME, + 'role': "PRODUCTION", + 'netmask': MockInterfaceApi.NETMASK_DUMMY, + 'gateway': MockInterfaceApi.GATEWAY_DUMMY, + 'vlan_id': 324, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + nas_server_existing = 
MockInterfaceApi.get_nas_without_interface() + nas_server_existing.get_id = MagicMock(return_value='nas_id_00') + nas_server_existing.add_to_skip_list('get_id') + interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_existing, + nas_server_object]) + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT) + utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY + ethernet_port_info = MagicMock() + ethernet_port_info.id = 'ethernet_port_id_0' + interface_module_mock.unity_conn.get_ethernet_port = MagicMock(return_value=ethernet_port_info) + utils.UnityFileInterface = MagicMock() + utils.UnityFileInterface.create = MagicMock(return_value=None) + interface_module_mock.perform_module_operation() + interface_details = MockInterfaceApi.INTERFACE_OBJECT._get_properties() + assert interface_module_mock.module.exit_json.call_args[1]['interface_details'] == interface_details + + def test_add_interface_no_change(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME, + 'role': "PRODUCTION", + 'netmask': MockInterfaceApi.NETMASK_DUMMY, + 'gateway': MockInterfaceApi.GATEWAY_DUMMY, + 'vlan_id': 324, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_object, + nas_server_object]) + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT) + utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY + ethernet_port_info = MagicMock() + ethernet_port_info.id = 'ethernet_port_id_0' + interface_module_mock.unity_conn.get_ethernet_port = 
MagicMock(return_value=ethernet_port_info) + utils.UnityFileInterface = MagicMock() + utils.UnityFileInterface.create = MagicMock(return_value=None) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.exit_json.call_args[1]['changed'] is False + + def test_add_interface_exception(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME, + 'role': "PRODUCTION", + 'netmask': MockInterfaceApi.NETMASK_DUMMY, + 'gateway': MockInterfaceApi.GATEWAY_DUMMY, + 'vlan_id': 324, + 'state': "present" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + nas_server_existing = MockInterfaceApi.get_nas_without_interface() + nas_server_existing.get_id = MagicMock(return_value='nas_id_00') + nas_server_existing.add_to_skip_list('get_id') + interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_existing, + nas_server_object]) + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT) + utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY + ethernet_port_info = MagicMock() + ethernet_port_info.id = 'ethernet_port_id_0' + interface_module_mock.unity_conn.get_ethernet_port = MagicMock(return_value=ethernet_port_info) + utils.UnityFileInterface = MagicMock() + utils.UnityFileInterface.create = MagicMock(side_effect=MockApiException) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockInterfaceApi.get_interface_exception_response('add_interface_exception') + + def test_delete_interface(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 
'state': "absent" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object) + interface_object = MockInterfaceApi.INTERFACE_OBJECT + interface_object.delete = MagicMock(return_value=None) + interface_object.add_to_skip_list('delete') + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=interface_object) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_interface_exception(self, interface_module_mock): + self.interface_module_args.update({ + 'nas_server_name': "dummy_nas", + 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY, + 'state': "absent" + }) + interface_module_mock.module.params = self.interface_module_args + nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT + interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object) + interface_object = MockInterfaceApi.INTERFACE_OBJECT + interface_object.delete = MagicMock(side_effect=MockApiException) + interface_object.add_to_skip_list('delete') + interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=interface_object) + interface_module_mock.perform_module_operation() + assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \ + MockInterfaceApi.get_interface_exception_response('delete_interface_exception') diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py new file mode 100644 index 00000000..a929ba49 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py @@ -0,0 +1,112 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or 
http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for NAS Server module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nasserver_api \ + import MockNASServerApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.nas_server = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.nasserver import NASServer + + +class TestNASServer(): + + NAS_SERVER_MODULE_ARGS = {'nas_server_name': 'nas0', 'nas_server_id': None, 'nas_server_new_name': None, 'default_unix_user': None, + 'default_windows_user': None, 'is_replication_destination': None, 'is_multiprotocol_enabled': None, + 'allow_unmapped_user': None, 'enable_windows_to_unix_username_mapping': None, + 'is_backup_only': None, 'is_packet_reflect_enabled': None, 'current_unix_directory_service': None, + 'replication_reuse_resource': None, 'replication_params': {}, 'replication_state': None, 'state': None} + + @pytest.fixture + def nasserver_module_mock(self): + nasserver_module_mock = NASServer() + nasserver_module_mock.unity_conn = MagicMock() + return nasserver_module_mock + + def get_nas_response(self): + nasserver_response = MockNASServerApi.get_nas_server_response() + nasserver_response['replicate_with_dst_resource_provisioning'] = MagicMock(return_value=True) + return nasserver_response + + def test_enable_nas_replication(self, nasserver_module_mock): + 
self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params()) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.to_update = MagicMock(return_value=False) + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.perform_module_operation() + assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_enable_nas_replication_invalid_params(self, nasserver_module_mock): + self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params(False)) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.to_update = MagicMock(return_value=False) + nasserver_module_mock.perform_module_operation() + assert "rpo value should be in range of 5 to 1440" in \ + nasserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_enable_nas_replication_throws_ex(self, nasserver_module_mock): + self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params()) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.to_update = MagicMock(return_value=False) + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.get_remote_system = MagicMock(side_effect=Exception) + nasserver_module_mock.perform_module_operation() + assert "Enabling replication to the nas server %s failed with error" \ + % self.NAS_SERVER_MODULE_ARGS['nas_server_name'] in \ + nasserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_modify_nas_replication(self, nasserver_module_mock): + self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params()) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.to_update = 
MagicMock(return_value=False) + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.get_replication_session_on_filter = MagicMock() + nasserver_module_mock.perform_module_operation() + assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_disable_replication(self, nasserver_module_mock): + self.NAS_SERVER_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'}) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.to_update = MagicMock(return_value=False) + nasserver_module_mock.update_replication_params = MagicMock() + nasserver_module_mock.perform_module_operation() + assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_disable_replication_throws_ex(self, nasserver_module_mock): + self.NAS_SERVER_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'}) + nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS + nasserver_module_mock.get_nas_server = \ + MagicMock(return_value=MockSDKObject(self.get_nas_response())) + nasserver_module_mock.to_update = MagicMock(return_value=False) + nasserver_module_mock.get_replication_session = MagicMock(side_effect=Exception) + nasserver_module_mock.perform_module_operation() + assert "Disabling replication on the nas server %s failed with error" \ + % self.NAS_SERVER_MODULE_ARGS['nas_server_name'] in \ + nasserver_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py new file mode 100644 index 00000000..53e945ea --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py @@ -0,0 +1,180 @@ +# Copyright: (c) 
2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for nfs module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nfs_api \ + import MockNfsApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules import nfs + + +class TestNfs(): + + get_module_args = MockNfsApi.NFS_MODULE_ARGS + + @pytest.fixture + def nfs_module_mock(self): + nfs_module_mock = nfs.NFS() + nfs_module_mock.unity = MagicMock() + return nfs_module_mock + + def test_add_host_in_nfs_share_on_advhostmgmt_true(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': True, + 'no_access_hosts': [{'host_name': "host1"}, {'ip_address': "**.***.2.2"}], + 'host_state': 'present-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + utils.UnityNfsShareList = MagicMock + nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('add', True) + nfs_object.modify = MagicMock(return_value=None) + nfs_object.add_to_skip_list('modify') + fs_object = MockNfsApi.FILESYSTEM_OBJECT + get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('add', True) + nfs_module_mock.unity.get_filesystem = 
MagicMock(return_value=fs_object) + nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_remove_host_in_nfs_share_on_advhostmgmt_true(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': True, + 'no_access_hosts': [{'host_name': "host1"}, {'ip_address': "**.***.2.2"}], + 'host_state': 'absent-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + utils.UnityNfsShareList = MagicMock + nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('remove', True) + nfs_object.modify = MagicMock(return_value=None) + nfs_object.add_to_skip_list('modify') + fs_object = MockNfsApi.FILESYSTEM_OBJECT + get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('remove', True) + nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object) + nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_add_host_in_nfs_share_on_advhostmgmt_false(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': False, + 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}], + 'host_state': 'present-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + utils.UnityNfsShareList = MagicMock + nfs_object = 
MockNfsApi.get_nfs_share_object_on_host_access('add', False) + nfs_object.modify = MagicMock(return_value=None) + nfs_object.add_to_skip_list('modify') + fs_object = MockNfsApi.FILESYSTEM_OBJECT + get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('add', False) + nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object) + nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_remove_host_in_nfs_share_on_advhostmgmt_false(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': True, + 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}], + 'host_state': 'absent-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + utils.UnityNfsShareList = MagicMock + nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('remove', False) + nfs_object.modify = MagicMock(return_value=None) + nfs_object.add_to_skip_list('modify') + fs_object = MockNfsApi.FILESYSTEM_OBJECT + get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('remove', False) + nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object) + nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_host_access_nfs_share_subnet_negative(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 
'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': False, + 'read_only_root_hosts': [{'subnet': "1x.x.x.x"}], + 'host_state': 'present-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + nfs_module_mock.get_filesystem = MagicMock(return_value=None) + nfs_module_mock.get_nfs_share = MagicMock(return_value=None) + nfs_module_mock.create_nfs_share = MagicMock(return_value=None) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=None) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('subnet_validation') + + def test_host_access_nfs_share_advhostmngmt_negative(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'read_only_root_hosts': [{'subnet': "1x.x.x.x/10"}], + 'host_state': 'present-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + nfs_module_mock.get_filesystem = MagicMock(return_value=None) + nfs_module_mock.get_nfs_share = MagicMock(return_value=None) + nfs_module_mock.create_nfs_share = MagicMock(return_value=None) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=None) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('advhostmngmnt_field_validation') + + def test_host_access_nfs_share_exception_negative(self, nfs_module_mock): + self.get_module_args.update({ + 'nfs_export_name': "nfsshare_dummy_name", + 'filesystem_id': "fs_id_1", + 'adv_host_mgmt_enabled': True, + 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}], + 'host_state': 'absent-in-export', + 'state': 'present' + }) + nfs_module_mock.module.params = self.get_module_args + utils.UnityNfsShareList = MagicMock + nfs_object = 
MockNfsApi.get_nfs_share_object_on_host_access('remove', False) + nfs_object.modify = MagicMock(side_effect=MockApiException) + nfs_object.add_to_skip_list('modify') + fs_object = MockNfsApi.FILESYSTEM_OBJECT + nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object) + nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object) + nfs.get_nfs_share_display_attrs = MagicMock(return_value=None) + nfs_module_mock.perform_module_operation() + assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('modify_exception') diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py new file mode 100644 index 00000000..1bc938d4 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py @@ -0,0 +1,226 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for NFS server module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nfsserver_api \ + import MockNFSServerApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import HttpError as http_error, MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +utils.UnityNfsServer = MagicMock() +from 
ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.unity.plugins.modules.nfsserver import NFSServer + + +class TestNFSServer(): + + get_module_args = MockNFSServerApi.NFS_SERVER_MODULE_ARGS + + @pytest.fixture + def nfsserver_module_mock(self): + nfsserver_module_mock = NFSServer() + nfsserver_module_mock.unity_conn = MagicMock() + utils.nfsserver = MagicMock() + nfsserver_module_mock.module.check_mode = False + return nfsserver_module_mock + + def test_get_nfs_server_details(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nfs_server_id': 'nfs_95', + 'state': 'present' + }) + nfsserver_module_mock.module.params = self.get_module_args + host_details = MockNFSServerApi.get_nas_server_id() + host_details.get_id = MagicMock(return_value="nas_10") + host_details.add_to_skip_list('get_id') + nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(return_value=host_details) + nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(return_value=MockNFSServerApi.get_nfs_server_details()[0]) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.get_nfs_server_details_method_response() == \ + nfsserver_module_mock.module.exit_json.call_args[1]['nfs_server_details'] + + def test_get_nfs_server_details_with_exception(self, nfsserver_module_mock): + self.get_module_args.update({ + 'nas_server_name': 'test_nas_server', + 'state': 'present' + }) + nfsserver_module_mock.module.params = self.get_module_args + host_details = MockNFSServerApi.get_nas_server_id() + host_details.get_id = MagicMock(return_value="nas_10") + host_details.add_to_skip_list('get_id') + nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(return_value=host_details) + utils.HttpError = http_error + nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(side_effect=http_error) + 
nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.get_nfs_server_api_exception() == \ + nfsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_nfs_server(self, nfsserver_module_mock): + self.get_module_args.update({ + 'nas_server_name': 'dummy_name', + 'host_name': "dummy_nas23", + 'is_secure_enabled': True, + 'kerberos_domain_controller_type': "WINDOWS", + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'is_extended_credentials_enabled': False, + 'nfs_v4_enabled': True, + 'state': "present" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None) + utils.KdcTypeEnum = MagicMock(return_value={"KdcTypeEnum": {"description": "Windows", "name": "WINDOWS", "value": 2}}) + utils.UnityNfsServer = MagicMock() + utils.UnityNfsServer.create = MagicMock(return_value=True) + nfsserver_module_mock.perform_module_operation() + assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_create_nfs_server_with_unix(self, nfsserver_module_mock): + self.get_module_args.update({ + 'nas_server_name': 'dummy_name', + 'host_name': "dummy_nas23", + 'is_secure_enabled': True, + 'kerberos_domain_controller_type': "UNIX", + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'is_extended_credentials_enabled': False, + 'nfs_v4_enabled': True, + 'state': "present" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None) + utils.KdcTypeEnum = MagicMock(return_value={"KdcTypeEnum": {"description": "Windows", "name": "UNIX", "value": 1}}) + utils.UnityNfsServer = MagicMock() + utils.UnityNfsServer.create = MagicMock(return_value=True) + nfsserver_module_mock.perform_module_operation() + assert 
nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_create_nfs_server_throws_exception(self, nfsserver_module_mock): + self.get_module_args.update({ + 'nas_server_name': 'dummy_name', + 'host_name': "dummy_nas23", + 'is_secure_enabled': True, + 'kerberos_domain_controller_type': "WINDOWS", + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'is_extended_credentials_enabled': False, + 'nfs_v4_enabled': True, + 'state': "present" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None) + utils.UnityNfsServer = MagicMock() + utils.UnityNfsServer.create = MagicMock(side_effect=MockApiException) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.create_nfs_server_with_api_exception() in nfsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_delete_nfs_server(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nas_server_name': 'test_nas_server', + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'remove_spn_from_kerberos': True, + 'state': "absent" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details) + nfsserver_module_mock.perform_module_operation() + assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_nfs_server_with_spn_false(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nas_server_name': 'test_nas_server', + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'remove_spn_from_kerberos': False, + 'state': 
"absent" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details) + nfsserver_module_mock.perform_module_operation() + assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True + + def test_delete_nfs_server_with_exception(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nas_server_name': 'test_nas_server', + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'remove_spn_from_kerberos': False, + 'state': "absent" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details) + nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(side_effect=MockApiException) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.delete_exception() in nfsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_is_modification_required(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nas_server_name': 'test_nas_server', + 'is_extended_credentials_enabled': True, + 'state': 'present' + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.modify_error_msg() == nfsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_nas_server_id_exception(self, nfsserver_module_mock): + nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response() + self.get_module_args.update({ + 'nas_server_name': 'dummy_name', + 'is_secure_enabled': True, + 'host_name': "dummy_nas23", + 
'kerberos_domain_controller_type': "WINDOWS", + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'is_extended_credentials_enabled': False, + 'nfs_v4_enabled': True, + 'state': "present" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=MockApiException) + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.get_nas_server_id_api_exception() in \ + nfsserver_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_nas_server_without_nas_server_id(self, nfsserver_module_mock): + self.get_module_args.update({ + 'is_secure_enabled': True, + 'host_name': "dummy_nas23", + 'kerberos_domain_controller_type': "WINDOWS", + 'kerberos_domain_controller_username': "xxxxxxxx", + 'kerberos_domain_controller_password': "xxxxxxxx", + 'is_extended_credentials_enabled': False, + 'nfs_v4_enabled': True, + 'state': "present" + }) + nfsserver_module_mock.module.params = self.get_module_args + nfsserver_module_mock.get_nas_server_id = MagicMock(return_value=None) + nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None) + nfsserver_module_mock.create_nfs_server = MagicMock(return_value=None) + nfsserver_module_mock.perform_module_operation() + assert MockNFSServerApi.create_nfs_server_without_nas_server_id() in \ + nfsserver_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py new file mode 100644 index 00000000..94bf18c3 --- /dev/null +++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py @@ -0,0 +1,132 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE 
or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for host module on Unity""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_storagepool_api \ + import MockStoragePoolApi +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKObject +from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_unity_management_host_parameters = MagicMock() +utils.ensure_required_libs = MagicMock() +utils.get_unity_unisphere_connection = MagicMock() +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() + +from ansible_collections.dellemc.unity.plugins.modules.storagepool import StoragePool + + +class TestUnityStoragePool(): + + get_module_args = MockStoragePoolApi.STORAGE_POOL_MODULE_ARGS + + @pytest.fixture + def storagepool_module_mock(self): + storagepool_module_mock = StoragePool() + storagepool_module_mock.conn = MagicMock() + return storagepool_module_mock + + def test_get_host_details(self, storagepool_module_mock): + self.get_module_args.update({ + 'pool_name': 'Ansible_Unity_TEST_1', + }) + storagepool_module_mock.module.params = self.get_module_args + get_pool = MockSDKObject(MockStoragePoolApi.get_pool_details_response('get_pool')) + get_pool._get_property_from_raw = MagicMock(return_value=MockSDKObject({'is_schedule_enabled': True})) + get_pool.add_to_skip_list('_get_property_from_raw') + storagepool_module_mock.conn.get_pool = MagicMock(return_value=get_pool) + pool_object = MockStoragePoolApi.get_pool_details_response('pool_object') + utils.UnityPool = MagicMock() + utils.UnityPool.get = 
MagicMock(return_value=MockSDKObject(pool_object)) + disk_list = MockStoragePoolApi.get_pool_details_response('disk_list') + utils.UnityDiskList = MagicMock() + utils.UnityDiskList.get = MagicMock(return_value=disk_list) + storagepool_module_mock.perform_module_operation() + assert MockStoragePoolApi.get_pool_details_response('module')['storage_pool_details'] == \ + storagepool_module_mock.module.exit_json.call_args[1]['storage_pool_details'] + + def test_get_host_details_throws_exception(self, storagepool_module_mock): + self.get_module_args.update({ + 'pool_name': 'Ansible_Unity_SP_3', + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_module_mock.conn.get_pool = MagicMock(side_effect=MockApiException) + storagepool_module_mock.result = MagicMock() + storagepool_module_mock.get_pool_drives = MagicMock() + storagepool_module_mock.perform_module_operation() + storagepool_module_mock.is_pool_modification_required = MagicMock(return_value=False) + assert MockStoragePoolApi.get_pool_details_response('error') == storagepool_module_mock.module.fail_json.call_args[1]['msg'] + + def test_create_pool(self, storagepool_module_mock): + self.get_module_args.update({ + 'pool_name': 'test_pool', + 'pool_description': 'Unity test pool.', + 'raid_groups': { + 'disk_group_id': "dg_16", + 'disk_num': 3, + 'raid_type': 'RAID10', + 'stripe_width': 'BEST_FIT', + }, + 'alert_threshold': 50, + 'is_harvest_enabled': True, + 'pool_harvest_high_threshold': 59, + 'pool_harvest_low_threshold': 40, + 'is_snap_harvest_enabled': True, + 'snap_harvest_high_threshold': 80, + 'snap_harvest_low_threshold': 60, + 'fast_vp': "enabled", + 'fast_cache': "disabled", + 'pool_type': 'TRADITIONAL', + 'state': 'present' + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_module_mock.get_raid_groups_response = MagicMock(return_value=None) + storagepool_module_mock.get_details = MagicMock(return_value=None) + pool_object = 
MockStoragePoolApi.create_pool_response('api') + utils.UnityPool = MagicMock() + utils.UnityPool.create = MagicMock(return_value=MockSDKObject(pool_object)) + storagepool_module_mock.perform_module_operation() + assert storagepool_module_mock.module.exit_json.call_args[1]['changed'] + + def test_create_pool_throws_exception(self, storagepool_module_mock): + self.get_module_args.update({ + 'pool_name': 'test_pool', + 'pool_description': 'Unity test pool.', + 'raid_groups': { + 'disk_group_id': "dg_16", + 'disk_num': 3, + 'raid_type': 'RAID10', + 'stripe_width': 'BEST_FIT', + }, + 'alert_threshold': 50, + 'is_harvest_enabled': True, + 'pool_harvest_high_threshold': 59, + 'pool_harvest_low_threshold': 40, + 'is_snap_harvest_enabled': True, + 'snap_harvest_high_threshold': 80, + 'snap_harvest_low_threshold': 60, + 'fast_vp': "enabled", + 'fast_cache': "disabled", + 'pool_type': 'TRADITIONAL', + 'state': 'present' + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_module_mock.get_details = MagicMock(return_value=None) + utils.UnityPool = MagicMock() + storagepool_module_mock.get_raid_groups_response = MagicMock(side_effect=MockApiException) + storagepool_module_mock.perform_module_operation() + assert MockStoragePoolApi.create_pool_response('error') in \ + storagepool_module_mock.module.fail_json.call_args[1]['msg'] -- cgit v1.2.3